dirstate: rename parentchange to changing_parents...
marmoute
r50855:7a8bfc05 default

The requested changes are too big and content was truncated.
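The rename is mechanical: call sites that used the dirstate's `parentchange`
context manager now call `changing_parents` with the same arguments. A minimal
before/after sketch of a call site (illustrative only; it mirrors the one
absorb.py call site changed below):

    # before r50855
    with repo.dirstate.parentchange(repo):
        repo.dirstate.rebuild(ctx.node(), ctx.manifest(), paths)

    # after r50855
    with repo.dirstate.changing_parents(repo):
        repo.dirstate.rebuild(ctx.node(), ctx.manifest(), paths)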

@@ -1,1165 +1,1165 @@
# absorb.py
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""apply working directory changes to changesets (EXPERIMENTAL)

The absorb extension provides a command to use annotate information to
amend modified chunks into the corresponding non-public changesets.

::

    [absorb]
    # only check 50 recent non-public changesets at most
    max-stack-size = 50
    # whether to add noise to new commits to avoid obsolescence cycle
    add-noise = 1
    # make `amend --correlated` a shortcut to the main command
    amend-flag = correlated

    [color]
    absorb.description = yellow
    absorb.node = blue bold
    absorb.path = bold
"""

# TODO:
# * Rename config items to [commands] namespace
# * Converge getdraftstack() with other code in core
# * move many attributes on fixupstate to be private


import collections

from mercurial.i18n import _
from mercurial.node import (
    hex,
    short,
)
from mercurial import (
    cmdutil,
    commands,
    context,
    crecord,
    error,
    linelog,
    mdiff,
    obsolete,
    patch,
    phases,
    pycompat,
    registrar,
    rewriteutil,
    scmutil,
    util,
)
from mercurial.utils import stringutil

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(b'absorb', b'add-noise', default=True)
configitem(b'absorb', b'amend-flag', default=None)
configitem(b'absorb', b'max-stack-size', default=50)

colortable = {
    b'absorb.description': b'yellow',
    b'absorb.node': b'blue bold',
    b'absorb.path': b'bold',
}

defaultdict = collections.defaultdict


class nullui:
    """blank ui object doing nothing"""

    debugflag = False
    verbose = False
    quiet = True

    def __getitem__(name):
        def nullfunc(*args, **kwds):
            return

        return nullfunc


class emptyfilecontext:
    """minimal filecontext representing an empty file"""

    def __init__(self, repo):
        self._repo = repo

    def data(self):
        return b''

    def node(self):
        return self._repo.nullid


def uniq(lst):
    """list -> list. remove duplicated items without changing the order"""
    seen = set()
    result = []
    for x in lst:
        if x not in seen:
            seen.add(x)
            result.append(x)
    return result
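# For illustration: uniq keeps the first occurrence of each item, so
# uniq([3, 1, 3, 2, 1]) == [3, 1, 2].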


def getdraftstack(headctx, limit=None):
    """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.

    changesets are sorted in topo order, oldest first.
    return at most limit items, if limit is a positive number.

    merges are considered as non-draft as well. i.e. every commit
    returned has and only has 1 parent.
    """
    ctx = headctx
    result = []
    while ctx.phase() != phases.public:
        if limit and len(result) >= limit:
            break
        parents = ctx.parents()
        if len(parents) != 1:
            break
        result.append(ctx)
        ctx = parents[0]
    result.reverse()
    return result
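# A minimal usage sketch (assuming a loaded `repo` and a `ui` object; this
# mirrors how the absorb command is expected to build its stack):
#
#     limit = ui.configint(b'absorb', b'max-stack-size')
#     stack = getdraftstack(repo[b'.'], limit)  # draft commits, oldest first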


def getfilestack(stack, path, seenfctxs=None):
    """([ctx], str, set) -> [fctx], {ctx: fctx}

    stack is a list of contexts, from old to new. usually they are what
    "getdraftstack" returns.

    follows renames, but not copies.

    seenfctxs is a set of filecontexts that will be considered "immutable".
    they are usually what this function returned in earlier calls, useful
    to avoid issues that a file was "moved" to multiple places and was then
    modified differently, like: "a" was copied to "b", "a" was also copied to
    "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a"
    and we enforce only one of them to be able to affect "a"'s content.

    return an empty list and an empty dict, if the specified path does not
    exist in stack[-1] (the top of the stack).

    otherwise, return a list of de-duplicated filecontexts, and the map to
    convert ctx in the stack to fctx, for possible mutable fctxs. the first item
    of the list would be outside the stack and should be considered immutable.
    the remaining items are within the stack.

    for example, given the following changelog and corresponding filelog
    revisions:

      changelog: 3----4----5----6----7
      filelog:   x    0----1----1----2 (x: no such file yet)

    - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
    - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
      dummy empty filecontext.
    - if stack = [2], returns ([], {})
    - if stack = [7], returns ([1, 2], {7: 2})
    - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
      removed, since 1 is immutable.
    """
    if seenfctxs is None:
        seenfctxs = set()
    assert stack

    if path not in stack[-1]:
        return [], {}

    fctxs = []
    fctxmap = {}

    pctx = stack[0].p1()  # the public (immutable) ctx we stop at
    for ctx in reversed(stack):
        if path not in ctx:  # the file is added in the next commit
            pctx = ctx
            break
        fctx = ctx[path]
        fctxs.append(fctx)
        if fctx in seenfctxs:  # treat fctx as the immutable one
            pctx = None  # do not add another immutable fctx
            break
        fctxmap[ctx] = fctx  # only for mutable fctxs
        copy = fctx.copysource()
        if copy:
            path = copy  # follow rename
            if path in ctx:  # but do not follow copy
                pctx = ctx.p1()
                break

    if pctx is not None:  # need an extra immutable fctx
        if path in pctx:
            fctxs.append(pctx[path])
        else:
            fctxs.append(emptyfilecontext(pctx.repo()))

    fctxs.reverse()
    # note: we rely on a property of hg: filerev is not reused for linear
    # history. i.e. it's impossible to have:
    #   changelog: 4----5----6 (linear, no merges)
    #   filelog:   1----2----1
    #                        ^ reuse filerev (impossible)
    # because parents are part of the hash. if that's not true, we need to
    # remove uniq and find a different way to identify fctxs.
    return uniq(fctxs), fctxmap


class overlaystore(patch.filestore):
    """read-only, hybrid store based on a dict and ctx.
    memworkingcopy: {path: content}, overrides file contents.
    """

    def __init__(self, basectx, memworkingcopy):
        self.basectx = basectx
        self.memworkingcopy = memworkingcopy

    def getfile(self, path):
        """comply with mercurial.patch.filestore.getfile"""
        if path not in self.basectx:
            return None, None, None
        fctx = self.basectx[path]
        if path in self.memworkingcopy:
            content = self.memworkingcopy[path]
        else:
            content = fctx.data()
        mode = (fctx.islink(), fctx.isexec())
        copy = fctx.copysource()
        return content, mode, copy


def overlaycontext(memworkingcopy, ctx, parents=None, extra=None, desc=None):
    """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
    memworkingcopy overrides file contents.
    """
    # parents must contain 2 items: (node1, node2)
    if parents is None:
        parents = ctx.repo().changelog.parents(ctx.node())
    if extra is None:
        extra = ctx.extra()
    if desc is None:
        desc = ctx.description()
    date = ctx.date()
    user = ctx.user()
    files = set(ctx.files()).union(memworkingcopy)
    store = overlaystore(ctx, memworkingcopy)
    return context.memctx(
        repo=ctx.repo(),
        parents=parents,
        text=desc,
        files=files,
        filectxfn=store,
        user=user,
        date=date,
        branch=None,
        extra=extra,
    )
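# A minimal sketch of overlaycontext (hypothetical path and content; `repo` is
# assumed to be a loaded repository):
#
#     ctx = repo[b'.']
#     mctx = overlaycontext({b'a.txt': b'new content\n'}, ctx)
#     newnode = mctx.commit()  # clone of ctx with a.txt's content replaced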


class filefixupstate:
    """state needed to apply fixups to a single file

    internally, it keeps file contents of several revisions and a linelog.

    the linelog uses odd revision numbers for original contents (fctxs passed
    to __init__), and even revision numbers for fixups, like:

        linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
        linelog rev 2: fixups made to self.fctxs[0]
        linelog rev 3: self.fctxs[1] (a child of fctxs[0])
        linelog rev 4: fixups made to self.fctxs[1]
        ...

    a typical use is like:

        1. call diffwith, to calculate self.fixups
        2. (optionally), present self.fixups to the user, or change it
        3. call apply, to apply changes
        4. read results from "finalcontents", or call getfinalcontent
    """

    def __init__(self, fctxs, path, ui=None, opts=None):
        """([fctx], ui or None) -> None

        fctxs should be linear, and sorted by topo order - oldest first.
        fctxs[0] will be considered as "immutable" and will not be changed.
        """
        self.fctxs = fctxs
        self.path = path
        self.ui = ui or nullui()
        self.opts = opts or {}

        # following fields are built from fctxs. they exist for perf reason
        self.contents = [f.data() for f in fctxs]
        self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
        self.linelog = self._buildlinelog()
        if self.ui.debugflag:
            assert self._checkoutlinelog() == self.contents

        # following fields will be filled later
        self.chunkstats = [0, 0]  # [adopted, total : int]
        self.targetlines = []  # [str]
        self.fixups = []  # [(linelog rev, a1, a2, b1, b2)]
        self.finalcontents = []  # [str]
        self.ctxaffected = set()

    def diffwith(self, targetfctx, fm=None):
        """calculate fixups needed by examining the differences between
        self.fctxs[-1] and targetfctx, chunk by chunk.

        targetfctx is the target state we move towards. we may or may not be
        able to get there because not all modified chunks can be amended into
        a non-public fctx unambiguously.

        call this only once, before apply().

        update self.fixups, self.chunkstats, and self.targetlines.
        """
        a = self.contents[-1]
        alines = self.contentlines[-1]
        b = targetfctx.data()
        blines = mdiff.splitnewlines(b)
        self.targetlines = blines

        self.linelog.annotate(self.linelog.maxrev)
        annotated = self.linelog.annotateresult  # [(linelog rev, linenum)]
        assert len(annotated) == len(alines)
        # add a dummy end line to make insertion at the end easier
        if annotated:
            dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
            annotated.append(dummyendline)

        # analyse diff blocks
        for chunk in self._alldiffchunks(a, b, alines, blines):
            newfixups = self._analysediffchunk(chunk, annotated)
            self.chunkstats[0] += bool(newfixups)  # 1 or 0
            self.chunkstats[1] += 1
            self.fixups += newfixups
            if fm is not None:
                self._showchanges(fm, alines, blines, chunk, newfixups)

    def apply(self):
        """apply self.fixups. update self.linelog, self.finalcontents.

        call this only once, before getfinalcontent(), after diffwith().
        """
        # the following is unnecessary, as it's done by "diffwith":
        # self.linelog.annotate(self.linelog.maxrev)
        for rev, a1, a2, b1, b2 in reversed(self.fixups):
            blines = self.targetlines[b1:b2]
            if self.ui.debugflag:
                idx = (max(rev - 1, 0)) // 2
                self.ui.write(
                    _(b'%s: chunk %d:%d -> %d lines\n')
                    % (short(self.fctxs[idx].node()), a1, a2, len(blines))
                )
            self.linelog.replacelines(rev, a1, a2, b1, b2)
        if self.opts.get(b'edit_lines', False):
            self.finalcontents = self._checkoutlinelogwithedits()
        else:
            self.finalcontents = self._checkoutlinelog()

    def getfinalcontent(self, fctx):
        """(fctx) -> str. get modified file content for a given filecontext"""
        idx = self.fctxs.index(fctx)
        return self.finalcontents[idx]

    def _analysediffchunk(self, chunk, annotated):
        """analyse a different chunk and return new fixups found

        return [] if no lines from the chunk can be safely applied.

        the chunk (or lines) cannot be safely applied, if, for example:
        - the modified (deleted) lines belong to a public changeset
          (self.fctxs[0])
        - the chunk is a pure insertion and the adjacent lines (at most 2
          lines) belong to different non-public changesets, or do not belong
          to any non-public changesets.
        - the chunk is modifying lines from different changesets.
          in this case, if the number of lines deleted equals to the number
          of lines added, assume it's a simple 1:1 map (could be wrong).
          otherwise, give up.
        - the chunk is modifying lines from a single non-public changeset,
          but other revisions touch the area as well. i.e. the lines are
          not continuous as seen from the linelog.
        """
        a1, a2, b1, b2 = chunk
        # find involved indexes from annotate result
        involved = annotated[a1:a2]
        if not involved and annotated:  # a1 == a2 and a is not empty
            # pure insertion, check nearby lines. ignore lines belong
            # to the public (first) changeset (i.e. annotated[i][0] == 1)
            nearbylinenums = {a2, max(0, a1 - 1)}
            involved = [
                annotated[i] for i in nearbylinenums if annotated[i][0] != 1
            ]
        involvedrevs = list({r for r, l in involved})
        newfixups = []
        if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
            # chunk belongs to a single revision
            rev = involvedrevs[0]
            if rev > 1:
                fixuprev = rev + 1
                newfixups.append((fixuprev, a1, a2, b1, b2))
        elif a2 - a1 == b2 - b1 or b1 == b2:
            # 1:1 line mapping, or chunk was deleted
            for i in range(a1, a2):
                rev, linenum = annotated[i]
                if rev > 1:
                    if b1 == b2:  # deletion, simply remove that single line
                        nb1 = nb2 = 0
                    else:  # 1:1 line mapping, change the corresponding rev
                        nb1 = b1 + i - a1
                        nb2 = nb1 + 1
                    fixuprev = rev + 1
                    newfixups.append((fixuprev, i, i + 1, nb1, nb2))
        return self._optimizefixups(newfixups)

    @staticmethod
    def _alldiffchunks(a, b, alines, blines):
        """like mdiff.allblocks, but only care about differences"""
        blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
        for chunk, btype in blocks:
            if btype != b'!':
                continue
            yield chunk

    def _buildlinelog(self):
        """calculate the initial linelog based on self.content{,line}s.
        this is similar to running a partial "annotate".
        """
        llog = linelog.linelog()
        a, alines = b'', []
        for i in range(len(self.contents)):
            b, blines = self.contents[i], self.contentlines[i]
            llrev = i * 2 + 1
            chunks = self._alldiffchunks(a, b, alines, blines)
            for a1, a2, b1, b2 in reversed(list(chunks)):
                llog.replacelines(llrev, a1, a2, b1, b2)
            a, alines = b, blines
        return llog

    def _checkoutlinelog(self):
        """() -> [str]. check out file contents from linelog"""
        contents = []
        for i in range(len(self.contents)):
            rev = (i + 1) * 2
            self.linelog.annotate(rev)
            content = b''.join(map(self._getline, self.linelog.annotateresult))
            contents.append(content)
        return contents

    def _checkoutlinelogwithedits(self):
        """() -> [str]. prompt all lines for edit"""
        alllines = self.linelog.getalllines()
        # header
        editortext = (
            _(
                b'HG: editing %s\nHG: "y" means the line to the right '
                b'exists in the changeset to the top\nHG:\n'
            )
            % self.fctxs[-1].path()
        )
        # [(idx, fctx)]. hide the dummy emptyfilecontext
        visiblefctxs = [
            (i, f)
            for i, f in enumerate(self.fctxs)
            if not isinstance(f, emptyfilecontext)
        ]
        for i, (j, f) in enumerate(visiblefctxs):
            editortext += _(b'HG: %s/%s %s %s\n') % (
                b'|' * i,
                b'-' * (len(visiblefctxs) - i + 1),
                short(f.node()),
                f.description().split(b'\n', 1)[0],
            )
        editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs))
        # figure out the lifetime of a line, this is relatively inefficient,
        # but probably fine
        lineset = defaultdict(lambda: set())  # {(llrev, linenum): {llrev}}
        for i, f in visiblefctxs:
            self.linelog.annotate((i + 1) * 2)
            for l in self.linelog.annotateresult:
                lineset[l].add(i)
        # append lines
        for l in alllines:
            editortext += b'    %s : %s' % (
                b''.join(
                    [
                        (b'y' if i in lineset[l] else b' ')
                        for i, _f in visiblefctxs
                    ]
                ),
                self._getline(l),
            )
        # run editor
        editedtext = self.ui.edit(editortext, b'', action=b'absorb')
        if not editedtext:
            raise error.InputError(_(b'empty editor text'))
        # parse edited result
        contents = [b''] * len(self.fctxs)
        leftpadpos = 4
        colonpos = leftpadpos + len(visiblefctxs) + 1
        for l in mdiff.splitnewlines(editedtext):
            if l.startswith(b'HG:'):
                continue
            if l[colonpos - 1 : colonpos + 2] != b' : ':
                raise error.InputError(_(b'malformed line: %s') % l)
            linecontent = l[colonpos + 2 :]
            for i, ch in enumerate(
                pycompat.bytestr(l[leftpadpos : colonpos - 1])
            ):
                if ch == b'y':
                    contents[visiblefctxs[i][0]] += linecontent
        # chunkstats is hard to calculate if anything changes, therefore
        # set them to just a simple value (1, 1).
        if editedtext != editortext:
            self.chunkstats = [1, 1]
        return contents

    def _getline(self, lineinfo):
        """((rev, linenum)) -> str. convert rev+line number to line content"""
        rev, linenum = lineinfo
        if rev & 1:  # odd: original line taken from fctxs
            return self.contentlines[rev // 2][linenum]
        else:  # even: fixup line from targetfctx
            return self.targetlines[linenum]

    def _iscontinuous(self, a1, a2, closedinterval=False):
        """(a1, a2 : int) -> bool

        check if these lines are continuous. i.e. no other insertions or
        deletions (from other revisions) among these lines.

        closedinterval decides whether a2 should be included or not. i.e. is
        it [a1, a2), or [a1, a2] ?
        """
        if a1 >= a2:
            return True
        llog = self.linelog
        offset1 = llog.getoffset(a1)
        offset2 = llog.getoffset(a2) + int(closedinterval)
        linesinbetween = llog.getalllines(offset1, offset2)
        return len(linesinbetween) == a2 - a1 + int(closedinterval)

    def _optimizefixups(self, fixups):
        """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
        merge adjacent fixups to make them less fragmented.
        """
        result = []
        pcurrentchunk = [[-1, -1, -1, -1, -1]]

        def pushchunk():
            if pcurrentchunk[0][0] != -1:
                result.append(tuple(pcurrentchunk[0]))

        for i, chunk in enumerate(fixups):
            rev, a1, a2, b1, b2 = chunk
            lastrev = pcurrentchunk[0][0]
            lasta2 = pcurrentchunk[0][2]
            lastb2 = pcurrentchunk[0][4]
            if (
                a1 == lasta2
                and b1 == lastb2
                and rev == lastrev
                and self._iscontinuous(max(a1 - 1, 0), a1)
            ):
                # merge into currentchunk
                pcurrentchunk[0][2] = a2
                pcurrentchunk[0][4] = b2
            else:
                pushchunk()
                pcurrentchunk[0] = list(chunk)
        pushchunk()
        return result

    def _showchanges(self, fm, alines, blines, chunk, fixups):
        def trim(line):
            if line.endswith(b'\n'):
                line = line[:-1]
            return line

        # this is not optimized for perf but _showchanges only gets executed
        # with an extra command-line flag.
        a1, a2, b1, b2 = chunk
        aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
        for idx, fa1, fa2, fb1, fb2 in fixups:
            for i in range(fa1, fa2):
                aidxs[i - a1] = (max(idx, 1) - 1) // 2
            for i in range(fb1, fb2):
                bidxs[i - b1] = (max(idx, 1) - 1) // 2

        fm.startitem()
        fm.write(
            b'hunk',
            b'  %s\n',
            b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1),
            label=b'diff.hunk',
        )
        fm.data(path=self.path, linetype=b'hunk')

        def writeline(idx, diffchar, line, linetype, linelabel):
            fm.startitem()
            node = b''
            if idx:
                ctx = self.fctxs[idx]
                fm.context(fctx=ctx)
                node = ctx.hex()
                self.ctxaffected.add(ctx.changectx())
            fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node')
            fm.write(
                b'diffchar ' + linetype,
                b'%s%s\n',
                diffchar,
                line,
                label=linelabel,
            )
            fm.data(path=self.path, linetype=linetype)

        for i in range(a1, a2):
            writeline(
                aidxs[i - a1],
                b'-',
                trim(alines[i]),
                b'deleted',
                b'diff.deleted',
            )
        for i in range(b1, b2):
            writeline(
                bidxs[i - b1],
                b'+',
                trim(blines[i]),
                b'inserted',
                b'diff.inserted',
            )


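# A minimal sketch of the filefixupstate flow from its docstring (hypothetical
# file name; `stack` comes from getdraftstack and `wctx` is assumed to be the
# working copy context):
#
#     fctxs, ctx2fctx = getfilestack(stack, b'a.txt')
#     state = filefixupstate(fctxs, b'a.txt')
#     state.diffwith(wctx[b'a.txt'])              # 1. calculate state.fixups
#     state.apply()                               # 3. apply changes in memory
#     content = state.getfinalcontent(fctxs[-1])  # 4. read results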
658 class fixupstate:
658 class fixupstate:
659 """state needed to run absorb
659 """state needed to run absorb
660
660
661 internally, it keeps paths and filefixupstates.
661 internally, it keeps paths and filefixupstates.
662
662
663 a typical use is like filefixupstates:
663 a typical use is like filefixupstates:
664
664
665 1. call diffwith, to calculate fixups
665 1. call diffwith, to calculate fixups
666 2. (optionally), present fixups to the user, or edit fixups
666 2. (optionally), present fixups to the user, or edit fixups
667 3. call apply, to apply changes to memory
667 3. call apply, to apply changes to memory
668 4. call commit, to commit changes to hg database
668 4. call commit, to commit changes to hg database
669 """
669 """
670
670
671 def __init__(self, stack, ui=None, opts=None):
671 def __init__(self, stack, ui=None, opts=None):
672 """([ctx], ui or None) -> None
672 """([ctx], ui or None) -> None
673
673
674 stack: should be linear, and sorted by topo order - oldest first.
674 stack: should be linear, and sorted by topo order - oldest first.
675 all commits in stack are considered mutable.
675 all commits in stack are considered mutable.
676 """
676 """
677 assert stack
677 assert stack
678 self.ui = ui or nullui()
678 self.ui = ui or nullui()
679 self.opts = opts or {}
679 self.opts = opts or {}
680 self.stack = stack
680 self.stack = stack
681 self.repo = stack[-1].repo().unfiltered()
681 self.repo = stack[-1].repo().unfiltered()
682
682
683 # following fields will be filled later
683 # following fields will be filled later
684 self.paths = [] # [str]
684 self.paths = [] # [str]
685 self.status = None # ctx.status output
685 self.status = None # ctx.status output
686 self.fctxmap = {} # {path: {ctx: fctx}}
686 self.fctxmap = {} # {path: {ctx: fctx}}
687 self.fixupmap = {} # {path: filefixupstate}
687 self.fixupmap = {} # {path: filefixupstate}
688 self.replacemap = {} # {oldnode: newnode or None}
688 self.replacemap = {} # {oldnode: newnode or None}
689 self.finalnode = None # head after all fixups
689 self.finalnode = None # head after all fixups
690 self.ctxaffected = set() # ctx that will be absorbed into
690 self.ctxaffected = set() # ctx that will be absorbed into
691
691
692 def diffwith(self, targetctx, match=None, fm=None):
692 def diffwith(self, targetctx, match=None, fm=None):
693 """diff and prepare fixups. update self.fixupmap, self.paths"""
693 """diff and prepare fixups. update self.fixupmap, self.paths"""
694 # only care about modified files
694 # only care about modified files
695 self.status = self.stack[-1].status(targetctx, match)
695 self.status = self.stack[-1].status(targetctx, match)
696 self.paths = []
696 self.paths = []
697 # but if --edit-lines is used, the user may want to edit files
697 # but if --edit-lines is used, the user may want to edit files
698 # even if they are not modified
698 # even if they are not modified
699 editopt = self.opts.get(b'edit_lines')
699 editopt = self.opts.get(b'edit_lines')
700 if not self.status.modified and editopt and match:
700 if not self.status.modified and editopt and match:
701 interestingpaths = match.files()
701 interestingpaths = match.files()
702 else:
702 else:
703 interestingpaths = self.status.modified
703 interestingpaths = self.status.modified
704 # prepare the filefixupstate
704 # prepare the filefixupstate
705 seenfctxs = set()
705 seenfctxs = set()
706 # sorting is necessary to eliminate ambiguity for the "double move"
706 # sorting is necessary to eliminate ambiguity for the "double move"
707 # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
707 # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
708 for path in sorted(interestingpaths):
708 for path in sorted(interestingpaths):
709 self.ui.debug(b'calculating fixups for %s\n' % path)
709 self.ui.debug(b'calculating fixups for %s\n' % path)
710 targetfctx = targetctx[path]
710 targetfctx = targetctx[path]
711 fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
711 fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
712 # ignore symbolic links or binary, or unchanged files
712 # ignore symbolic links or binary, or unchanged files
713 if any(
713 if any(
714 f.islink() or stringutil.binary(f.data())
714 f.islink() or stringutil.binary(f.data())
715 for f in [targetfctx] + fctxs
715 for f in [targetfctx] + fctxs
716 if not isinstance(f, emptyfilecontext)
716 if not isinstance(f, emptyfilecontext)
717 ):
717 ):
718 continue
718 continue
719 if targetfctx.data() == fctxs[-1].data() and not editopt:
719 if targetfctx.data() == fctxs[-1].data() and not editopt:
720 continue
720 continue
721 seenfctxs.update(fctxs[1:])
721 seenfctxs.update(fctxs[1:])
722 self.fctxmap[path] = ctx2fctx
722 self.fctxmap[path] = ctx2fctx
723 fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
723 fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
724 if fm is not None:
724 if fm is not None:
725 fm.startitem()
725 fm.startitem()
726 fm.plain(b'showing changes for ')
726 fm.plain(b'showing changes for ')
727 fm.write(b'path', b'%s\n', path, label=b'absorb.path')
727 fm.write(b'path', b'%s\n', path, label=b'absorb.path')
728 fm.data(linetype=b'path')
728 fm.data(linetype=b'path')
729 fstate.diffwith(targetfctx, fm)
729 fstate.diffwith(targetfctx, fm)
730 self.fixupmap[path] = fstate
730 self.fixupmap[path] = fstate
731 self.paths.append(path)
731 self.paths.append(path)
732 self.ctxaffected.update(fstate.ctxaffected)
732 self.ctxaffected.update(fstate.ctxaffected)
733
733
734 def apply(self):
734 def apply(self):
735 """apply fixups to individual filefixupstates"""
735 """apply fixups to individual filefixupstates"""
736 for path, state in self.fixupmap.items():
736 for path, state in self.fixupmap.items():
737 if self.ui.debugflag:
737 if self.ui.debugflag:
738 self.ui.write(_(b'applying fixups to %s\n') % path)
738 self.ui.write(_(b'applying fixups to %s\n') % path)
739 state.apply()
739 state.apply()
740
740
741 @property
741 @property
742 def chunkstats(self):
742 def chunkstats(self):
743 """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
743 """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
744 return {path: state.chunkstats for path, state in self.fixupmap.items()}
744 return {path: state.chunkstats for path, state in self.fixupmap.items()}
745
745
746 def commit(self):
746 def commit(self):
747 """commit changes. update self.finalnode, self.replacemap"""
747 """commit changes. update self.finalnode, self.replacemap"""
748 with self.repo.transaction(b'absorb') as tr:
748 with self.repo.transaction(b'absorb') as tr:
749 self._commitstack()
749 self._commitstack()
750 self._movebookmarks(tr)
750 self._movebookmarks(tr)
751 if self.repo[b'.'].node() in self.replacemap:
751 if self.repo[b'.'].node() in self.replacemap:
752 self._moveworkingdirectoryparent()
752 self._moveworkingdirectoryparent()
753 self._cleanupoldcommits()
753 self._cleanupoldcommits()
754 return self.finalnode
754 return self.finalnode
755
755
756 def printchunkstats(self):
756 def printchunkstats(self):
757 """print things like '1 of 2 chunk(s) applied'"""
757 """print things like '1 of 2 chunk(s) applied'"""
758 ui = self.ui
758 ui = self.ui
759 chunkstats = self.chunkstats
759 chunkstats = self.chunkstats
760 if ui.verbose:
760 if ui.verbose:
761 # chunkstats for each file
761 # chunkstats for each file
762 for path, stat in chunkstats.items():
762 for path, stat in chunkstats.items():
763 if stat[0]:
763 if stat[0]:
764 ui.write(
764 ui.write(
765 _(b'%s: %d of %d chunk(s) applied\n')
765 _(b'%s: %d of %d chunk(s) applied\n')
766 % (path, stat[0], stat[1])
766 % (path, stat[0], stat[1])
767 )
767 )
768 elif not ui.quiet:
768 elif not ui.quiet:
769 # a summary for all files
769 # a summary for all files
770 stats = chunkstats.values()
770 stats = chunkstats.values()
771 applied, total = (sum(s[i] for s in stats) for i in (0, 1))
771 applied, total = (sum(s[i] for s in stats) for i in (0, 1))
772 ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total))
772 ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total))
773
773
774 def _commitstack(self):
774 def _commitstack(self):
775 """make new commits. update self.finalnode, self.replacemap.
775 """make new commits. update self.finalnode, self.replacemap.
776 it is splitted from "commit" to avoid too much indentation.
776 it is splitted from "commit" to avoid too much indentation.
777 """
777 """
778 # last node (20-char) committed by us
778 # last node (20-char) committed by us
779 lastcommitted = None
779 lastcommitted = None
780 # p1 which overrides the parent of the next commit, "None" means use
780 # p1 which overrides the parent of the next commit, "None" means use
781 # the original parent unchanged
781 # the original parent unchanged
782 nextp1 = None
782 nextp1 = None
783 for ctx in self.stack:
783 for ctx in self.stack:
784 memworkingcopy = self._getnewfilecontents(ctx)
784 memworkingcopy = self._getnewfilecontents(ctx)
785 if not memworkingcopy and not lastcommitted:
785 if not memworkingcopy and not lastcommitted:
786 # nothing changed, nothing commited
786 # nothing changed, nothing commited
787 nextp1 = ctx
787 nextp1 = ctx
788 continue
788 continue
789 willbecomenoop = ctx.files() and self._willbecomenoop(
789 willbecomenoop = ctx.files() and self._willbecomenoop(
790 memworkingcopy, ctx, nextp1
790 memworkingcopy, ctx, nextp1
791 )
791 )
792 if self.skip_empty_successor and willbecomenoop:
792 if self.skip_empty_successor and willbecomenoop:
793 # changeset is no longer necessary
793 # changeset is no longer necessary
794 self.replacemap[ctx.node()] = None
794 self.replacemap[ctx.node()] = None
795 msg = _(b'became empty and was dropped')
795 msg = _(b'became empty and was dropped')
796 else:
796 else:
797 # changeset needs re-commit
797 # changeset needs re-commit
798 nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
798 nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
799 lastcommitted = self.repo[nodestr]
799 lastcommitted = self.repo[nodestr]
800 nextp1 = lastcommitted
800 nextp1 = lastcommitted
801 self.replacemap[ctx.node()] = lastcommitted.node()
801 self.replacemap[ctx.node()] = lastcommitted.node()
802 if memworkingcopy:
802 if memworkingcopy:
803 if willbecomenoop:
803 if willbecomenoop:
804 msg = _(b'%d file(s) changed, became empty as %s')
804 msg = _(b'%d file(s) changed, became empty as %s')
805 else:
805 else:
806 msg = _(b'%d file(s) changed, became %s')
806 msg = _(b'%d file(s) changed, became %s')
807 msg = msg % (
807 msg = msg % (
808 len(memworkingcopy),
808 len(memworkingcopy),
809 self._ctx2str(lastcommitted),
809 self._ctx2str(lastcommitted),
810 )
810 )
811 else:
811 else:
812 msg = _(b'became %s') % self._ctx2str(lastcommitted)
812 msg = _(b'became %s') % self._ctx2str(lastcommitted)
813 if self.ui.verbose and msg:
813 if self.ui.verbose and msg:
814 self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg))
814 self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg))
815 self.finalnode = lastcommitted and lastcommitted.node()
815 self.finalnode = lastcommitted and lastcommitted.node()
816
816
817 def _ctx2str(self, ctx):
817 def _ctx2str(self, ctx):
818 if self.ui.debugflag:
818 if self.ui.debugflag:
819 return b'%d:%s' % (ctx.rev(), ctx.hex())
819 return b'%d:%s' % (ctx.rev(), ctx.hex())
820 else:
820 else:
821 return b'%d:%s' % (ctx.rev(), short(ctx.node()))
821 return b'%d:%s' % (ctx.rev(), short(ctx.node()))
822
822
823 def _getnewfilecontents(self, ctx):
823 def _getnewfilecontents(self, ctx):
824 """(ctx) -> {path: str}
824 """(ctx) -> {path: str}
825
825
826 fetch file contents from filefixupstates.
826 fetch file contents from filefixupstates.
827 return the working copy overrides - files different from ctx.
827 return the working copy overrides - files different from ctx.
828 """
828 """
829 result = {}
829 result = {}
830 for path in self.paths:
830 for path in self.paths:
831 ctx2fctx = self.fctxmap[path] # {ctx: fctx}
831 ctx2fctx = self.fctxmap[path] # {ctx: fctx}
832 if ctx not in ctx2fctx:
832 if ctx not in ctx2fctx:
833 continue
833 continue
834 fctx = ctx2fctx[ctx]
834 fctx = ctx2fctx[ctx]
835 content = fctx.data()
835 content = fctx.data()
836 newcontent = self.fixupmap[path].getfinalcontent(fctx)
836 newcontent = self.fixupmap[path].getfinalcontent(fctx)
837 if content != newcontent:
837 if content != newcontent:
838 result[fctx.path()] = newcontent
838 result[fctx.path()] = newcontent
839 return result
839 return result
840
840
841 def _movebookmarks(self, tr):
841 def _movebookmarks(self, tr):
842 repo = self.repo
842 repo = self.repo
843 needupdate = [
843 needupdate = [
844 (name, self.replacemap[hsh])
844 (name, self.replacemap[hsh])
845 for name, hsh in repo._bookmarks.items()
845 for name, hsh in repo._bookmarks.items()
846 if hsh in self.replacemap
846 if hsh in self.replacemap
847 ]
847 ]
848 changes = []
848 changes = []
849 for name, hsh in needupdate:
849 for name, hsh in needupdate:
850 if hsh:
850 if hsh:
851 changes.append((name, hsh))
851 changes.append((name, hsh))
852 if self.ui.verbose:
852 if self.ui.verbose:
853 self.ui.write(
853 self.ui.write(
854 _(b'moving bookmark %s to %s\n') % (name, hex(hsh))
854 _(b'moving bookmark %s to %s\n') % (name, hex(hsh))
855 )
855 )
856 else:
856 else:
857 changes.append((name, None))
857 changes.append((name, None))
858 if self.ui.verbose:
858 if self.ui.verbose:
859 self.ui.write(_(b'deleting bookmark %s\n') % name)
859 self.ui.write(_(b'deleting bookmark %s\n') % name)
860 repo._bookmarks.applychanges(repo, tr, changes)
860 repo._bookmarks.applychanges(repo, tr, changes)
861
861
862 def _moveworkingdirectoryparent(self):
862 def _moveworkingdirectoryparent(self):
863 if not self.finalnode:
863 if not self.finalnode:
864 # Find the latest not-{obsoleted,stripped} parent.
864 # Find the latest not-{obsoleted,stripped} parent.
865 revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys())
865 revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys())
866 ctx = self.repo[revs.first()]
866 ctx = self.repo[revs.first()]
867 self.finalnode = ctx.node()
867 self.finalnode = ctx.node()
868 else:
868 else:
869 ctx = self.repo[self.finalnode]
869 ctx = self.repo[self.finalnode]
870
870
871 dirstate = self.repo.dirstate
871 dirstate = self.repo.dirstate
872 # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
872 # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
873 # be slow. in absorb's case, no need to invalidate fsmonitorstate.
873 # be slow. in absorb's case, no need to invalidate fsmonitorstate.
874 noop = lambda: 0
874 noop = lambda: 0
875 restore = noop
875 restore = noop
876 if util.safehasattr(dirstate, '_fsmonitorstate'):
876 if util.safehasattr(dirstate, '_fsmonitorstate'):
877 bak = dirstate._fsmonitorstate.invalidate
877 bak = dirstate._fsmonitorstate.invalidate
878
878
879 def restore():
879 def restore():
880 dirstate._fsmonitorstate.invalidate = bak
880 dirstate._fsmonitorstate.invalidate = bak
881
881
882 dirstate._fsmonitorstate.invalidate = noop
882 dirstate._fsmonitorstate.invalidate = noop
883 try:
883 try:
884 with dirstate.parentchange(self.repo):
884 with dirstate.changing_parents(self.repo):
885 dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
885 dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
886 finally:
886 finally:
887 restore()
887 restore()
888
888
    @staticmethod
    def _willbecomenoop(memworkingcopy, ctx, pctx=None):
        """({path: content}, ctx, ctx) -> bool. test if a commit will be noop

        if it will become an empty commit (does not change anything, after the
        memworkingcopy overrides), return True. otherwise return False.
        """
        if not pctx:
            parents = ctx.parents()
            if len(parents) != 1:
                return False
            pctx = parents[0]
        if ctx.branch() != pctx.branch():
            return False
        if ctx.extra().get(b'close'):
            return False
        # ctx changes more files (not a subset of memworkingcopy)
        if not set(ctx.files()).issubset(set(memworkingcopy)):
            return False
        for path, content in memworkingcopy.items():
            if path not in pctx or path not in ctx:
                return False
            fctx = ctx[path]
            pfctx = pctx[path]
            if pfctx.flags() != fctx.flags():
                return False
            if pfctx.data() != content:
                return False
        return True

    def _commitsingle(self, memworkingcopy, ctx, p1=None):
        """({path: content}, ctx, node) -> node. make a single commit

        the commit is a clone from ctx, with an (optionally) different p1, and
        different file contents replaced by memworkingcopy.
        """
        parents = p1 and (p1, self.repo.nullid)
        extra = ctx.extra()
        if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
            extra[b'absorb_source'] = ctx.hex()

        desc = rewriteutil.update_hash_refs(
            ctx.repo(),
            ctx.description(),
            {
                oldnode: [newnode]
                for oldnode, newnode in self.replacemap.items()
            },
        )
        mctx = overlaycontext(
            memworkingcopy, ctx, parents, extra=extra, desc=desc
        )
        return mctx.commit()

    @util.propertycache
    def _useobsolete(self):
        """() -> bool"""
        return obsolete.isenabled(self.repo, obsolete.createmarkersopt)

    def _cleanupoldcommits(self):
        replacements = {
            k: ([v] if v is not None else [])
            for k, v in self.replacemap.items()
        }
        if replacements:
            scmutil.cleanupnodes(
                self.repo, replacements, operation=b'absorb', fixphase=True
            )

    @util.propertycache
    def skip_empty_successor(self):
        return rewriteutil.skip_empty_successor(self.ui, b'absorb')


def _parsechunk(hunk):
    """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))"""
    if type(hunk) not in (crecord.uihunk, patch.recordhunk):
        return None, None
    path = hunk.header.filename()
    a1 = hunk.fromline + len(hunk.before) - 1
    # remove before and after context
    hunk.before = hunk.after = []
    buf = util.stringio()
    hunk.write(buf)
    patchlines = mdiff.splitnewlines(buf.getvalue())
    # hunk.prettystr() will update hunk.removed
    a2 = a1 + hunk.removed
    blines = [l[1:] for l in patchlines[1:] if not l.startswith(b'-')]
    return path, (a1, a2, blines)


def overlaydiffcontext(ctx, chunks):
    """(ctx, [crecord.uihunk]) -> memctx

    return a memctx with some [1] patches (chunks) applied to ctx.
    [1]: modifications are handled. renames, mode changes, etc. are ignored.
    """
    # sadly the applying-patch logic is hardly reusable, and messy:
    # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it
    #    needs a file stream of a patch and will re-parse it, while we have
    #    structured hunk objects at hand.
    # 2. a lot of different implementations about "chunk" (patch.hunk,
    #    patch.recordhunk, crecord.uihunk)
    # as we only care about applying changes to modified files, no mode
    # change, no binary diff, and no renames, it's probably okay to
    # re-invent the logic using much simpler code here.
    memworkingcopy = {}  # {path: content}
    patchmap = collections.defaultdict(list)  # {path: [(a1, a2, [bline])]}
    for path, info in map(_parsechunk, chunks):
        if not path or not info:
            continue
        patchmap[path].append(info)
    for path, patches in patchmap.items():
        if path not in ctx or not patches:
            continue
        patches.sort(reverse=True)
        lines = mdiff.splitnewlines(ctx[path].data())
        for a1, a2, blines in patches:
            lines[a1:a2] = blines
        memworkingcopy[path] = b''.join(lines)
    return overlaycontext(memworkingcopy, ctx)


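# A minimal, self-contained sketch (the helper below is hypothetical, not part
# of this extension) of why overlaydiffcontext() applies the patches for each
# file in reverse order: splicing from the end of the line list keeps the
# earlier (a1, a2) indices valid, so no offset bookkeeping is needed.
def _reversesplicedemo():
    lines = [b'a\n', b'b\n', b'c\n', b'd\n']
    patches = [(0, 1, [b'A\n']), (2, 4, [b'C\n'])]
    for a1, a2, blines in sorted(patches, reverse=True):
        lines[a1:a2] = blines
    assert lines == [b'A\n', b'b\n', b'C\n']

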
def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
    """pick fixup chunks from targetctx, apply them to stack.

    if targetctx is None, the working copy context will be used.
    if stack is None, the current draft stack will be used.
    return fixupstate.
    """
    if stack is None:
        limit = ui.configint(b'absorb', b'max-stack-size')
        headctx = repo[b'.']
        if len(headctx.parents()) > 1:
            raise error.InputError(_(b'cannot absorb into a merge'))
        stack = getdraftstack(headctx, limit)
        if limit and len(stack) >= limit:
            ui.warn(
                _(
                    b'absorb: only the recent %d changesets will '
                    b'be analysed\n'
                )
                % limit
            )
    if not stack:
        raise error.InputError(_(b'no mutable changeset to change'))
    if targetctx is None:  # default to working copy
        targetctx = repo[None]
    if pats is None:
        pats = ()
    if opts is None:
        opts = {}
    state = fixupstate(stack, ui=ui, opts=opts)
    matcher = scmutil.match(targetctx, pats, opts)
    if opts.get(b'interactive'):
        diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
        origchunks = patch.parsepatch(diff)
        chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0]
        targetctx = overlaydiffcontext(stack[-1], chunks)
    if opts.get(b'edit_lines'):
        # If we're going to open the editor, don't ask the user to confirm
        # first
        opts[b'apply_changes'] = True
    fm = None
    if opts.get(b'print_changes') or not opts.get(b'apply_changes'):
        fm = ui.formatter(b'absorb', opts)
    state.diffwith(targetctx, matcher, fm)
    if fm is not None:
        fm.startitem()
        fm.write(
            b"count", b"\n%d changesets affected\n", len(state.ctxaffected)
        )
        fm.data(linetype=b'summary')
        for ctx in reversed(stack):
            if ctx not in state.ctxaffected:
                continue
            fm.startitem()
            fm.context(ctx=ctx)
            fm.data(linetype=b'changeset')
            fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node')
            descfirstline = stringutil.firstline(ctx.description())
            fm.write(
                b'descfirstline',
                b'%s\n',
                descfirstline,
                label=b'absorb.description',
            )
        fm.end()
    if not opts.get(b'dry_run'):
        if (
            not opts.get(b'apply_changes')
            and state.ctxaffected
            and ui.promptchoice(
                b"apply changes (y/N)? $$ &Yes $$ &No", default=1
            )
        ):
            raise error.CanceledError(_(b'absorb cancelled\n'))

        state.apply()
        if state.commit():
            state.printchunkstats()
        elif not ui.quiet:
            ui.write(_(b'nothing applied\n'))
    return state


@command(
    b'absorb',
    [
        (
            b'a',
            b'apply-changes',
            None,
            _(b'apply changes without prompting for confirmation'),
        ),
        (
            b'p',
            b'print-changes',
            None,
            _(b'always print which changesets are modified by which changes'),
        ),
        (
            b'i',
            b'interactive',
            None,
            _(b'interactively select which chunks to apply'),
        ),
        (
            b'e',
            b'edit-lines',
            None,
            _(
                b'edit what lines belong to which changesets before commit '
                b'(EXPERIMENTAL)'
            ),
        ),
    ]
    + commands.dryrunopts
    + commands.templateopts
    + commands.walkopts,
    _(b'hg absorb [OPTION] [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
)
def absorbcmd(ui, repo, *pats, **opts):
    """incorporate corrections into the stack of draft changesets

    absorb analyzes each change in your working directory and attempts to
    amend the changed lines into the changesets in your stack that first
    introduced those lines.

    If absorb cannot find an unambiguous changeset to amend for a change,
    that change will be left in the working directory, untouched. Such
    changes can be observed with :hg:`status` or :hg:`diff` afterwards.
    In other words, absorb does not write to the working directory.

    Changesets outside the revset `::. and not public() and not merge()` will
    not be changed.

    Changesets that become empty after applying the changes will be deleted.

    By default, absorb will show what it plans to do and prompt for
    confirmation. If you are confident that the changes will be absorbed
    to the correct place, run :hg:`absorb -a` to apply the changes
    immediately.

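    For example, to preview which changesets would be amended without
    modifying anything (an illustrative invocation using the flags above)::

      hg absorb --print-changes --dry-run
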
    Returns 0 on success, 1 if all chunks were ignored and nothing amended.
    """
    opts = pycompat.byteskwargs(opts)

    with repo.wlock(), repo.lock():
        if not opts[b'dry_run']:
            cmdutil.checkunfinished(repo)

        state = absorb(ui, repo, pats=pats, opts=opts)
        if sum(s[0] for s in state.chunkstats.values()) == 0:
            return 1
@@ -1,955 +1,955 b''
# fix - rewrite file content in changesets and working copy
#
# Copyright 2018 Google LLC.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""rewrite file content in changesets or working copy (EXPERIMENTAL)

Provides a command that runs configured tools on the contents of modified files,
writing back any fixes to the working copy or replacing changesets.

Here is an example configuration that causes :hg:`fix` to apply automatic
formatting fixes to modified lines in C++ code::

  [fix]
  clang-format:command=clang-format --assume-filename={rootpath}
  clang-format:linerange=--lines={first}:{last}
  clang-format:pattern=set:**.cpp or **.hpp

The :command suboption forms the first part of the shell command that will be
used to fix a file. The content of the file is passed on standard input, and the
fixed file content is expected on standard output. Any output on standard error
will be displayed as a warning. If the exit status is not zero, the file will
not be affected. A placeholder warning is displayed if there is a non-zero exit
status but no standard error output. Some values may be substituted into the
command::

  {rootpath} The path of the file being fixed, relative to the repo root
  {basename} The name of the file being fixed, without the directory path

If the :linerange suboption is set, the tool will only be run if there are
changed lines in a file. The value of this suboption is appended to the shell
command once for every range of changed lines in the file. Some values may be
substituted into the command::

  {first} The 1-based line number of the first line in the modified range
  {last}  The 1-based line number of the last line in the modified range

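For example, with the clang-format configuration above, a modified range on
lines 3 through 7 of src/main.cpp would (hypothetically) expand the command
to::

  clang-format --assume-filename=src/main.cpp --lines=3:7
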
Deleted sections of a file will be ignored by :linerange, because there is no
corresponding line range in the version being fixed.

By default, tools that set :linerange will only be executed if there is at least
one changed line range. This is meant to prevent accidents like running a code
formatter in such a way that it unexpectedly reformats the whole file. If such a
tool needs to operate on unchanged files, it should set the :skipclean suboption
to false.

The :pattern suboption determines which files will be passed through each
configured tool. See :hg:`help patterns` for possible values. However, all
patterns are relative to the repo root, even if that text says they are relative
to the current working directory. If there are file arguments to :hg:`fix`, the
intersection of these patterns is used.

There is also a configurable limit for the maximum size of file that will be
processed by :hg:`fix`::

  [fix]
  maxfilesize = 2MB

Normally, execution of configured tools will continue after a failure (indicated
by a non-zero exit status). It can also be configured to abort after the first
such failure, so that no files will be affected if any tool fails. This abort
will also cause :hg:`fix` to exit with a non-zero status::

  [fix]
  failure = abort

When multiple tools are configured to affect a file, they execute in an order
defined by the :priority suboption. The priority suboption has a default value
of zero for each tool. Tools are executed in order of descending priority. The
execution order of tools with equal priority is unspecified. For example, you
could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers
in a text file by ensuring that 'sort' runs before 'head'::

  [fix]
  sort:command = sort -n
  head:command = head -n 10
  sort:pattern = numbers.txt
  head:pattern = numbers.txt
  sort:priority = 2
  head:priority = 1

To account for changes made by each tool, the line numbers used for incremental
formatting are recomputed before executing the next tool. So, each tool may see
different values for the arguments added by the :linerange suboption.

Each fixer tool is allowed to return some metadata in addition to the fixed file
content. The metadata must be placed before the file content on stdout,
separated from the file content by a zero byte. The metadata is parsed as a JSON
value (so, it should be UTF-8 encoded and contain no zero bytes). A fixer tool
is expected to produce this metadata encoding if and only if the :metadata
suboption is true::

  [fix]
  tool:command = tool --prepend-json-metadata
  tool:metadata = true

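With that configuration, a conforming tool's stdout for one file might look
like this (illustrative bytes only)::

  {"fixed": 2}\0<fixed file content>
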
The metadata values are passed to hooks, which can be used to print summaries or
perform other post-fixing work. The supported hooks are::

  "postfixfile"
    Run once for each file in each revision where any fixer tools made changes
    to the file content. Provides "$HG_REV" and "$HG_PATH" to identify the file,
    and "$HG_METADATA" with a map of fixer names to metadata values from fixer
    tools that affected the file. Fixer tools that didn't affect the file have a
    value of None. Only fixer tools that executed are present in the metadata.

  "postfix"
    Run once after all files and revisions have been handled. Provides
    "$HG_REPLACEMENTS" with information about what revisions were created and
    made obsolete. Provides a boolean "$HG_WDIRWRITTEN" to indicate whether any
    files in the working copy were updated. Provides a list "$HG_METADATA"
    mapping fixer tool names to lists of metadata values returned from
    executions that modified a file. This aggregates the same metadata
    previously passed to the "postfixfile" hook.

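For example (illustrative only), a shell hook could log each fixed file using
the variables above::

  [hooks]
  postfixfile = echo fixed $HG_PATH in $HG_REV >> fix.log
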
Fixer tools are run in the repository's root directory. This allows them to read
configuration files from the working copy, or even write to the working copy.
The working copy is not updated to match the revision being fixed. In fact,
several revisions may be fixed in parallel. Writes to the working copy are not
amended into the revision being fixed; fixer tools should always write fixed
file content back to stdout as documented above.
"""


import collections
import itertools
import os
import re
import subprocess

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    wdirrev,
)

from mercurial.utils import procutil

from mercurial import (
    cmdutil,
    context,
    copies,
    error,
    logcmdutil,
    match as matchmod,
    mdiff,
    merge,
    mergestate as mergestatemod,
    pycompat,
    registrar,
    rewriteutil,
    scmutil,
    util,
    worker,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

# Register the suboptions allowed for each configured fixer, and default values.
FIXER_ATTRS = {
    b'command': None,
    b'linerange': None,
    b'pattern': None,
    b'priority': 0,
    b'metadata': False,
    b'skipclean': True,
    b'enabled': True,
}

for key, default in FIXER_ATTRS.items():
    configitem(b'fix', b'.*:%s$' % key, default=default, generic=True)

# A good default size allows most source code files to be fixed, but avoids
# letting fixer tools choke on huge inputs, which could be surprising to the
# user.
configitem(b'fix', b'maxfilesize', default=b'2MB')

# Allow fix commands to exit non-zero if an executed fixer tool exits non-zero.
# This lets users write shell scripts that stop when a fixer tool signals a
# problem.
configitem(b'fix', b'failure', default=b'continue')


def checktoolfailureaction(ui, message, hint=None):
    """Abort with 'message' if fix.failure=abort"""
    action = ui.config(b'fix', b'failure')
    if action not in (b'continue', b'abort'):
        raise error.Abort(
            _(b'unknown fix.failure action: %s') % (action,),
            hint=_(b'use "continue" or "abort"'),
        )
    if action == b'abort':
        raise error.Abort(message, hint=hint)


allopt = (b'', b'all', False, _(b'fix all non-public non-obsolete revisions'))
baseopt = (
    b'',
    b'base',
    [],
    _(
        b'revisions to diff against (overrides automatic '
        b'selection, and applies to every revision being '
        b'fixed)'
    ),
    _(b'REV'),
)
revopt = (b'r', b'rev', [], _(b'revisions to fix (ADVANCED)'), _(b'REV'))
sourceopt = (
    b's',
    b'source',
    [],
    _(b'fix the specified revisions and their descendants'),
    _(b'REV'),
)
wdiropt = (b'w', b'working-dir', False, _(b'fix the working directory'))
wholeopt = (b'', b'whole', False, _(b'always fix every line of a file'))
usage = _(b'[OPTION]... [FILE]...')


@command(
    b'fix',
    [allopt, baseopt, revopt, sourceopt, wdiropt, wholeopt],
    usage,
    helpcategory=command.CATEGORY_FILE_CONTENTS,
)
def fix(ui, repo, *pats, **opts):
    """rewrite file content in changesets or working directory

    Runs any configured tools to fix the content of files. Only affects files
    with changes, unless file arguments are provided. Only affects changed lines
    of files, unless the --whole flag is used. Some tools may always affect the
    whole file regardless of --whole.

    If --working-dir is used, files with uncommitted changes in the working copy
    will be fixed. Note that no backups are made.

    If revisions are specified with --source, those revisions and their
    descendants will be checked, and they may be replaced with new revisions
    that have fixed file content. By automatically including the descendants,
    no merging, rebasing, or evolution will be required. If an ancestor of the
    working copy is included, then the working copy itself will also be fixed,
    and the working copy will be updated to the fixed parent.

    When determining what lines of each file to fix at each revision, the whole
    set of revisions being fixed is considered, so that fixes to earlier
    revisions are not forgotten in later ones. The --base flag can be used to
    override this default behavior, though it is not usually desirable to do so.
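
    For example, to fix only the lines you have changed in the working copy
    (an illustrative invocation using the flags above)::

      hg fix --working-dir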
261 """
261 """
262 opts = pycompat.byteskwargs(opts)
262 opts = pycompat.byteskwargs(opts)
263 cmdutil.check_at_most_one_arg(opts, b'all', b'source', b'rev')
263 cmdutil.check_at_most_one_arg(opts, b'all', b'source', b'rev')
264 cmdutil.check_incompatible_arguments(
264 cmdutil.check_incompatible_arguments(
265 opts, b'working_dir', [b'all', b'source']
265 opts, b'working_dir', [b'all', b'source']
266 )
266 )
267
267
268 with repo.wlock(), repo.lock(), repo.transaction(b'fix'):
268 with repo.wlock(), repo.lock(), repo.transaction(b'fix'):
269 revstofix = getrevstofix(ui, repo, opts)
269 revstofix = getrevstofix(ui, repo, opts)
270 basectxs = getbasectxs(repo, opts, revstofix)
270 basectxs = getbasectxs(repo, opts, revstofix)
271 workqueue, numitems = getworkqueue(
271 workqueue, numitems = getworkqueue(
272 ui, repo, pats, opts, revstofix, basectxs
272 ui, repo, pats, opts, revstofix, basectxs
273 )
273 )
274 basepaths = getbasepaths(repo, opts, workqueue, basectxs)
274 basepaths = getbasepaths(repo, opts, workqueue, basectxs)
275 fixers = getfixers(ui)
275 fixers = getfixers(ui)
276
276
277 # Rather than letting each worker independently fetch the files
277 # Rather than letting each worker independently fetch the files
278 # (which also would add complications for shared/keepalive
278 # (which also would add complications for shared/keepalive
279 # connections), prefetch them all first.
279 # connections), prefetch them all first.
280 _prefetchfiles(repo, workqueue, basepaths)
280 _prefetchfiles(repo, workqueue, basepaths)
281
281
282 # There are no data dependencies between the workers fixing each file
282 # There are no data dependencies between the workers fixing each file
283 # revision, so we can use all available parallelism.
283 # revision, so we can use all available parallelism.
284 def getfixes(items):
284 def getfixes(items):
285 for srcrev, path, dstrevs in items:
285 for srcrev, path, dstrevs in items:
286 ctx = repo[srcrev]
286 ctx = repo[srcrev]
287 olddata = ctx[path].data()
287 olddata = ctx[path].data()
288 metadata, newdata = fixfile(
288 metadata, newdata = fixfile(
289 ui,
289 ui,
290 repo,
290 repo,
291 opts,
291 opts,
292 fixers,
292 fixers,
293 ctx,
293 ctx,
294 path,
294 path,
295 basepaths,
295 basepaths,
296 basectxs[srcrev],
296 basectxs[srcrev],
297 )
297 )
298 # We ungroup the work items now, because the code that consumes
298 # We ungroup the work items now, because the code that consumes
299 # these results has to handle each dstrev separately, and in
299 # these results has to handle each dstrev separately, and in
300 # topological order. Because these are handled in topological
300 # topological order. Because these are handled in topological
301 # order, it's important that we pass around references to
301 # order, it's important that we pass around references to
302 # "newdata" instead of copying it. Otherwise, we would be
302 # "newdata" instead of copying it. Otherwise, we would be
303 # keeping more copies of file content in memory at a time than
303 # keeping more copies of file content in memory at a time than
304 # if we hadn't bothered to group/deduplicate the work items.
304 # if we hadn't bothered to group/deduplicate the work items.
305 data = newdata if newdata != olddata else None
305 data = newdata if newdata != olddata else None
306 for dstrev in dstrevs:
306 for dstrev in dstrevs:
307 yield (dstrev, path, metadata, data)
307 yield (dstrev, path, metadata, data)
308
308
309 results = worker.worker(
309 results = worker.worker(
310 ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
310 ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
311 )
311 )
312
312
313 # We have to hold on to the data for each successor revision in memory
313 # We have to hold on to the data for each successor revision in memory
314 # until all its parents are committed. We ensure this by committing and
314 # until all its parents are committed. We ensure this by committing and
315 # freeing memory for the revisions in some topological order. This
315 # freeing memory for the revisions in some topological order. This
316 # leaves a little bit of memory efficiency on the table, but also makes
316 # leaves a little bit of memory efficiency on the table, but also makes
317 # the tests deterministic. It might also be considered a feature since
317 # the tests deterministic. It might also be considered a feature since
318 # it makes the results more easily reproducible.
318 # it makes the results more easily reproducible.
319 filedata = collections.defaultdict(dict)
319 filedata = collections.defaultdict(dict)
320 aggregatemetadata = collections.defaultdict(list)
320 aggregatemetadata = collections.defaultdict(list)
321 replacements = {}
321 replacements = {}
322 wdirwritten = False
322 wdirwritten = False
323 commitorder = sorted(revstofix, reverse=True)
323 commitorder = sorted(revstofix, reverse=True)
324 with ui.makeprogress(
324 with ui.makeprogress(
325 topic=_(b'fixing'), unit=_(b'files'), total=sum(numitems.values())
325 topic=_(b'fixing'), unit=_(b'files'), total=sum(numitems.values())
326 ) as progress:
326 ) as progress:
327 for rev, path, filerevmetadata, newdata in results:
327 for rev, path, filerevmetadata, newdata in results:
328 progress.increment(item=path)
328 progress.increment(item=path)
329 for fixername, fixermetadata in filerevmetadata.items():
329 for fixername, fixermetadata in filerevmetadata.items():
330 aggregatemetadata[fixername].append(fixermetadata)
330 aggregatemetadata[fixername].append(fixermetadata)
331 if newdata is not None:
331 if newdata is not None:
332 filedata[rev][path] = newdata
332 filedata[rev][path] = newdata
333 hookargs = {
333 hookargs = {
334 b'rev': rev,
334 b'rev': rev,
335 b'path': path,
335 b'path': path,
336 b'metadata': filerevmetadata,
336 b'metadata': filerevmetadata,
337 }
337 }
338 repo.hook(
338 repo.hook(
339 b'postfixfile',
339 b'postfixfile',
340 throw=False,
340 throw=False,
341 **pycompat.strkwargs(hookargs)
341 **pycompat.strkwargs(hookargs)
342 )
342 )
343 numitems[rev] -= 1
343 numitems[rev] -= 1
344 # Apply the fixes for this and any other revisions that are
344 # Apply the fixes for this and any other revisions that are
345 # ready and sitting at the front of the queue. Using a loop here
345 # ready and sitting at the front of the queue. Using a loop here
346 # prevents the queue from being blocked by the first revision to
346 # prevents the queue from being blocked by the first revision to
347 # be ready out of order.
347 # be ready out of order.
348 while commitorder and not numitems[commitorder[-1]]:
348 while commitorder and not numitems[commitorder[-1]]:
349 rev = commitorder.pop()
349 rev = commitorder.pop()
350 ctx = repo[rev]
350 ctx = repo[rev]
351 if rev == wdirrev:
351 if rev == wdirrev:
352 writeworkingdir(repo, ctx, filedata[rev], replacements)
352 writeworkingdir(repo, ctx, filedata[rev], replacements)
353 wdirwritten = bool(filedata[rev])
353 wdirwritten = bool(filedata[rev])
354 else:
354 else:
355 replacerev(ui, repo, ctx, filedata[rev], replacements)
355 replacerev(ui, repo, ctx, filedata[rev], replacements)
356 del filedata[rev]
356 del filedata[rev]
357
357
358 cleanup(repo, replacements, wdirwritten)
358 cleanup(repo, replacements, wdirwritten)
359 hookargs = {
359 hookargs = {
360 b'replacements': replacements,
360 b'replacements': replacements,
361 b'wdirwritten': wdirwritten,
361 b'wdirwritten': wdirwritten,
362 b'metadata': aggregatemetadata,
362 b'metadata': aggregatemetadata,
363 }
363 }
364 repo.hook(b'postfix', throw=True, **pycompat.strkwargs(hookargs))
364 repo.hook(b'postfix', throw=True, **pycompat.strkwargs(hookargs))
365
365
366
366
def cleanup(repo, replacements, wdirwritten):
    """Calls scmutil.cleanupnodes() with the given replacements.

    "replacements" is a dict from nodeid to nodeid, with one key and one value
    for every revision that was affected by fixing. This is slightly different
    from cleanupnodes().

    "wdirwritten" is a bool which tells whether the working copy was affected by
    fixing, since it has no entry in "replacements".

    Useful as a hook point for extending "hg fix" with output summarizing the
    effects of the command, though we choose not to output anything here.
    """
    replacements = {prec: [succ] for prec, succ in replacements.items()}
    scmutil.cleanupnodes(repo, replacements, b'fix', fixphase=True)


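# Sketch (hypothetical, not part of this extension): since cleanup() is meant
# as a hook point, an extension could wrap it to print a summary, e.g.:
#
#   from hgext import fix as fixmod
#   from mercurial import extensions
#
#   def summarizedcleanup(orig, repo, replacements, wdirwritten):
#       repo.ui.status(b'fix: rewrote %d revision(s)\n' % len(replacements))
#       return orig(repo, replacements, wdirwritten)
#
#   extensions.wrapfunction(fixmod, 'cleanup', summarizedcleanup)

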
def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
    """Constructs a list of files to fix and which revisions each fix applies to

    To avoid duplicating work, there is usually only one work item for each file
    revision that might need to be fixed. There can be multiple work items per
    file revision if the same file needs to be fixed in multiple changesets with
    different baserevs. Each work item also contains a list of changesets where
    the file's data should be replaced with the fixed data. The work items for
    earlier changesets come earlier in the work queue, to improve pipelining by
    allowing the first changeset to be replaced while fixes are still being
    computed for later changesets.

    Also returned is a map from changesets to the count of work items that might
    affect each changeset. This is used later to count when all of a changeset's
    work items have been finished, without having to inspect the remaining work
    queue in each worker subprocess.

    The example work item (1, "foo/bar.txt", (1, 2, 3)) means that the data of
    bar.txt should be read from revision 1, then fixed, and written back to
    revisions 1, 2 and 3. Revision 1 is called the "srcrev" and the list of
    revisions is called the "dstrevs". In practice the srcrev is always one of
    the dstrevs, and we make that choice when constructing the work item so that
    the choice can't be made inconsistently later on. The dstrevs should all
    have the same file revision for the given path, so the choice of srcrev is
    arbitrary. The wdirrev can be a dstrev and a srcrev.
    """
    dstrevmap = collections.defaultdict(list)
    numitems = collections.defaultdict(int)
    maxfilesize = ui.configbytes(b'fix', b'maxfilesize')
    for rev in sorted(revstofix):
        fixctx = repo[rev]
        match = scmutil.match(fixctx, pats, opts)
        for path in sorted(
            pathstofix(ui, repo, pats, opts, match, basectxs[rev], fixctx)
        ):
            fctx = fixctx[path]
            if fctx.islink():
                continue
            if fctx.size() > maxfilesize:
                ui.warn(
                    _(b'ignoring file larger than %s: %s\n')
                    % (util.bytecount(maxfilesize), path)
                )
                continue
            baserevs = tuple(ctx.rev() for ctx in basectxs[rev])
            dstrevmap[(fctx.filerev(), baserevs, path)].append(rev)
            numitems[rev] += 1
    workqueue = [
        (min(dstrevs), path, dstrevs)
        for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
    ]
    # Move work items for earlier changesets to the front of the queue, so we
    # might be able to replace those changesets (in topological order) while
    # we're still processing later work items. Note the min() in the previous
    # expression, which means we don't need a custom comparator here. The path
    # is also important in the sort order to make the output order stable. There
    # are some situations where this doesn't help much, but some situations
    # where it lets us buffer O(1) files instead of O(n) files.
    workqueue.sort()
    return workqueue, numitems


def getrevstofix(ui, repo, opts):
    """Returns the set of revision numbers that should be fixed"""
    if opts[b'all']:
        revs = repo.revs(b'(not public() and not obsolete()) or wdir()')
    elif opts[b'source']:
        source_revs = logcmdutil.revrange(repo, opts[b'source'])
        revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs))
        if wdirrev in source_revs:
            # `wdir()::` is currently empty, so manually add wdir
            revs.add(wdirrev)
        if repo[b'.'].rev() in revs:
            revs.add(wdirrev)
    else:
        revs = set(logcmdutil.revrange(repo, opts[b'rev']))
        if opts.get(b'working_dir'):
            revs.add(wdirrev)
    # Allow fixing only wdir() even if there's an unfinished operation
    if not (len(revs) == 1 and wdirrev in revs):
        cmdutil.checkunfinished(repo)
        rewriteutil.precheck(repo, revs, b'fix')
    if (
        wdirrev in revs
        and mergestatemod.mergestate.read(repo).unresolvedcount()
    ):
        raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
    if not revs:
        raise error.Abort(
            b'no changesets specified', hint=b'use --source or --working-dir'
        )
    return revs


def pathstofix(ui, repo, pats, opts, match, basectxs, fixctx):
    """Returns the set of files that should be fixed in a context

    The result depends on the base contexts; we include any file that has
    changed relative to any of the base contexts. Base contexts should be
    ancestors of the context being fixed.
    """
    files = set()
    for basectx in basectxs:
        stat = basectx.status(
            fixctx, match=match, listclean=bool(pats), listunknown=bool(pats)
        )
        files.update(
            set(
                itertools.chain(
                    stat.added, stat.modified, stat.clean, stat.unknown
                )
            )
        )
    return files


500 def lineranges(opts, path, basepaths, basectxs, fixctx, content2):
500 def lineranges(opts, path, basepaths, basectxs, fixctx, content2):
501 """Returns the set of line ranges that should be fixed in a file
501 """Returns the set of line ranges that should be fixed in a file
502
502
503 Of the form [(10, 20), (30, 40)].
503 Of the form [(10, 20), (30, 40)].
504
504
505 This depends on the given base contexts; we must consider lines that have
505 This depends on the given base contexts; we must consider lines that have
506 changed versus any of the base contexts, and whether the file has been
506 changed versus any of the base contexts, and whether the file has been
507 renamed versus any of them.
507 renamed versus any of them.
508
508
509 Another way to understand this is that we exclude line ranges that are
509 Another way to understand this is that we exclude line ranges that are
510 common to the file in all base contexts.
510 common to the file in all base contexts.
511 """
511 """
512 if opts.get(b'whole'):
512 if opts.get(b'whole'):
513 # Return a range containing all lines. Rely on the diff implementation's
513 # Return a range containing all lines. Rely on the diff implementation's
514 # idea of how many lines are in the file, instead of reimplementing it.
514 # idea of how many lines are in the file, instead of reimplementing it.
515 return difflineranges(b'', content2)
515 return difflineranges(b'', content2)
516
516
517 rangeslist = []
517 rangeslist = []
518 for basectx in basectxs:
518 for basectx in basectxs:
519 basepath = basepaths.get((basectx.rev(), fixctx.rev(), path), path)
519 basepath = basepaths.get((basectx.rev(), fixctx.rev(), path), path)
520
520
521 if basepath in basectx:
521 if basepath in basectx:
522 content1 = basectx[basepath].data()
522 content1 = basectx[basepath].data()
523 else:
523 else:
524 content1 = b''
524 content1 = b''
525 rangeslist.extend(difflineranges(content1, content2))
525 rangeslist.extend(difflineranges(content1, content2))
526 return unionranges(rangeslist)
526 return unionranges(rangeslist)
527
527
528
528
529 def getbasepaths(repo, opts, workqueue, basectxs):
529 def getbasepaths(repo, opts, workqueue, basectxs):
530 if opts.get(b'whole'):
530 if opts.get(b'whole'):
531 # Base paths will never be fetched for line range determination.
531 # Base paths will never be fetched for line range determination.
532 return {}
532 return {}
533
533
534 basepaths = {}
534 basepaths = {}
535 for srcrev, path, _dstrevs in workqueue:
535 for srcrev, path, _dstrevs in workqueue:
536 fixctx = repo[srcrev]
536 fixctx = repo[srcrev]
537 for basectx in basectxs[srcrev]:
537 for basectx in basectxs[srcrev]:
538 basepath = copies.pathcopies(basectx, fixctx).get(path, path)
538 basepath = copies.pathcopies(basectx, fixctx).get(path, path)
539 if basepath in basectx:
539 if basepath in basectx:
540 basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
540 basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
541 return basepaths
541 return basepaths
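
The mapping is keyed by (baserev, fixrev, path) so that lineranges() can fall back to the unrenamed path with a plain dict lookup; for example, with toy values::

    >>> basepaths = {(1, 2, b'new.py'): b'old.py'}
    >>> basepaths.get((1, 2, b'new.py'), b'new.py')
    b'old.py'
    >>> basepaths.get((1, 2, b'missing.py'), b'missing.py')
    b'missing.py'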
542
542
543
543
544 def unionranges(rangeslist):
544 def unionranges(rangeslist):
545 """Return the union of some closed intervals
545 """Return the union of some closed intervals
546
546
547 >>> unionranges([])
547 >>> unionranges([])
548 []
548 []
549 >>> unionranges([(1, 100)])
549 >>> unionranges([(1, 100)])
550 [(1, 100)]
550 [(1, 100)]
551 >>> unionranges([(1, 100), (1, 100)])
551 >>> unionranges([(1, 100), (1, 100)])
552 [(1, 100)]
552 [(1, 100)]
553 >>> unionranges([(1, 100), (2, 100)])
553 >>> unionranges([(1, 100), (2, 100)])
554 [(1, 100)]
554 [(1, 100)]
555 >>> unionranges([(1, 99), (1, 100)])
555 >>> unionranges([(1, 99), (1, 100)])
556 [(1, 100)]
556 [(1, 100)]
557 >>> unionranges([(1, 100), (40, 60)])
557 >>> unionranges([(1, 100), (40, 60)])
558 [(1, 100)]
558 [(1, 100)]
559 >>> unionranges([(1, 49), (50, 100)])
559 >>> unionranges([(1, 49), (50, 100)])
560 [(1, 100)]
560 [(1, 100)]
561 >>> unionranges([(1, 48), (50, 100)])
561 >>> unionranges([(1, 48), (50, 100)])
562 [(1, 48), (50, 100)]
562 [(1, 48), (50, 100)]
563 >>> unionranges([(1, 2), (3, 4), (5, 6)])
563 >>> unionranges([(1, 2), (3, 4), (5, 6)])
564 [(1, 6)]
564 [(1, 6)]
565 """
565 """
566 rangeslist = sorted(set(rangeslist))
566 rangeslist = sorted(set(rangeslist))
567 unioned = []
567 unioned = []
568 if rangeslist:
568 if rangeslist:
569 unioned, rangeslist = [rangeslist[0]], rangeslist[1:]
569 unioned, rangeslist = [rangeslist[0]], rangeslist[1:]
570 for a, b in rangeslist:
570 for a, b in rangeslist:
571 c, d = unioned[-1]
571 c, d = unioned[-1]
572 if a > d + 1:
572 if a > d + 1:
573 unioned.append((a, b))
573 unioned.append((a, b))
574 else:
574 else:
575 unioned[-1] = (c, max(b, d))
575 unioned[-1] = (c, max(b, d))
576 return unioned
576 return unioned
577
577
578
578
579 def difflineranges(content1, content2):
579 def difflineranges(content1, content2):
580 """Return list of line number ranges in content2 that differ from content1.
580 """Return list of line number ranges in content2 that differ from content1.
581
581
582 Line numbers are 1-based. The numbers are the first and last line contained
582 Line numbers are 1-based. The numbers are the first and last line contained
583 in the range. Single-line ranges have the same line number for the first and
583 in the range. Single-line ranges have the same line number for the first and
584 last line. Excludes any empty ranges that result from lines that are only
584 last line. Excludes any empty ranges that result from lines that are only
585 present in content1. Relies on mdiff's idea of where the line endings are in
585 present in content1. Relies on mdiff's idea of where the line endings are in
586 the string.
586 the string.
587
587
588 >>> from mercurial import pycompat
588 >>> from mercurial import pycompat
589 >>> lines = lambda s: b'\\n'.join([c for c in pycompat.iterbytestr(s)])
589 >>> lines = lambda s: b'\\n'.join([c for c in pycompat.iterbytestr(s)])
590 >>> difflineranges2 = lambda a, b: difflineranges(lines(a), lines(b))
590 >>> difflineranges2 = lambda a, b: difflineranges(lines(a), lines(b))
591 >>> difflineranges2(b'', b'')
591 >>> difflineranges2(b'', b'')
592 []
592 []
593 >>> difflineranges2(b'a', b'')
593 >>> difflineranges2(b'a', b'')
594 []
594 []
595 >>> difflineranges2(b'', b'A')
595 >>> difflineranges2(b'', b'A')
596 [(1, 1)]
596 [(1, 1)]
597 >>> difflineranges2(b'a', b'a')
597 >>> difflineranges2(b'a', b'a')
598 []
598 []
599 >>> difflineranges2(b'a', b'A')
599 >>> difflineranges2(b'a', b'A')
600 [(1, 1)]
600 [(1, 1)]
601 >>> difflineranges2(b'ab', b'')
601 >>> difflineranges2(b'ab', b'')
602 []
602 []
603 >>> difflineranges2(b'', b'AB')
603 >>> difflineranges2(b'', b'AB')
604 [(1, 2)]
604 [(1, 2)]
605 >>> difflineranges2(b'abc', b'ac')
605 >>> difflineranges2(b'abc', b'ac')
606 []
606 []
607 >>> difflineranges2(b'ab', b'aCb')
607 >>> difflineranges2(b'ab', b'aCb')
608 [(2, 2)]
608 [(2, 2)]
609 >>> difflineranges2(b'abc', b'aBc')
609 >>> difflineranges2(b'abc', b'aBc')
610 [(2, 2)]
610 [(2, 2)]
611 >>> difflineranges2(b'ab', b'AB')
611 >>> difflineranges2(b'ab', b'AB')
612 [(1, 2)]
612 [(1, 2)]
613 >>> difflineranges2(b'abcde', b'aBcDe')
613 >>> difflineranges2(b'abcde', b'aBcDe')
614 [(2, 2), (4, 4)]
614 [(2, 2), (4, 4)]
615 >>> difflineranges2(b'abcde', b'aBCDe')
615 >>> difflineranges2(b'abcde', b'aBCDe')
616 [(2, 4)]
616 [(2, 4)]
617 """
617 """
618 ranges = []
618 ranges = []
619 for lines, kind in mdiff.allblocks(content1, content2):
619 for lines, kind in mdiff.allblocks(content1, content2):
620 firstline, lastline = lines[2:4]
620 firstline, lastline = lines[2:4]
621 if kind == b'!' and firstline != lastline:
621 if kind == b'!' and firstline != lastline:
622 ranges.append((firstline + 1, lastline))
622 ranges.append((firstline + 1, lastline))
623 return ranges
623 return ranges
624
624
625
625
626 def getbasectxs(repo, opts, revstofix):
626 def getbasectxs(repo, opts, revstofix):
627 """Returns a map of the base contexts for each revision
627 """Returns a map of the base contexts for each revision
628
628
629 The base contexts determine which lines are considered modified when we
629 The base contexts determine which lines are considered modified when we
630 attempt to fix just the modified lines in a file. It also determines which
630 attempt to fix just the modified lines in a file. It also determines which
631 files we attempt to fix, so it is important to compute this even when
631 files we attempt to fix, so it is important to compute this even when
632 --whole is used.
632 --whole is used.
633 """
633 """
634 # The --base flag overrides the usual logic, and we give every revision
634 # The --base flag overrides the usual logic, and we give every revision
635 # exactly the set of baserevs that the user specified.
635 # exactly the set of baserevs that the user specified.
636 if opts.get(b'base'):
636 if opts.get(b'base'):
637 baserevs = set(logcmdutil.revrange(repo, opts.get(b'base')))
637 baserevs = set(logcmdutil.revrange(repo, opts.get(b'base')))
638 if not baserevs:
638 if not baserevs:
639 baserevs = {nullrev}
639 baserevs = {nullrev}
640 basectxs = {repo[rev] for rev in baserevs}
640 basectxs = {repo[rev] for rev in baserevs}
641 return {rev: basectxs for rev in revstofix}
641 return {rev: basectxs for rev in revstofix}
642
642
643 # Proceed in topological order so that we can easily determine each
643 # Proceed in topological order so that we can easily determine each
644 # revision's baserevs by looking at its parents and their baserevs.
644 # revision's baserevs by looking at its parents and their baserevs.
645 basectxs = collections.defaultdict(set)
645 basectxs = collections.defaultdict(set)
646 for rev in sorted(revstofix):
646 for rev in sorted(revstofix):
647 ctx = repo[rev]
647 ctx = repo[rev]
648 for pctx in ctx.parents():
648 for pctx in ctx.parents():
649 if pctx.rev() in basectxs:
649 if pctx.rev() in basectxs:
650 basectxs[rev].update(basectxs[pctx.rev()])
650 basectxs[rev].update(basectxs[pctx.rev()])
651 else:
651 else:
652 basectxs[rev].add(pctx)
652 basectxs[rev].add(pctx)
653 return basectxs
653 return basectxs
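
A condensed sketch of the propagation above, using plain dicts and a toy linear history so that contexts become bare revision numbers::

    >>> parents = {2: [1], 3: [2]}  # toy DAG: 1 -> 2 -> 3
    >>> basectxs = {}
    >>> for rev in sorted({2, 3}):
    ...     basectxs[rev] = set()
    ...     for p in parents[rev]:
    ...         if p in basectxs:
    ...             basectxs[rev].update(basectxs[p])
    ...         else:
    ...             basectxs[rev].add(p)
    >>> basectxs
    {2: {1}, 3: {1}}

Both revisions diff against revision 1, so fixing revision 2 does not change which of revision 3's lines count as modified.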
654
654
655
655
656 def _prefetchfiles(repo, workqueue, basepaths):
656 def _prefetchfiles(repo, workqueue, basepaths):
657 toprefetch = set()
657 toprefetch = set()
658
658
659 # Prefetch the files that will be fixed.
659 # Prefetch the files that will be fixed.
660 for srcrev, path, _dstrevs in workqueue:
660 for srcrev, path, _dstrevs in workqueue:
661 if srcrev == wdirrev:
661 if srcrev == wdirrev:
662 continue
662 continue
663 toprefetch.add((srcrev, path))
663 toprefetch.add((srcrev, path))
664
664
665 # Prefetch the base contents for lineranges().
665 # Prefetch the base contents for lineranges().
666 for (baserev, fixrev, path), basepath in basepaths.items():
666 for (baserev, fixrev, path), basepath in basepaths.items():
667 toprefetch.add((baserev, basepath))
667 toprefetch.add((baserev, basepath))
668
668
669 if toprefetch:
669 if toprefetch:
670 scmutil.prefetchfiles(
670 scmutil.prefetchfiles(
671 repo,
671 repo,
672 [
672 [
673 (rev, scmutil.matchfiles(repo, [path]))
673 (rev, scmutil.matchfiles(repo, [path]))
674 for rev, path in toprefetch
674 for rev, path in toprefetch
675 ],
675 ],
676 )
676 )
677
677
678
678
679 def fixfile(ui, repo, opts, fixers, fixctx, path, basepaths, basectxs):
679 def fixfile(ui, repo, opts, fixers, fixctx, path, basepaths, basectxs):
680 """Run any configured fixers that should affect the file in this context
680 """Run any configured fixers that should affect the file in this context
681
681
682 Returns the file content that results from applying the fixers in some order
682 Returns the file content that results from applying the fixers in some order
683 starting with the file's content in the fixctx. Fixers that support line
683 starting with the file's content in the fixctx. Fixers that support line
684 ranges will affect lines that have changed relative to any of the basectxs
684 ranges will affect lines that have changed relative to any of the basectxs
685 (i.e. they will only avoid lines that are common to all basectxs).
685 (i.e. they will only avoid lines that are common to all basectxs).
686
686
687 A fixer tool's stdout will become the file's new content if and only if it
687 A fixer tool's stdout will become the file's new content if and only if it
688 exits with code zero. The fixer tool's working directory is the repository's
688 exits with code zero. The fixer tool's working directory is the repository's
689 root.
689 root.
690 """
690 """
691 metadata = {}
691 metadata = {}
692 newdata = fixctx[path].data()
692 newdata = fixctx[path].data()
693 for fixername, fixer in fixers.items():
693 for fixername, fixer in fixers.items():
694 if fixer.affects(opts, fixctx, path):
694 if fixer.affects(opts, fixctx, path):
695 ranges = lineranges(
695 ranges = lineranges(
696 opts, path, basepaths, basectxs, fixctx, newdata
696 opts, path, basepaths, basectxs, fixctx, newdata
697 )
697 )
698 command = fixer.command(ui, path, ranges)
698 command = fixer.command(ui, path, ranges)
699 if command is None:
699 if command is None:
700 continue
700 continue
701 ui.debug(b'subprocess: %s\n' % (command,))
701 ui.debug(b'subprocess: %s\n' % (command,))
702 proc = subprocess.Popen(
702 proc = subprocess.Popen(
703 procutil.tonativestr(command),
703 procutil.tonativestr(command),
704 shell=True,
704 shell=True,
705 cwd=procutil.tonativestr(repo.root),
705 cwd=procutil.tonativestr(repo.root),
706 stdin=subprocess.PIPE,
706 stdin=subprocess.PIPE,
707 stdout=subprocess.PIPE,
707 stdout=subprocess.PIPE,
708 stderr=subprocess.PIPE,
708 stderr=subprocess.PIPE,
709 )
709 )
710 stdout, stderr = proc.communicate(newdata)
710 stdout, stderr = proc.communicate(newdata)
711 if stderr:
711 if stderr:
712 showstderr(ui, fixctx.rev(), fixername, stderr)
712 showstderr(ui, fixctx.rev(), fixername, stderr)
713 newerdata = stdout
713 newerdata = stdout
714 if fixer.shouldoutputmetadata():
714 if fixer.shouldoutputmetadata():
715 try:
715 try:
716 metadatajson, newerdata = stdout.split(b'\0', 1)
716 metadatajson, newerdata = stdout.split(b'\0', 1)
717 metadata[fixername] = pycompat.json_loads(metadatajson)
717 metadata[fixername] = pycompat.json_loads(metadatajson)
718 except ValueError:
718 except ValueError:
719 ui.warn(
719 ui.warn(
720 _(b'ignored invalid output from fixer tool: %s\n')
720 _(b'ignored invalid output from fixer tool: %s\n')
721 % (fixername,)
721 % (fixername,)
722 )
722 )
723 continue
723 continue
724 else:
724 else:
725 metadata[fixername] = None
725 metadata[fixername] = None
726 if proc.returncode == 0:
726 if proc.returncode == 0:
727 newdata = newerdata
727 newdata = newerdata
728 else:
728 else:
729 if not stderr:
729 if not stderr:
730 message = _(b'exited with status %d\n') % (proc.returncode,)
730 message = _(b'exited with status %d\n') % (proc.returncode,)
731 showstderr(ui, fixctx.rev(), fixername, message)
731 showstderr(ui, fixctx.rev(), fixername, message)
732 checktoolfailureaction(
732 checktoolfailureaction(
733 ui,
733 ui,
734 _(b'no fixes will be applied'),
734 _(b'no fixes will be applied'),
735 hint=_(
735 hint=_(
736 b'use --config fix.failure=continue to apply any '
736 b'use --config fix.failure=continue to apply any '
737 b'successful fixes anyway'
737 b'successful fixes anyway'
738 ),
738 ),
739 )
739 )
740 return metadata, newdata
740 return metadata, newdata
741
741
742
742
743 def showstderr(ui, rev, fixername, stderr):
743 def showstderr(ui, rev, fixername, stderr):
744 """Writes the lines of the stderr string as warnings on the ui
744 """Writes the lines of the stderr string as warnings on the ui
745
745
746 Uses the revision number and fixername to give more context to each line of
746 Uses the revision number and fixername to give more context to each line of
747 the error message. Doesn't include file names, since those take up a lot of
747 the error message. Doesn't include file names, since those take up a lot of
748 space and would tend to be included in the error message if they were
748 space and would tend to be included in the error message if they were
749 relevant.
749 relevant.
750 """
750 """
751 for line in re.split(b'[\r\n]+', stderr):
751 for line in re.split(b'[\r\n]+', stderr):
752 if line:
752 if line:
753 ui.warn(b'[')
753 ui.warn(b'[')
754 if rev is None:
754 if rev is None:
755 ui.warn(_(b'wdir'), label=b'evolve.rev')
755 ui.warn(_(b'wdir'), label=b'evolve.rev')
756 else:
756 else:
757 ui.warn(b'%d' % rev, label=b'evolve.rev')
757 ui.warn(b'%d' % rev, label=b'evolve.rev')
758 ui.warn(b'] %s: %s\n' % (fixername, line))
758 ui.warn(b'] %s: %s\n' % (fixername, line))
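
The resulting warnings look like this (tool name and message are illustrative)::

    [2] clang-format: Invalid argument
    [wdir] clang-format: Invalid argument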
759
759
760
760
761 def writeworkingdir(repo, ctx, filedata, replacements):
761 def writeworkingdir(repo, ctx, filedata, replacements):
762 """Write new content to the working copy and check out the new p1 if any
762 """Write new content to the working copy and check out the new p1 if any
763
763
764 We check out a new revision if and only if we fixed something in both the
764 We check out a new revision if and only if we fixed something in both the
765 working directory and its parent revision. This avoids the need for a full
765 working directory and its parent revision. This avoids the need for a full
766 update/merge, and means that the working directory simply isn't affected
766 update/merge, and means that the working directory simply isn't affected
767 unless the --working-dir flag is given.
767 unless the --working-dir flag is given.
768
768
769 Directly updates the dirstate for the affected files.
769 Directly updates the dirstate for the affected files.
770 """
770 """
771 for path, data in filedata.items():
771 for path, data in filedata.items():
772 fctx = ctx[path]
772 fctx = ctx[path]
773 fctx.write(data, fctx.flags())
773 fctx.write(data, fctx.flags())
774
774
775 oldp1 = repo.dirstate.p1()
775 oldp1 = repo.dirstate.p1()
776 newp1 = replacements.get(oldp1, oldp1)
776 newp1 = replacements.get(oldp1, oldp1)
777 if newp1 != oldp1:
777 if newp1 != oldp1:
778 assert repo.dirstate.p2() == nullid
778 assert repo.dirstate.p2() == nullid
779 with repo.dirstate.parentchange(repo):
779 with repo.dirstate.changing_parents(repo):
780 scmutil.movedirstate(repo, repo[newp1])
780 scmutil.movedirstate(repo, repo[newp1])
781
781
782
782
783 def replacerev(ui, repo, ctx, filedata, replacements):
783 def replacerev(ui, repo, ctx, filedata, replacements):
784 """Commit a new revision like the given one, but with file content changes
784 """Commit a new revision like the given one, but with file content changes
785
785
786 "ctx" is the original revision to be replaced by a modified one.
786 "ctx" is the original revision to be replaced by a modified one.
787
787
788 "filedata" is a dict that maps paths to their new file content. All other
788 "filedata" is a dict that maps paths to their new file content. All other
789 paths will be recreated from the original revision without changes.
789 paths will be recreated from the original revision without changes.
790 "filedata" may contain paths that didn't exist in the original revision;
790 "filedata" may contain paths that didn't exist in the original revision;
791 they will be added.
791 they will be added.
792
792
793 "replacements" is a dict that maps a single node to a single node, and it is
793 "replacements" is a dict that maps a single node to a single node, and it is
794 updated to indicate the original revision is replaced by the newly created
794 updated to indicate the original revision is replaced by the newly created
795 one. No entry is added if the replacement's node already exists.
795 one. No entry is added if the replacement's node already exists.
796
796
797 The new revision has the same parents as the old one, unless those parents
797 The new revision has the same parents as the old one, unless those parents
798 have already been replaced, in which case those replacements are the parents
798 have already been replaced, in which case those replacements are the parents
799 of this new revision. Thus, if revisions are replaced in topological order,
799 of this new revision. Thus, if revisions are replaced in topological order,
800 there is no need to rebase them into the original topology later.
800 there is no need to rebase them into the original topology later.
801 """
801 """
802
802
803 p1rev, p2rev = repo.changelog.parentrevs(ctx.rev())
803 p1rev, p2rev = repo.changelog.parentrevs(ctx.rev())
804 p1ctx, p2ctx = repo[p1rev], repo[p2rev]
804 p1ctx, p2ctx = repo[p1rev], repo[p2rev]
805 newp1node = replacements.get(p1ctx.node(), p1ctx.node())
805 newp1node = replacements.get(p1ctx.node(), p1ctx.node())
806 newp2node = replacements.get(p2ctx.node(), p2ctx.node())
806 newp2node = replacements.get(p2ctx.node(), p2ctx.node())
807
807
808 # We don't want to create a revision that has no changes from the original,
808 # We don't want to create a revision that has no changes from the original,
809 # but we should if the original revision's parent has been replaced.
809 # but we should if the original revision's parent has been replaced.
810 # Otherwise, we would produce an orphan that needs no actual human
810 # Otherwise, we would produce an orphan that needs no actual human
811 # intervention to evolve. We can't rely on commit() to avoid creating the
811 # intervention to evolve. We can't rely on commit() to avoid creating the
812 # unneeded revision because the extra field added below produces a new hash
812 # unneeded revision because the extra field added below produces a new hash
813 # regardless of file content changes.
813 # regardless of file content changes.
814 if (
814 if (
815 not filedata
815 not filedata
816 and p1ctx.node() not in replacements
816 and p1ctx.node() not in replacements
817 and p2ctx.node() not in replacements
817 and p2ctx.node() not in replacements
818 ):
818 ):
819 return
819 return
820
820
821 extra = ctx.extra().copy()
821 extra = ctx.extra().copy()
822 extra[b'fix_source'] = ctx.hex()
822 extra[b'fix_source'] = ctx.hex()
823
823
824 wctx = context.overlayworkingctx(repo)
824 wctx = context.overlayworkingctx(repo)
825 wctx.setbase(repo[newp1node])
825 wctx.setbase(repo[newp1node])
826 merge.revert_to(ctx, wc=wctx)
826 merge.revert_to(ctx, wc=wctx)
827 copies.graftcopies(wctx, ctx, ctx.p1())
827 copies.graftcopies(wctx, ctx, ctx.p1())
828
828
829 for path in filedata.keys():
829 for path in filedata.keys():
830 fctx = ctx[path]
830 fctx = ctx[path]
831 copysource = fctx.copysource()
831 copysource = fctx.copysource()
832 wctx.write(path, filedata[path], flags=fctx.flags())
832 wctx.write(path, filedata[path], flags=fctx.flags())
833 if copysource:
833 if copysource:
834 wctx.markcopied(path, copysource)
834 wctx.markcopied(path, copysource)
835
835
836 desc = rewriteutil.update_hash_refs(
836 desc = rewriteutil.update_hash_refs(
837 repo,
837 repo,
838 ctx.description(),
838 ctx.description(),
839 {oldnode: [newnode] for oldnode, newnode in replacements.items()},
839 {oldnode: [newnode] for oldnode, newnode in replacements.items()},
840 )
840 )
841
841
842 memctx = wctx.tomemctx(
842 memctx = wctx.tomemctx(
843 text=desc,
843 text=desc,
844 branch=ctx.branch(),
844 branch=ctx.branch(),
845 extra=extra,
845 extra=extra,
846 date=ctx.date(),
846 date=ctx.date(),
847 parents=(newp1node, newp2node),
847 parents=(newp1node, newp2node),
848 user=ctx.user(),
848 user=ctx.user(),
849 )
849 )
850
850
851 sucnode = memctx.commit()
851 sucnode = memctx.commit()
852 prenode = ctx.node()
852 prenode = ctx.node()
853 if prenode == sucnode:
853 if prenode == sucnode:
854 ui.debug(b'node %s already existed\n' % (ctx.hex()))
854 ui.debug(b'node %s already existed\n' % (ctx.hex()))
855 else:
855 else:
856 replacements[ctx.node()] = sucnode
856 replacements[ctx.node()] = sucnode
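
The parent remapping above is a plain dict lookup that falls back to the original node, e.g. with toy byte strings standing in for real nodes::

    >>> replacements = {b'old-p1': b'new-p1'}
    >>> replacements.get(b'old-p1', b'old-p1')
    b'new-p1'
    >>> replacements.get(b'other', b'other')
    b'other'

So when revisions are replaced in topological order, each new commit automatically lands on its already-rewritten parents.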
857
857
858
858
859 def getfixers(ui):
859 def getfixers(ui):
860 """Returns a map of configured fixer tools indexed by their names
860 """Returns a map of configured fixer tools indexed by their names
861
861
862 Each value is a Fixer object with methods that implement the behavior of the
862 Each value is a Fixer object with methods that implement the behavior of the
863 fixer's config suboptions. Does not validate the config values.
863 fixer's config suboptions. Does not validate the config values.
864 """
864 """
865 fixers = {}
865 fixers = {}
866 for name in fixernames(ui):
866 for name in fixernames(ui):
867 enabled = ui.configbool(b'fix', name + b':enabled')
867 enabled = ui.configbool(b'fix', name + b':enabled')
868 command = ui.config(b'fix', name + b':command')
868 command = ui.config(b'fix', name + b':command')
869 pattern = ui.config(b'fix', name + b':pattern')
869 pattern = ui.config(b'fix', name + b':pattern')
870 linerange = ui.config(b'fix', name + b':linerange')
870 linerange = ui.config(b'fix', name + b':linerange')
871 priority = ui.configint(b'fix', name + b':priority')
871 priority = ui.configint(b'fix', name + b':priority')
872 metadata = ui.configbool(b'fix', name + b':metadata')
872 metadata = ui.configbool(b'fix', name + b':metadata')
873 skipclean = ui.configbool(b'fix', name + b':skipclean')
873 skipclean = ui.configbool(b'fix', name + b':skipclean')
874 # Don't use a fixer if it has no pattern configured. It would be
874 # Don't use a fixer if it has no pattern configured. It would be
875 # dangerous to let it affect all files. It would be pointless to let it
875 # dangerous to let it affect all files. It would be pointless to let it
876 # affect no files. There is no reasonable subset of files to use as the
876 # affect no files. There is no reasonable subset of files to use as the
877 # default.
877 # default.
878 if command is None:
878 if command is None:
879 ui.warn(
879 ui.warn(
880 _(b'fixer tool has no command configuration: %s\n') % (name,)
880 _(b'fixer tool has no command configuration: %s\n') % (name,)
881 )
881 )
882 elif pattern is None:
882 elif pattern is None:
883 ui.warn(
883 ui.warn(
884 _(b'fixer tool has no pattern configuration: %s\n') % (name,)
884 _(b'fixer tool has no pattern configuration: %s\n') % (name,)
885 )
885 )
886 elif not enabled:
886 elif not enabled:
887 ui.debug(b'ignoring disabled fixer tool: %s\n' % (name,))
887 ui.debug(b'ignoring disabled fixer tool: %s\n' % (name,))
888 else:
888 else:
889 fixers[name] = Fixer(
889 fixers[name] = Fixer(
890 command, pattern, linerange, priority, metadata, skipclean
890 command, pattern, linerange, priority, metadata, skipclean
891 )
891 )
892 return collections.OrderedDict(
892 return collections.OrderedDict(
893 sorted(fixers.items(), key=lambda item: item[1]._priority, reverse=True)
893 sorted(fixers.items(), key=lambda item: item[1]._priority, reverse=True)
894 )
894 )
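
For orientation, a typical fixer configuration exercising these suboptions might look like this (the tool and its flags are only an example)::

    [fix]
    clang-format:command = clang-format --assume-filename={rootpath}
    clang-format:linerange = --lines={first}:{last}
    clang-format:pattern = set:**.cpp or **.hpp

With :linerange set, a file whose changed ranges are (1, 5) and (10, 12) has "--lines=1:5 --lines=10:12" appended to the command; see Fixer.command() below.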
895
895
896
896
897 def fixernames(ui):
897 def fixernames(ui):
898 """Returns the names of [fix] config options that have suboptions"""
898 """Returns the names of [fix] config options that have suboptions"""
899 names = set()
899 names = set()
900 for k, v in ui.configitems(b'fix'):
900 for k, v in ui.configitems(b'fix'):
901 if b':' in k:
901 if b':' in k:
902 names.add(k.split(b':', 1)[0])
902 names.add(k.split(b':', 1)[0])
903 return names
903 return names
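
The name is whatever precedes the first colon in a [fix] key, e.g.::

    >>> b'black:command'.split(b':', 1)[0]
    b'black'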
904
904
905
905
906 class Fixer:
906 class Fixer:
907 """Wraps the raw config values for a fixer with methods"""
907 """Wraps the raw config values for a fixer with methods"""
908
908
909 def __init__(
909 def __init__(
910 self, command, pattern, linerange, priority, metadata, skipclean
910 self, command, pattern, linerange, priority, metadata, skipclean
911 ):
911 ):
912 self._command = command
912 self._command = command
913 self._pattern = pattern
913 self._pattern = pattern
914 self._linerange = linerange
914 self._linerange = linerange
915 self._priority = priority
915 self._priority = priority
916 self._metadata = metadata
916 self._metadata = metadata
917 self._skipclean = skipclean
917 self._skipclean = skipclean
918
918
919 def affects(self, opts, fixctx, path):
919 def affects(self, opts, fixctx, path):
920 """Should this fixer run on the file at the given path and context?"""
920 """Should this fixer run on the file at the given path and context?"""
921 repo = fixctx.repo()
921 repo = fixctx.repo()
922 matcher = matchmod.match(
922 matcher = matchmod.match(
923 repo.root, repo.root, [self._pattern], ctx=fixctx
923 repo.root, repo.root, [self._pattern], ctx=fixctx
924 )
924 )
925 return matcher(path)
925 return matcher(path)
926
926
927 def shouldoutputmetadata(self):
927 def shouldoutputmetadata(self):
928 """Should the stdout of this fixer start with JSON and a null byte?"""
928 """Should the stdout of this fixer start with JSON and a null byte?"""
929 return self._metadata
929 return self._metadata
930
930
931 def command(self, ui, path, ranges):
931 def command(self, ui, path, ranges):
932 """A shell command to use to invoke this fixer on the given file/lines
932 """A shell command to use to invoke this fixer on the given file/lines
933
933
934 May return None if there is no appropriate command to run for the given
934 May return None if there is no appropriate command to run for the given
935 parameters.
935 parameters.
936 """
936 """
937 expand = cmdutil.rendercommandtemplate
937 expand = cmdutil.rendercommandtemplate
938 parts = [
938 parts = [
939 expand(
939 expand(
940 ui,
940 ui,
941 self._command,
941 self._command,
942 {b'rootpath': path, b'basename': os.path.basename(path)},
942 {b'rootpath': path, b'basename': os.path.basename(path)},
943 )
943 )
944 ]
944 ]
945 if self._linerange:
945 if self._linerange:
946 if self._skipclean and not ranges:
946 if self._skipclean and not ranges:
947 # No line ranges to fix, so don't run the fixer.
947 # No line ranges to fix, so don't run the fixer.
948 return None
948 return None
949 for first, last in ranges:
949 for first, last in ranges:
950 parts.append(
950 parts.append(
951 expand(
951 expand(
952 ui, self._linerange, {b'first': first, b'last': last}
952 ui, self._linerange, {b'first': first, b'last': last}
953 )
953 )
954 )
954 )
955 return b' '.join(parts)
955 return b' '.join(parts)
@@ -1,402 +1,402 @@
1 import contextlib
1 import contextlib
2 import os
2 import os
3
3
4 from mercurial.node import sha1nodeconstants
4 from mercurial.node import sha1nodeconstants
5 from mercurial import (
5 from mercurial import (
6 dirstatemap,
6 dirstatemap,
7 error,
7 error,
8 extensions,
8 extensions,
9 match as matchmod,
9 match as matchmod,
10 pycompat,
10 pycompat,
11 scmutil,
11 scmutil,
12 util,
12 util,
13 )
13 )
14 from mercurial.dirstateutils import (
14 from mercurial.dirstateutils import (
15 timestamp,
15 timestamp,
16 )
16 )
17 from mercurial.interfaces import (
17 from mercurial.interfaces import (
18 dirstate as intdirstate,
18 dirstate as intdirstate,
19 util as interfaceutil,
19 util as interfaceutil,
20 )
20 )
21
21
22 from . import gitutil
22 from . import gitutil
23
23
24
24
25 DirstateItem = dirstatemap.DirstateItem
25 DirstateItem = dirstatemap.DirstateItem
26 propertycache = util.propertycache
26 propertycache = util.propertycache
27 pygit2 = gitutil.get_pygit2()
27 pygit2 = gitutil.get_pygit2()
28
28
29
29
30 def readpatternfile(orig, filepath, warn, sourceinfo=False):
30 def readpatternfile(orig, filepath, warn, sourceinfo=False):
31 if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')):
31 if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')):
32 return orig(filepath, warn, sourceinfo=False)
32 return orig(filepath, warn, sourceinfo=False)
33 result = []
33 result = []
34 warnings = []
34 warnings = []
35 with open(filepath, 'rb') as fp:
35 with open(filepath, 'rb') as fp:
36 for l in fp:
36 for l in fp:
37 l = l.strip()
37 l = l.strip()
38 if not l or l.startswith(b'#'):
38 if not l or l.startswith(b'#'):
39 continue
39 continue
40 if l.startswith(b'!'):
40 if l.startswith(b'!'):
41 warnings.append(b'unsupported ignore pattern %s' % l)
41 warnings.append(b'unsupported ignore pattern %s' % l)
42 continue
42 continue
43 if l.startswith(b'/'):
43 if l.startswith(b'/'):
44 result.append(b'rootglob:' + l[1:])
44 result.append(b'rootglob:' + l[1:])
45 else:
45 else:
46 result.append(b'relglob:' + l)
46 result.append(b'relglob:' + l)
47 return result, warnings
47 return result, warnings
48
48
49
49
50 extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile)
50 extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile)
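
Condensing the translation rules above into a standalone sketch (in-memory lines instead of a file on disk)::

    >>> def translate(lines):
    ...     out = []
    ...     for l in lines:
    ...         l = l.strip()
    ...         if not l or l.startswith(b'#') or l.startswith(b'!'):
    ...             continue  # blanks, comments and (warned-about) negations
    ...         if l.startswith(b'/'):
    ...             out.append(b'rootglob:' + l[1:])
    ...         else:
    ...             out.append(b'relglob:' + l)
    ...     return out
    >>> translate([b'/build', b'*.pyc', b'!keep.txt'])
    [b'rootglob:build', b'relglob:*.pyc']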
51
51
52
52
53 _STATUS_MAP = {}
53 _STATUS_MAP = {}
54 if pygit2:
54 if pygit2:
55 _STATUS_MAP = {
55 _STATUS_MAP = {
56 pygit2.GIT_STATUS_CONFLICTED: b'm',
56 pygit2.GIT_STATUS_CONFLICTED: b'm',
57 pygit2.GIT_STATUS_CURRENT: b'n',
57 pygit2.GIT_STATUS_CURRENT: b'n',
58 pygit2.GIT_STATUS_IGNORED: b'?',
58 pygit2.GIT_STATUS_IGNORED: b'?',
59 pygit2.GIT_STATUS_INDEX_DELETED: b'r',
59 pygit2.GIT_STATUS_INDEX_DELETED: b'r',
60 pygit2.GIT_STATUS_INDEX_MODIFIED: b'n',
60 pygit2.GIT_STATUS_INDEX_MODIFIED: b'n',
61 pygit2.GIT_STATUS_INDEX_NEW: b'a',
61 pygit2.GIT_STATUS_INDEX_NEW: b'a',
62 pygit2.GIT_STATUS_INDEX_RENAMED: b'a',
62 pygit2.GIT_STATUS_INDEX_RENAMED: b'a',
63 pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n',
63 pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n',
64 pygit2.GIT_STATUS_WT_DELETED: b'r',
64 pygit2.GIT_STATUS_WT_DELETED: b'r',
65 pygit2.GIT_STATUS_WT_MODIFIED: b'n',
65 pygit2.GIT_STATUS_WT_MODIFIED: b'n',
66 pygit2.GIT_STATUS_WT_NEW: b'?',
66 pygit2.GIT_STATUS_WT_NEW: b'?',
67 pygit2.GIT_STATUS_WT_RENAMED: b'a',
67 pygit2.GIT_STATUS_WT_RENAMED: b'a',
68 pygit2.GIT_STATUS_WT_TYPECHANGE: b'n',
68 pygit2.GIT_STATUS_WT_TYPECHANGE: b'n',
69 pygit2.GIT_STATUS_WT_UNREADABLE: b'?',
69 pygit2.GIT_STATUS_WT_UNREADABLE: b'?',
70 pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: b'm',
70 pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: b'm',
71 }
71 }
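
libgit2 status values are bit flags, which is why the combined index+worktree entry at the end is needed: status_file() can return an OR of several flags, and only exact keys hit this map. For illustration (flag values as defined by libgit2, assumed here)::

    >>> GIT_STATUS_INDEX_MODIFIED = 1 << 1  # 0x002
    >>> GIT_STATUS_WT_MODIFIED = 1 << 8     # 0x100
    >>> hex(GIT_STATUS_INDEX_MODIFIED | GIT_STATUS_WT_MODIFIED)
    '0x102'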
72
72
73
73
74 @interfaceutil.implementer(intdirstate.idirstate)
74 @interfaceutil.implementer(intdirstate.idirstate)
75 class gitdirstate:
75 class gitdirstate:
76 def __init__(self, ui, vfs, gitrepo, use_dirstate_v2):
76 def __init__(self, ui, vfs, gitrepo, use_dirstate_v2):
77 self._ui = ui
77 self._ui = ui
78 self._root = os.path.dirname(vfs.base)
78 self._root = os.path.dirname(vfs.base)
79 self._opener = vfs
79 self._opener = vfs
80 self.git = gitrepo
80 self.git = gitrepo
81 self._plchangecallbacks = {}
81 self._plchangecallbacks = {}
82 # TODO: context.poststatusfixup is bad and uses this attribute
82 # TODO: context.poststatusfixup is bad and uses this attribute
83 self._dirty = False
83 self._dirty = False
84 self._mapcls = dirstatemap.dirstatemap
84 self._mapcls = dirstatemap.dirstatemap
85 self._use_dirstate_v2 = use_dirstate_v2
85 self._use_dirstate_v2 = use_dirstate_v2
86
86
87 @propertycache
87 @propertycache
88 def _map(self):
88 def _map(self):
89 """Return the dirstate contents (see documentation for dirstatemap)."""
89 """Return the dirstate contents (see documentation for dirstatemap)."""
90 self._map = self._mapcls(
90 self._map = self._mapcls(
91 self._ui,
91 self._ui,
92 self._opener,
92 self._opener,
93 self._root,
93 self._root,
94 sha1nodeconstants,
94 sha1nodeconstants,
95 self._use_dirstate_v2,
95 self._use_dirstate_v2,
96 )
96 )
97 return self._map
97 return self._map
98
98
99 def p1(self):
99 def p1(self):
100 try:
100 try:
101 return self.git.head.peel().id.raw
101 return self.git.head.peel().id.raw
102 except pygit2.GitError:
102 except pygit2.GitError:
103 # Typically happens when peeling HEAD fails, as in an
103 # Typically happens when peeling HEAD fails, as in an
104 # empty repository.
104 # empty repository.
105 return sha1nodeconstants.nullid
105 return sha1nodeconstants.nullid
106
106
107 def p2(self):
107 def p2(self):
108 # TODO: MERGE_HEAD? something like that, right?
108 # TODO: MERGE_HEAD? something like that, right?
109 return sha1nodeconstants.nullid
109 return sha1nodeconstants.nullid
110
110
111 def setparents(self, p1, p2=None):
111 def setparents(self, p1, p2=None):
112 if p2 is None:
112 if p2 is None:
113 p2 = sha1nodeconstants.nullid
113 p2 = sha1nodeconstants.nullid
114 assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
114 assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
115 self.git.head.set_target(gitutil.togitnode(p1))
115 self.git.head.set_target(gitutil.togitnode(p1))
116
116
117 @util.propertycache
117 @util.propertycache
118 def identity(self):
118 def identity(self):
119 return util.filestat.frompath(
119 return util.filestat.frompath(
120 os.path.join(self._root, b'.git', b'index')
120 os.path.join(self._root, b'.git', b'index')
121 )
121 )
122
122
123 def branch(self):
123 def branch(self):
124 return b'default'
124 return b'default'
125
125
126 def parents(self):
126 def parents(self):
127 # TODO how on earth do we find p2 if a merge is in flight?
127 # TODO how on earth do we find p2 if a merge is in flight?
128 return self.p1(), sha1nodeconstants.nullid
128 return self.p1(), sha1nodeconstants.nullid
129
129
130 def __iter__(self):
130 def __iter__(self):
131 return (pycompat.fsencode(f.path) for f in self.git.index)
131 return (pycompat.fsencode(f.path) for f in self.git.index)
132
132
133 def items(self):
133 def items(self):
134 for ie in self.git.index:
134 for ie in self.git.index:
135 yield ie.path, None # value should be a DirstateItem
135 yield ie.path, None # value should be a DirstateItem
136
136
137 # py2,3 compat forward
137 # py2,3 compat forward
138 iteritems = items
138 iteritems = items
139
139
140 def __getitem__(self, filename):
140 def __getitem__(self, filename):
141 try:
141 try:
142 gs = self.git.status_file(filename)
142 gs = self.git.status_file(filename)
143 except KeyError:
143 except KeyError:
144 return b'?'
144 return b'?'
145 return _STATUS_MAP[gs]
145 return _STATUS_MAP[gs]
146
146
147 def __contains__(self, filename):
147 def __contains__(self, filename):
148 try:
148 try:
149 gs = self.git.status_file(filename)
149 gs = self.git.status_file(filename)
150 return _STATUS_MAP[gs] != b'?'
150 return _STATUS_MAP[gs] != b'?'
151 except KeyError:
151 except KeyError:
152 return False
152 return False
153
153
154 def status(self, match, subrepos, ignored, clean, unknown):
154 def status(self, match, subrepos, ignored, clean, unknown):
155 listclean = clean
155 listclean = clean
156 # TODO handling of clean files - can we get that from git.status()?
156 # TODO handling of clean files - can we get that from git.status()?
157 modified, added, removed, deleted, unknown, ignored, clean = (
157 modified, added, removed, deleted, unknown, ignored, clean = (
158 [],
158 [],
159 [],
159 [],
160 [],
160 [],
161 [],
161 [],
162 [],
162 [],
163 [],
163 [],
164 [],
164 [],
165 )
165 )
166
166
167 try:
167 try:
168 mtime_boundary = timestamp.get_fs_now(self._opener)
168 mtime_boundary = timestamp.get_fs_now(self._opener)
169 except OSError:
169 except OSError:
170 # In largefiles or readonly context
170 # In largefiles or readonly context
171 mtime_boundary = None
171 mtime_boundary = None
172
172
173 gstatus = self.git.status()
173 gstatus = self.git.status()
174 for path, status in gstatus.items():
174 for path, status in gstatus.items():
175 path = pycompat.fsencode(path)
175 path = pycompat.fsencode(path)
176 if not match(path):
176 if not match(path):
177 continue
177 continue
178 if status == pygit2.GIT_STATUS_IGNORED:
178 if status == pygit2.GIT_STATUS_IGNORED:
179 if path.endswith(b'/'):
179 if path.endswith(b'/'):
180 continue
180 continue
181 ignored.append(path)
181 ignored.append(path)
182 elif status in (
182 elif status in (
183 pygit2.GIT_STATUS_WT_MODIFIED,
183 pygit2.GIT_STATUS_WT_MODIFIED,
184 pygit2.GIT_STATUS_INDEX_MODIFIED,
184 pygit2.GIT_STATUS_INDEX_MODIFIED,
185 pygit2.GIT_STATUS_WT_MODIFIED
185 pygit2.GIT_STATUS_WT_MODIFIED
186 | pygit2.GIT_STATUS_INDEX_MODIFIED,
186 | pygit2.GIT_STATUS_INDEX_MODIFIED,
187 ):
187 ):
188 modified.append(path)
188 modified.append(path)
189 elif status == pygit2.GIT_STATUS_INDEX_NEW:
189 elif status == pygit2.GIT_STATUS_INDEX_NEW:
190 added.append(path)
190 added.append(path)
191 elif status == pygit2.GIT_STATUS_WT_NEW:
191 elif status == pygit2.GIT_STATUS_WT_NEW:
192 unknown.append(path)
192 unknown.append(path)
193 elif status == pygit2.GIT_STATUS_WT_DELETED:
193 elif status == pygit2.GIT_STATUS_WT_DELETED:
194 deleted.append(path)
194 deleted.append(path)
195 elif status == pygit2.GIT_STATUS_INDEX_DELETED:
195 elif status == pygit2.GIT_STATUS_INDEX_DELETED:
196 removed.append(path)
196 removed.append(path)
197 else:
197 else:
198 raise error.Abort(
198 raise error.Abort(
199 b'unhandled case: status for %r is %r' % (path, status)
199 b'unhandled case: status for %r is %r' % (path, status)
200 )
200 )
201
201
202 if listclean:
202 if listclean:
203 observed = set(
203 observed = set(
204 modified + added + removed + deleted + unknown + ignored
204 modified + added + removed + deleted + unknown + ignored
205 )
205 )
206 index = self.git.index
206 index = self.git.index
207 index.read()
207 index.read()
208 for entry in index:
208 for entry in index:
209 path = pycompat.fsencode(entry.path)
209 path = pycompat.fsencode(entry.path)
210 if not match(path):
210 if not match(path):
211 continue
211 continue
212 if path in observed:
212 if path in observed:
213 continue # already in some other set
213 continue # already in some other set
214 if path.endswith(b'/'):
214 if path.endswith(b'/'):
215 continue # directory
215 continue # directory
216 clean.append(path)
216 clean.append(path)
217
217
218 # TODO are we really always sure of status here?
218 # TODO are we really always sure of status here?
219 return (
219 return (
220 False,
220 False,
221 scmutil.status(
221 scmutil.status(
222 modified, added, removed, deleted, unknown, ignored, clean
222 modified, added, removed, deleted, unknown, ignored, clean
223 ),
223 ),
224 mtime_boundary,
224 mtime_boundary,
225 )
225 )
226
226
227 def flagfunc(self, buildfallback):
227 def flagfunc(self, buildfallback):
228 # TODO we can do better
228 # TODO we can do better
229 return buildfallback()
229 return buildfallback()
230
230
231 def getcwd(self):
231 def getcwd(self):
232 # TODO is this a good way to do this?
232 # TODO is this a good way to do this?
233 return os.path.dirname(
233 return os.path.dirname(
234 os.path.dirname(pycompat.fsencode(self.git.path))
234 os.path.dirname(pycompat.fsencode(self.git.path))
235 )
235 )
236
236
237 def get_entry(self, path):
237 def get_entry(self, path):
238 """return a DirstateItem for the associated path"""
238 """return a DirstateItem for the associated path"""
239 entry = self._map.get(path)
239 entry = self._map.get(path)
240 if entry is None:
240 if entry is None:
241 return DirstateItem()
241 return DirstateItem()
242 return entry
242 return entry
243
243
244 def normalize(self, path):
244 def normalize(self, path):
245 normed = util.normcase(path)
245 normed = util.normcase(path)
246 assert normed == path, b"TODO handling of case folding: %s != %s" % (
246 assert normed == path, b"TODO handling of case folding: %s != %s" % (
247 normed,
247 normed,
248 path,
248 path,
249 )
249 )
250 return path
250 return path
251
251
252 @property
252 @property
253 def _checklink(self):
253 def _checklink(self):
254 return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path)))
254 return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path)))
255
255
256 def copies(self):
256 def copies(self):
257 # TODO support copies?
257 # TODO support copies?
258 return {}
258 return {}
259
259
260 # # TODO what the heck is this
260 # # TODO what the heck is this
261 _filecache = set()
261 _filecache = set()
262
262
263 def pendingparentchange(self):
263 def pendingparentchange(self):
264 # TODO: we need to implement the context manager bits and
264 # TODO: we need to implement the context manager bits and
265 # correctly stage/revert index edits.
265 # correctly stage/revert index edits.
266 return False
266 return False
267
267
268 def write(self, tr):
268 def write(self, tr):
269 # TODO: call parent change callbacks
269 # TODO: call parent change callbacks
270
270
271 if tr:
271 if tr:
272
272
273 def writeinner(category):
273 def writeinner(category):
274 self.git.index.write()
274 self.git.index.write()
275
275
276 tr.addpending(b'gitdirstate', writeinner)
276 tr.addpending(b'gitdirstate', writeinner)
277 else:
277 else:
278 self.git.index.write()
278 self.git.index.write()
279
279
280 def pathto(self, f, cwd=None):
280 def pathto(self, f, cwd=None):
281 if cwd is None:
281 if cwd is None:
282 cwd = self.getcwd()
282 cwd = self.getcwd()
283 # TODO core dirstate does something about slashes here
283 # TODO core dirstate does something about slashes here
284 assert isinstance(f, bytes)
284 assert isinstance(f, bytes)
285 r = util.pathto(self._root, cwd, f)
285 r = util.pathto(self._root, cwd, f)
286 return r
286 return r
287
287
288 def matches(self, match):
288 def matches(self, match):
289 for x in self.git.index:
289 for x in self.git.index:
290 p = pycompat.fsencode(x.path)
290 p = pycompat.fsencode(x.path)
291 if match(p):
291 if match(p):
292 yield p
292 yield p
293
293
294 def set_clean(self, f, parentfiledata):
294 def set_clean(self, f, parentfiledata):
295 """Mark a file normal and clean."""
295 """Mark a file normal and clean."""
296 # TODO: for now we just let libgit2 re-stat the file. We can
296 # TODO: for now we just let libgit2 re-stat the file. We can
297 # clearly do better.
297 # clearly do better.
298
298
299 def set_possibly_dirty(self, f):
299 def set_possibly_dirty(self, f):
300 """Mark a file normal, but possibly dirty."""
300 """Mark a file normal, but possibly dirty."""
301 # TODO: for now we just let libgit2 re-stat the file. We can
301 # TODO: for now we just let libgit2 re-stat the file. We can
302 # clearly do better.
302 # clearly do better.
303
303
304 def walk(self, match, subrepos, unknown, ignored, full=True):
304 def walk(self, match, subrepos, unknown, ignored, full=True):
305 # TODO: we need to use .status() and not iterate the index,
305 # TODO: we need to use .status() and not iterate the index,
306 # because the index doesn't force a re-walk and so `hg add` of
306 # because the index doesn't force a re-walk and so `hg add` of
307 # a new file without an intervening call to status will
307 # a new file without an intervening call to status will
308 # silently do nothing.
308 # silently do nothing.
309 r = {}
309 r = {}
310 cwd = self.getcwd()
310 cwd = self.getcwd()
311 for path, status in self.git.status().items():
311 for path, status in self.git.status().items():
312 if path.startswith('.hg/'):
312 if path.startswith('.hg/'):
313 continue
313 continue
314 path = pycompat.fsencode(path)
314 path = pycompat.fsencode(path)
315 if not match(path):
315 if not match(path):
316 continue
316 continue
317 # TODO construct the stat info from the status object?
317 # TODO construct the stat info from the status object?
318 try:
318 try:
319 s = os.stat(os.path.join(cwd, path))
319 s = os.stat(os.path.join(cwd, path))
320 except FileNotFoundError:
320 except FileNotFoundError:
321 continue
321 continue
322 r[path] = s
322 r[path] = s
323 return r
323 return r
324
324
325 def savebackup(self, tr, backupname):
325 def savebackup(self, tr, backupname):
326 # TODO: figure out a strategy for saving index backups.
326 # TODO: figure out a strategy for saving index backups.
327 pass
327 pass
328
328
329 def restorebackup(self, tr, backupname):
329 def restorebackup(self, tr, backupname):
330 # TODO: figure out a strategy for saving index backups.
330 # TODO: figure out a strategy for saving index backups.
331 pass
331 pass
332
332
333 def set_tracked(self, f, reset_copy=False):
333 def set_tracked(self, f, reset_copy=False):
334 # TODO: support copies and reset_copy=True
334 # TODO: support copies and reset_copy=True
335 uf = pycompat.fsdecode(f)
335 uf = pycompat.fsdecode(f)
336 if uf in self.git.index:
336 if uf in self.git.index:
337 return False
337 return False
338 index = self.git.index
338 index = self.git.index
339 index.read()
339 index.read()
340 index.add(uf)
340 index.add(uf)
341 index.write()
341 index.write()
342 return True
342 return True
343
343
344 def add(self, f):
344 def add(self, f):
345 index = self.git.index
345 index = self.git.index
346 index.read()
346 index.read()
347 index.add(pycompat.fsdecode(f))
347 index.add(pycompat.fsdecode(f))
348 index.write()
348 index.write()
349
349
350 def drop(self, f):
350 def drop(self, f):
351 index = self.git.index
351 index = self.git.index
352 index.read()
352 index.read()
353 fs = pycompat.fsdecode(f)
353 fs = pycompat.fsdecode(f)
354 if fs in index:
354 if fs in index:
355 index.remove(fs)
355 index.remove(fs)
356 index.write()
356 index.write()
357
357
358 def set_untracked(self, f):
358 def set_untracked(self, f):
359 index = self.git.index
359 index = self.git.index
360 index.read()
360 index.read()
361 fs = pycompat.fsdecode(f)
361 fs = pycompat.fsdecode(f)
362 if fs in index:
362 if fs in index:
363 index.remove(fs)
363 index.remove(fs)
364 index.write()
364 index.write()
365 return True
365 return True
366 return False
366 return False
367
367
368 def remove(self, f):
368 def remove(self, f):
369 index = self.git.index
369 index = self.git.index
370 index.read()
370 index.read()
371 index.remove(pycompat.fsdecode(f))
371 index.remove(pycompat.fsdecode(f))
372 index.write()
372 index.write()
373
373
374 def copied(self, path):
374 def copied(self, path):
375 # TODO: track copies?
375 # TODO: track copies?
376 return None
376 return None
377
377
378 def prefetch_parents(self):
378 def prefetch_parents(self):
379 # TODO
379 # TODO
380 pass
380 pass
381
381
382 def update_file(self, *args, **kwargs):
382 def update_file(self, *args, **kwargs):
383 # TODO
383 # TODO
384 pass
384 pass
385
385
386 @contextlib.contextmanager
386 @contextlib.contextmanager
387 def parentchange(self, repo):
387 def changing_parents(self, repo):
388 # TODO: track this maybe?
388 # TODO: track this maybe?
389 yield
389 yield
390
390
391 def addparentchangecallback(self, category, callback):
391 def addparentchangecallback(self, category, callback):
392 # TODO: should this be added to the dirstate interface?
392 # TODO: should this be added to the dirstate interface?
393 self._plchangecallbacks[category] = callback
393 self._plchangecallbacks[category] = callback
394
394
395 def clearbackup(self, tr, backupname):
395 def clearbackup(self, tr, backupname):
396 # TODO
396 # TODO
397 pass
397 pass
398
398
399 def setbranch(self, branch):
399 def setbranch(self, branch):
400 raise error.Abort(
400 raise error.Abort(
401 b'git repos do not support branches. try using bookmarks'
401 b'git repos do not support branches. try using bookmarks'
402 )
402 )
@@ -1,894 +1,894 @@
1 # keyword.py - $Keyword$ expansion for Mercurial
1 # keyword.py - $Keyword$ expansion for Mercurial
2 #
2 #
3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # $Id$
8 # $Id$
9 #
9 #
10 # Keyword expansion hack against the grain of a Distributed SCM
10 # Keyword expansion hack against the grain of a Distributed SCM
11 #
11 #
12 # There are many good reasons why this is not needed in a distributed
12 # There are many good reasons why this is not needed in a distributed
13 # SCM, still it may be useful in very small projects based on single
13 # SCM, still it may be useful in very small projects based on single
14 # files (like LaTeX packages), that are mostly addressed to an
14 # files (like LaTeX packages), that are mostly addressed to an
15 # audience not running a version control system.
15 # audience not running a version control system.
16 #
16 #
17 # For in-depth discussion refer to
17 # For in-depth discussion refer to
18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
19 #
19 #
20 # Keyword expansion is based on Mercurial's changeset template mappings.
20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 #
21 #
22 # Binary files are not touched.
22 # Binary files are not touched.
23 #
23 #
24 # Files to act upon/ignore are specified in the [keyword] section.
24 # Files to act upon/ignore are specified in the [keyword] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
26 #
26 #
27 # Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.
27 # Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.
28
28
29 '''expand keywords in tracked files
29 '''expand keywords in tracked files
30
30
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 tracked text files selected by your configuration.
32 tracked text files selected by your configuration.
33
33
34 Keywords are only expanded in local repositories and not stored in the
34 Keywords are only expanded in local repositories and not stored in the
35 change history. The mechanism can be regarded as a convenience for the
35 change history. The mechanism can be regarded as a convenience for the
36 current user or for archive distribution.
36 current user or for archive distribution.
37
37
38 Keywords expand to the changeset data pertaining to the latest change
38 Keywords expand to the changeset data pertaining to the latest change
39 relative to the working directory parent of each file.
39 relative to the working directory parent of each file.
40
40
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 sections of hgrc files.
42 sections of hgrc files.
43
43
44 Example::
44 Example::
45
45
46 [keyword]
46 [keyword]
47 # expand keywords in every python file except those matching "x*"
47 # expand keywords in every python file except those matching "x*"
48 **.py =
48 **.py =
49 x* = ignore
49 x* = ignore
50
50
51 [keywordset]
51 [keywordset]
52 # prefer svn- over cvs-like default keywordmaps
52 # prefer svn- over cvs-like default keywordmaps
53 svn = True
53 svn = True
54
54
55 .. note::
55 .. note::
56
56
57 The more specific you are in your filename patterns, the less
57 The more specific you are in your filename patterns, the less
58 speed you lose in huge repositories.
58 speed you lose in huge repositories.
59
59
60 For [keywordmaps] template mapping and expansion demonstration and
60 For [keywordmaps] template mapping and expansion demonstration and
61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
62 available templates and filters.
62 available templates and filters.
63
63
64 Three additional date template filters are provided:
64 Three additional date template filters are provided:
65
65
66 :``utcdate``: "2006/09/18 15:13:13"
66 :``utcdate``: "2006/09/18 15:13:13"
67 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 :``svnutcdate``: "2006-09-18 15:13:13Z"
68 :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
68 :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
69
69
70 The default template mappings (view with :hg:`kwdemo -d`) can be
70 The default template mappings (view with :hg:`kwdemo -d`) can be
71 replaced with customized keywords and templates. Again, run
71 replaced with customized keywords and templates. Again, run
72 :hg:`kwdemo` to control the results of your configuration changes.
72 :hg:`kwdemo` to control the results of your configuration changes.
73
73
74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
75 to avoid storing expanded keywords in the change history.
75 to avoid storing expanded keywords in the change history.
76
76
77 To force expansion after enabling it, or a configuration change, run
77 To force expansion after enabling it, or a configuration change, run
78 :hg:`kwexpand`.
78 :hg:`kwexpand`.
79
79
80 Expansions spanning more than one line and incremental expansions,
80 Expansions spanning more than one line and incremental expansions,
81 like CVS' $Log$, are not supported. A keyword template map "Log =
81 like CVS' $Log$, are not supported. A keyword template map "Log =
82 {desc}" expands to the first line of the changeset description.
82 {desc}" expands to the first line of the changeset description.
83 '''
83 '''
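# An illustrative sketch, not part of the original source (names and
# values are hypothetical): a customized template map such as
#
#   [keywordmaps]
#   Branch = {branch}
#
# makes a literal "$Branch$" in a configured file expand in the working
# directory to something like "$Branch: default $".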
84
84
85
85
86 import os
86 import os
87 import re
87 import re
88 import weakref
88 import weakref
89
89
90 from mercurial.i18n import _
90 from mercurial.i18n import _
91 from mercurial.pycompat import getattr
91 from mercurial.pycompat import getattr
92 from mercurial.hgweb import webcommands
92 from mercurial.hgweb import webcommands
93
93
94 from mercurial import (
94 from mercurial import (
95 cmdutil,
95 cmdutil,
96 context,
96 context,
97 dispatch,
97 dispatch,
98 error,
98 error,
99 extensions,
99 extensions,
100 filelog,
100 filelog,
101 localrepo,
101 localrepo,
102 logcmdutil,
102 logcmdutil,
103 match,
103 match,
104 patch,
104 patch,
105 pathutil,
105 pathutil,
106 pycompat,
106 pycompat,
107 registrar,
107 registrar,
108 scmutil,
108 scmutil,
109 templatefilters,
109 templatefilters,
110 templateutil,
110 templateutil,
111 util,
111 util,
112 )
112 )
113 from mercurial.utils import (
113 from mercurial.utils import (
114 dateutil,
114 dateutil,
115 stringutil,
115 stringutil,
116 )
116 )
117 from mercurial.dirstateutils import timestamp
117 from mercurial.dirstateutils import timestamp
118
118
119 cmdtable = {}
119 cmdtable = {}
120 command = registrar.command(cmdtable)
120 command = registrar.command(cmdtable)
121 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
121 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
122 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
122 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
123 # be specifying the version(s) of Mercurial they are tested with, or
123 # be specifying the version(s) of Mercurial they are tested with, or
124 # leave the attribute unspecified.
124 # leave the attribute unspecified.
125 testedwith = b'ships-with-hg-core'
125 testedwith = b'ships-with-hg-core'
126
126
127 # hg commands that do not act on keywords
127 # hg commands that do not act on keywords
128 nokwcommands = (
128 nokwcommands = (
129 b'add addremove annotate bundle export grep incoming init log'
129 b'add addremove annotate bundle export grep incoming init log'
130 b' outgoing push tip verify convert email glog'
130 b' outgoing push tip verify convert email glog'
131 )
131 )
132
132
133 # webcommands that do not act on keywords
133 # webcommands that do not act on keywords
134 nokwwebcommands = b'annotate changeset rev filediff diff comparison'
134 nokwwebcommands = b'annotate changeset rev filediff diff comparison'
135
135
136 # hg commands that trigger expansion only when writing to working dir,
136 # hg commands that trigger expansion only when writing to working dir,
137 # not when reading filelog, and unexpand when reading from working dir
137 # not when reading filelog, and unexpand when reading from working dir
138 restricted = (
138 restricted = (
139 b'merge kwexpand kwshrink record qrecord resolve transplant'
139 b'merge kwexpand kwshrink record qrecord resolve transplant'
140 b' unshelve rebase graft backout histedit fetch'
140 b' unshelve rebase graft backout histedit fetch'
141 )
141 )
142
142
143 # names of extensions using dorecord
143 # names of extensions using dorecord
144 recordextensions = b'record'
144 recordextensions = b'record'
145
145
146 colortable = {
146 colortable = {
147 b'kwfiles.enabled': b'green bold',
147 b'kwfiles.enabled': b'green bold',
148 b'kwfiles.deleted': b'cyan bold underline',
148 b'kwfiles.deleted': b'cyan bold underline',
149 b'kwfiles.enabledunknown': b'green',
149 b'kwfiles.enabledunknown': b'green',
150 b'kwfiles.ignored': b'bold',
150 b'kwfiles.ignored': b'bold',
151 b'kwfiles.ignoredunknown': b'none',
151 b'kwfiles.ignoredunknown': b'none',
152 }
152 }
153
153
154 templatefilter = registrar.templatefilter()
154 templatefilter = registrar.templatefilter()
155
155
156 configtable = {}
156 configtable = {}
157 configitem = registrar.configitem(configtable)
157 configitem = registrar.configitem(configtable)
158
158
159 configitem(
159 configitem(
160 b'keywordset',
160 b'keywordset',
161 b'svn',
161 b'svn',
162 default=False,
162 default=False,
163 )
163 )
164 # date like in cvs' $Date
164 # date like in cvs' $Date
165 @templatefilter(b'utcdate', intype=templateutil.date)
165 @templatefilter(b'utcdate', intype=templateutil.date)
166 def utcdate(date):
166 def utcdate(date):
167 """Date. Returns a UTC-date in this format: "2009/08/18 11:00:13"."""
167 """Date. Returns a UTC-date in this format: "2009/08/18 11:00:13"."""
168 dateformat = b'%Y/%m/%d %H:%M:%S'
168 dateformat = b'%Y/%m/%d %H:%M:%S'
169 return dateutil.datestr((date[0], 0), dateformat)
169 return dateutil.datestr((date[0], 0), dateformat)
170
170
171
171
172 # date like in svn's $Date
172 # date like in svn's $Date
173 @templatefilter(b'svnisodate', intype=templateutil.date)
173 @templatefilter(b'svnisodate', intype=templateutil.date)
174 def svnisodate(date):
174 def svnisodate(date):
175 """Date. Returns a date in this format: "2009-08-18 13:00:13
175 """Date. Returns a date in this format: "2009-08-18 13:00:13
176 +0200 (Tue, 18 Aug 2009)".
176 +0200 (Tue, 18 Aug 2009)".
177 """
177 """
178 return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
178 return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
179
179
180
180
181 # date like in svn's $Id
181 # date like in svn's $Id
182 @templatefilter(b'svnutcdate', intype=templateutil.date)
182 @templatefilter(b'svnutcdate', intype=templateutil.date)
183 def svnutcdate(date):
183 def svnutcdate(date):
184 """Date. Returns a UTC-date in this format: "2009-08-18
184 """Date. Returns a UTC-date in this format: "2009-08-18
185 11:00:13Z".
185 11:00:13Z".
186 """
186 """
187 dateformat = b'%Y-%m-%d %H:%M:%SZ'
187 dateformat = b'%Y-%m-%d %H:%M:%SZ'
188 return dateutil.datestr((date[0], 0), dateformat)
188 return dateutil.datestr((date[0], 0), dateformat)
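# A minimal standalone sketch of the three output shapes, using only the
# stdlib and assuming a (unixtime, tzoffset) date tuple of (1158592393, 0);
# the helper name is illustrative, not part of the extension:
def _demo_date_filters():
    import datetime

    utc = datetime.datetime.fromtimestamp(1158592393, datetime.timezone.utc)
    # utcdate renders in UTC, dropping the offset
    assert utc.strftime('%Y/%m/%d %H:%M:%S') == '2006/09/18 15:13:13'
    # svnutcdate does the same with a trailing "Z"
    assert utc.strftime('%Y-%m-%d %H:%M:%SZ') == '2006-09-18 15:13:13Z'
    # svnisodate keeps the local offset, e.g. "... +0000 (Mon, 18 Sep 2006)"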
189
189
190
190
191 # make keyword tools accessible
191 # make keyword tools accessible
192 kwtools = {b'hgcmd': b''}
192 kwtools = {b'hgcmd': b''}
193
193
194
194
195 def _defaultkwmaps(ui):
195 def _defaultkwmaps(ui):
196 '''Returns default keywordmaps according to keywordset configuration.'''
196 '''Returns default keywordmaps according to keywordset configuration.'''
197 templates = {
197 templates = {
198 b'Revision': b'{node|short}',
198 b'Revision': b'{node|short}',
199 b'Author': b'{author|user}',
199 b'Author': b'{author|user}',
200 }
200 }
201 kwsets = (
201 kwsets = (
202 {
202 {
203 b'Date': b'{date|utcdate}',
203 b'Date': b'{date|utcdate}',
204 b'RCSfile': b'{file|basename},v',
204 b'RCSfile': b'{file|basename},v',
205 b'RCSFile': b'{file|basename},v', # kept for backwards compatibility
205 b'RCSFile': b'{file|basename},v', # kept for backwards compatibility
206 # with hg-keyword
206 # with hg-keyword
207 b'Source': b'{root}/{file},v',
207 b'Source': b'{root}/{file},v',
208 b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
208 b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
209 b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
209 b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
210 },
210 },
211 {
211 {
212 b'Date': b'{date|svnisodate}',
212 b'Date': b'{date|svnisodate}',
213 b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
213 b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
214 b'LastChangedRevision': b'{node|short}',
214 b'LastChangedRevision': b'{node|short}',
215 b'LastChangedBy': b'{author|user}',
215 b'LastChangedBy': b'{author|user}',
216 b'LastChangedDate': b'{date|svnisodate}',
216 b'LastChangedDate': b'{date|svnisodate}',
217 },
217 },
218 )
218 )
219 templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
219 templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
220 return templates
220 return templates
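# Note: ui.configbool() returns False or True, and since bool subclasses
# int in Python, kwsets[False] selects the cvs-like maps above and
# kwsets[True] the svn-like ones.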
221
221
222
222
223 def _shrinktext(text, subfunc):
223 def _shrinktext(text, subfunc):
224 """Helper for keyword expansion removal in text.
224 """Helper for keyword expansion removal in text.
225 Depending on subfunc also returns number of substitutions."""
225 Depending on subfunc also returns number of substitutions."""
226 return subfunc(br'$\1$', text)
226 return subfunc(br'$\1$', text)
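# A standalone sketch of the shrink step (hypothetical file content; the
# pattern mirrors kwtemplater.rekwexp below, and the helper name is
# illustrative, not part of the extension):
def _demo_shrink():
    import re

    rekwexp = re.compile(br'\$(Id): [^$\n\r]*? \$')
    text = b'# $Id: demo.txt,v a1b2c3d4 $\n'
    # the expanded form collapses back to the bare placeholder
    assert rekwexp.subn(br'$\1$', text) == (b'# $Id$\n', 1)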
227
227
228
228
229 def _preselect(wstatus, changed):
229 def _preselect(wstatus, changed):
230 """Retrieves modified and added files from a working directory state
230 """Retrieves modified and added files from a working directory state
231 and returns the subset of each contained in given changed files
231 and returns the subset of each contained in given changed files
232 retrieved from a change context."""
232 retrieved from a change context."""
233 modified = [f for f in wstatus.modified if f in changed]
233 modified = [f for f in wstatus.modified if f in changed]
234 added = [f for f in wstatus.added if f in changed]
234 added = [f for f in wstatus.added if f in changed]
235 return modified, added
235 return modified, added
236
236
237
237
238 class kwtemplater:
238 class kwtemplater:
239 """
239 """
240 Sets up keyword templates, corresponding keyword regex, and
240 Sets up keyword templates, corresponding keyword regex, and
241 provides keyword substitution functions.
241 provides keyword substitution functions.
242 """
242 """
243
243
244 def __init__(self, ui, repo, inc, exc):
244 def __init__(self, ui, repo, inc, exc):
245 self.ui = ui
245 self.ui = ui
246 self._repo = weakref.ref(repo)
246 self._repo = weakref.ref(repo)
247 self.match = match.match(repo.root, b'', [], inc, exc)
247 self.match = match.match(repo.root, b'', [], inc, exc)
248 self.restrict = kwtools[b'hgcmd'] in restricted.split()
248 self.restrict = kwtools[b'hgcmd'] in restricted.split()
249 self.postcommit = False
249 self.postcommit = False
250
250
251 kwmaps = self.ui.configitems(b'keywordmaps')
251 kwmaps = self.ui.configitems(b'keywordmaps')
252 if kwmaps: # override default templates
252 if kwmaps: # override default templates
253 self.templates = dict(kwmaps)
253 self.templates = dict(kwmaps)
254 else:
254 else:
255 self.templates = _defaultkwmaps(self.ui)
255 self.templates = _defaultkwmaps(self.ui)
256
256
257 @property
257 @property
258 def repo(self):
258 def repo(self):
259 return self._repo()
259 return self._repo()
260
260
261 @util.propertycache
261 @util.propertycache
262 def escape(self):
262 def escape(self):
263 '''Returns bar-separated and escaped keywords.'''
263 '''Returns bar-separated and escaped keywords.'''
264 return b'|'.join(map(stringutil.reescape, self.templates.keys()))
264 return b'|'.join(map(stringutil.reescape, self.templates.keys()))
265
265
266 @util.propertycache
266 @util.propertycache
267 def rekw(self):
267 def rekw(self):
268 '''Returns regex for unexpanded keywords.'''
268 '''Returns regex for unexpanded keywords.'''
269 return re.compile(br'\$(%s)\$' % self.escape)
269 return re.compile(br'\$(%s)\$' % self.escape)
270
270
271 @util.propertycache
271 @util.propertycache
272 def rekwexp(self):
272 def rekwexp(self):
273 '''Returns regex for expanded keywords.'''
273 '''Returns regex for expanded keywords.'''
274 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
274 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
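# rekw matches the bare "$Keyword$" form, rekwexp the expanded
# "$Keyword: value $" form; the lazy "[^$\n\r]*?" keeps each match on a
# single line and stops at the first closing " $".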
275
275
276 def substitute(self, data, path, ctx, subfunc):
276 def substitute(self, data, path, ctx, subfunc):
277 '''Replaces keywords in data with expanded template.'''
277 '''Replaces keywords in data with expanded template.'''
278
278
279 def kwsub(mobj):
279 def kwsub(mobj):
280 kw = mobj.group(1)
280 kw = mobj.group(1)
281 ct = logcmdutil.maketemplater(
281 ct = logcmdutil.maketemplater(
282 self.ui, self.repo, self.templates[kw]
282 self.ui, self.repo, self.templates[kw]
283 )
283 )
284 self.ui.pushbuffer()
284 self.ui.pushbuffer()
285 ct.show(ctx, root=self.repo.root, file=path)
285 ct.show(ctx, root=self.repo.root, file=path)
286 ekw = templatefilters.firstline(self.ui.popbuffer())
286 ekw = templatefilters.firstline(self.ui.popbuffer())
287 return b'$%s: %s $' % (kw, ekw)
287 return b'$%s: %s $' % (kw, ekw)
288
288
289 return subfunc(kwsub, data)
289 return subfunc(kwsub, data)
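# Resulting shape: with a template map such as "Revision = {node|short}",
# kwsub rewrites "$Revision$" into "$Revision: 0123456789ab $"
# (hypothetical node), keeping only the first line of the rendered
# template.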
290
290
291 def linkctx(self, path, fileid):
291 def linkctx(self, path, fileid):
292 '''Similar to filelog.linkrev, but returns a changectx.'''
292 '''Similar to filelog.linkrev, but returns a changectx.'''
293 return self.repo.filectx(path, fileid=fileid).changectx()
293 return self.repo.filectx(path, fileid=fileid).changectx()
294
294
295 def expand(self, path, node, data):
295 def expand(self, path, node, data):
296 '''Returns data with keywords expanded.'''
296 '''Returns data with keywords expanded.'''
297 if (
297 if (
298 not self.restrict
298 not self.restrict
299 and self.match(path)
299 and self.match(path)
300 and not stringutil.binary(data)
300 and not stringutil.binary(data)
301 ):
301 ):
302 ctx = self.linkctx(path, node)
302 ctx = self.linkctx(path, node)
303 return self.substitute(data, path, ctx, self.rekw.sub)
303 return self.substitute(data, path, ctx, self.rekw.sub)
304 return data
304 return data
305
305
306 def iskwfile(self, cand, ctx):
306 def iskwfile(self, cand, ctx):
307 """Returns subset of candidates which are configured for keyword
307 """Returns subset of candidates which are configured for keyword
308 expansion but are not symbolic links."""
308 expansion but are not symbolic links."""
309 return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]
309 return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]
310
310
311 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
311 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
312 '''Overwrites selected files expanding/shrinking keywords.'''
312 '''Overwrites selected files expanding/shrinking keywords.'''
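# Flag summary, as used by the callers below: 'expand' chooses between
# expanding and shrinking; 'lookup' resolves each file's changectx via
# linkctx() (kwexpand/kwshrink and rollback); 'rekw' matches unexpanded
# keywords even outside restricted mode (used after record/amend).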
313 if self.restrict or lookup or self.postcommit: # exclude kw_copy
313 if self.restrict or lookup or self.postcommit: # exclude kw_copy
314 candidates = self.iskwfile(candidates, ctx)
314 candidates = self.iskwfile(candidates, ctx)
315 if not candidates:
315 if not candidates:
316 return
316 return
317 kwcmd = self.restrict and lookup # kwexpand/kwshrink
317 kwcmd = self.restrict and lookup # kwexpand/kwshrink
318 if self.restrict or expand and lookup:
318 if self.restrict or expand and lookup:
319 mf = ctx.manifest()
319 mf = ctx.manifest()
320 if self.restrict or rekw:
320 if self.restrict or rekw:
321 re_kw = self.rekw
321 re_kw = self.rekw
322 else:
322 else:
323 re_kw = self.rekwexp
323 re_kw = self.rekwexp
324 if expand:
324 if expand:
325 msg = _(b'overwriting %s expanding keywords\n')
325 msg = _(b'overwriting %s expanding keywords\n')
326 else:
326 else:
327 msg = _(b'overwriting %s shrinking keywords\n')
327 msg = _(b'overwriting %s shrinking keywords\n')
328 wctx = self.repo[None]
328 wctx = self.repo[None]
329 for f in candidates:
329 for f in candidates:
330 if self.restrict:
330 if self.restrict:
331 data = self.repo.file(f).read(mf[f])
331 data = self.repo.file(f).read(mf[f])
332 else:
332 else:
333 data = self.repo.wread(f)
333 data = self.repo.wread(f)
334 if stringutil.binary(data):
334 if stringutil.binary(data):
335 continue
335 continue
336 if expand:
336 if expand:
337 parents = ctx.parents()
337 parents = ctx.parents()
338 if lookup:
338 if lookup:
339 ctx = self.linkctx(f, mf[f])
339 ctx = self.linkctx(f, mf[f])
340 elif self.restrict and len(parents) > 1:
340 elif self.restrict and len(parents) > 1:
341 # merge commit
341 # merge commit
342 # in case of conflict f is in modified state during
342 # in case of conflict f is in modified state during
343 # merge, even if f does not differ from f in parent
343 # merge, even if f does not differ from f in parent
344 for p in parents:
344 for p in parents:
345 if f in p and not p[f].cmp(ctx[f]):
345 if f in p and not p[f].cmp(ctx[f]):
346 ctx = p[f].changectx()
346 ctx = p[f].changectx()
347 break
347 break
348 data, found = self.substitute(data, f, ctx, re_kw.subn)
348 data, found = self.substitute(data, f, ctx, re_kw.subn)
349 elif self.restrict:
349 elif self.restrict:
350 found = re_kw.search(data)
350 found = re_kw.search(data)
351 else:
351 else:
352 data, found = _shrinktext(data, re_kw.subn)
352 data, found = _shrinktext(data, re_kw.subn)
353 if found:
353 if found:
354 self.ui.note(msg % f)
354 self.ui.note(msg % f)
355 fp = self.repo.wvfs(f, b"wb", atomictemp=True)
355 fp = self.repo.wvfs(f, b"wb", atomictemp=True)
356 fp.write(data)
356 fp.write(data)
357 fp.close()
357 fp.close()
358 if kwcmd:
358 if kwcmd:
359 s = wctx[f].lstat()
359 s = wctx[f].lstat()
360 mode = s.st_mode
360 mode = s.st_mode
361 size = s.st_size
361 size = s.st_size
362 mtime = timestamp.mtime_of(s)
362 mtime = timestamp.mtime_of(s)
363 cache_data = (mode, size, mtime)
363 cache_data = (mode, size, mtime)
364 self.repo.dirstate.set_clean(f, cache_data)
364 self.repo.dirstate.set_clean(f, cache_data)
365 elif self.postcommit:
365 elif self.postcommit:
366 self.repo.dirstate.update_file_p1(f, p1_tracked=True)
366 self.repo.dirstate.update_file_p1(f, p1_tracked=True)
367
367
368 def shrink(self, fname, text):
368 def shrink(self, fname, text):
369 '''Returns text with all keyword substitutions removed.'''
369 '''Returns text with all keyword substitutions removed.'''
370 if self.match(fname) and not stringutil.binary(text):
370 if self.match(fname) and not stringutil.binary(text):
371 return _shrinktext(text, self.rekwexp.sub)
371 return _shrinktext(text, self.rekwexp.sub)
372 return text
372 return text
373
373
374 def shrinklines(self, fname, lines):
374 def shrinklines(self, fname, lines):
375 '''Returns lines with keyword substitutions removed.'''
375 '''Returns lines with keyword substitutions removed.'''
376 if self.match(fname):
376 if self.match(fname):
377 text = b''.join(lines)
377 text = b''.join(lines)
378 if not stringutil.binary(text):
378 if not stringutil.binary(text):
379 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
379 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
380 return lines
380 return lines
381
381
382 def wread(self, fname, data):
382 def wread(self, fname, data):
383 """If in restricted mode returns data read from wdir with
383 """If in restricted mode returns data read from wdir with
384 keyword substitutions removed."""
384 keyword substitutions removed."""
385 if self.restrict:
385 if self.restrict:
386 return self.shrink(fname, data)
386 return self.shrink(fname, data)
387 return data
387 return data
388
388
389
389
390 class kwfilelog(filelog.filelog):
390 class kwfilelog(filelog.filelog):
391 """
391 """
392 Subclass of filelog to hook into its read, add, cmp methods.
392 Subclass of filelog to hook into its read, add, cmp methods.
393 Keywords are "stored" unexpanded, and processed on reading.
393 Keywords are "stored" unexpanded, and processed on reading.
394 """
394 """
395
395
396 def __init__(self, opener, kwt, path):
396 def __init__(self, opener, kwt, path):
397 super(kwfilelog, self).__init__(opener, path)
397 super(kwfilelog, self).__init__(opener, path)
398 self.kwt = kwt
398 self.kwt = kwt
399 self.path = path
399 self.path = path
400
400
401 def read(self, node):
401 def read(self, node):
402 '''Expands keywords when reading filelog.'''
402 '''Expands keywords when reading filelog.'''
403 data = super(kwfilelog, self).read(node)
403 data = super(kwfilelog, self).read(node)
404 if self.renamed(node):
404 if self.renamed(node):
405 return data
405 return data
406 return self.kwt.expand(self.path, node, data)
406 return self.kwt.expand(self.path, node, data)
407
407
408 def add(self, text, meta, tr, link, p1=None, p2=None):
408 def add(self, text, meta, tr, link, p1=None, p2=None):
409 '''Removes keyword substitutions when adding to filelog.'''
409 '''Removes keyword substitutions when adding to filelog.'''
410 text = self.kwt.shrink(self.path, text)
410 text = self.kwt.shrink(self.path, text)
411 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
411 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
412
412
413 def cmp(self, node, text):
413 def cmp(self, node, text):
414 '''Removes keyword substitutions for comparison.'''
414 '''Removes keyword substitutions for comparison.'''
415 text = self.kwt.shrink(self.path, text)
415 text = self.kwt.shrink(self.path, text)
416 return super(kwfilelog, self).cmp(node, text)
416 return super(kwfilelog, self).cmp(node, text)
417
417
418
418
419 def _status(ui, repo, wctx, kwt, *pats, **opts):
419 def _status(ui, repo, wctx, kwt, *pats, **opts):
420 """Bails out if [keyword] configuration is not active.
420 """Bails out if [keyword] configuration is not active.
421 Returns status of working directory."""
421 Returns status of working directory."""
422 if kwt:
422 if kwt:
423 opts = pycompat.byteskwargs(opts)
423 opts = pycompat.byteskwargs(opts)
424 return repo.status(
424 return repo.status(
425 match=scmutil.match(wctx, pats, opts),
425 match=scmutil.match(wctx, pats, opts),
426 clean=True,
426 clean=True,
427 unknown=opts.get(b'unknown') or opts.get(b'all'),
427 unknown=opts.get(b'unknown') or opts.get(b'all'),
428 )
428 )
429 if ui.configitems(b'keyword'):
429 if ui.configitems(b'keyword'):
430 raise error.Abort(_(b'[keyword] patterns cannot match'))
430 raise error.Abort(_(b'[keyword] patterns cannot match'))
431 raise error.Abort(_(b'no [keyword] patterns configured'))
431 raise error.Abort(_(b'no [keyword] patterns configured'))
432
432
433
433
434 def _kwfwrite(ui, repo, expand, *pats, **opts):
434 def _kwfwrite(ui, repo, expand, *pats, **opts):
435 '''Selects files and passes them to kwtemplater.overwrite.'''
435 '''Selects files and passes them to kwtemplater.overwrite.'''
436 wctx = repo[None]
436 wctx = repo[None]
437 if len(wctx.parents()) > 1:
437 if len(wctx.parents()) > 1:
438 raise error.Abort(_(b'outstanding uncommitted merge'))
438 raise error.Abort(_(b'outstanding uncommitted merge'))
439 kwt = getattr(repo, '_keywordkwt', None)
439 kwt = getattr(repo, '_keywordkwt', None)
440 with repo.wlock():
440 with repo.wlock():
441 status = _status(ui, repo, wctx, kwt, *pats, **opts)
441 status = _status(ui, repo, wctx, kwt, *pats, **opts)
442 if status.modified or status.added or status.removed or status.deleted:
442 if status.modified or status.added or status.removed or status.deleted:
443 raise error.Abort(_(b'outstanding uncommitted changes'))
443 raise error.Abort(_(b'outstanding uncommitted changes'))
444 kwt.overwrite(wctx, status.clean, True, expand)
444 kwt.overwrite(wctx, status.clean, True, expand)
445
445
446
446
447 @command(
447 @command(
448 b'kwdemo',
448 b'kwdemo',
449 [
449 [
450 (b'd', b'default', None, _(b'show default keyword template maps')),
450 (b'd', b'default', None, _(b'show default keyword template maps')),
451 (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
451 (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
452 ],
452 ],
453 _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
453 _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
454 optionalrepo=True,
454 optionalrepo=True,
455 )
455 )
456 def demo(ui, repo, *args, **opts):
456 def demo(ui, repo, *args, **opts):
457 """print [keywordmaps] configuration and an expansion example
457 """print [keywordmaps] configuration and an expansion example
458
458
459 Show current, custom, or default keyword template maps and their
459 Show current, custom, or default keyword template maps and their
460 expansions.
460 expansions.
461
461
462 Extend the current configuration by specifying maps as arguments
462 Extend the current configuration by specifying maps as arguments
463 and using -f/--rcfile to source an external hgrc file.
463 and using -f/--rcfile to source an external hgrc file.
464
464
465 Use -d/--default to disable current configuration.
465 Use -d/--default to disable current configuration.
466
466
467 See :hg:`help templates` for information on templates and filters.
467 See :hg:`help templates` for information on templates and filters.
468 """
468 """
469
469
470 def demoitems(section, items):
470 def demoitems(section, items):
471 ui.write(b'[%s]\n' % section)
471 ui.write(b'[%s]\n' % section)
472 for k, v in sorted(items):
472 for k, v in sorted(items):
473 if isinstance(v, bool):
473 if isinstance(v, bool):
474 v = stringutil.pprint(v)
474 v = stringutil.pprint(v)
475 ui.write(b'%s = %s\n' % (k, v))
475 ui.write(b'%s = %s\n' % (k, v))
476
476
477 fn = b'demo.txt'
477 fn = b'demo.txt'
478 tmpdir = pycompat.mkdtemp(b'', b'kwdemo.')
478 tmpdir = pycompat.mkdtemp(b'', b'kwdemo.')
479 ui.note(_(b'creating temporary repository at %s\n') % tmpdir)
479 ui.note(_(b'creating temporary repository at %s\n') % tmpdir)
480 if repo is None:
480 if repo is None:
481 baseui = ui
481 baseui = ui
482 else:
482 else:
483 baseui = repo.baseui
483 baseui = repo.baseui
484 repo = localrepo.instance(baseui, tmpdir, create=True)
484 repo = localrepo.instance(baseui, tmpdir, create=True)
485 ui.setconfig(b'keyword', fn, b'', b'keyword')
485 ui.setconfig(b'keyword', fn, b'', b'keyword')
486 svn = ui.configbool(b'keywordset', b'svn')
486 svn = ui.configbool(b'keywordset', b'svn')
487 # explicitly set keywordset for demo output
487 # explicitly set keywordset for demo output
488 ui.setconfig(b'keywordset', b'svn', svn, b'keyword')
488 ui.setconfig(b'keywordset', b'svn', svn, b'keyword')
489
489
490 uikwmaps = ui.configitems(b'keywordmaps')
490 uikwmaps = ui.configitems(b'keywordmaps')
491 if args or opts.get('rcfile'):
491 if args or opts.get('rcfile'):
492 ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
492 ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
493 if uikwmaps:
493 if uikwmaps:
494 ui.status(_(b'\textending current template maps\n'))
494 ui.status(_(b'\textending current template maps\n'))
495 if opts.get('default') or not uikwmaps:
495 if opts.get('default') or not uikwmaps:
496 if svn:
496 if svn:
497 ui.status(_(b'\toverriding default svn keywordset\n'))
497 ui.status(_(b'\toverriding default svn keywordset\n'))
498 else:
498 else:
499 ui.status(_(b'\toverriding default cvs keywordset\n'))
499 ui.status(_(b'\toverriding default cvs keywordset\n'))
500 if opts.get('rcfile'):
500 if opts.get('rcfile'):
501 ui.readconfig(opts.get('rcfile'))
501 ui.readconfig(opts.get('rcfile'))
502 if args:
502 if args:
503 # simulate hgrc parsing
503 # simulate hgrc parsing
504 rcmaps = b'[keywordmaps]\n%s\n' % b'\n'.join(args)
504 rcmaps = b'[keywordmaps]\n%s\n' % b'\n'.join(args)
505 repo.vfs.write(b'hgrc', rcmaps)
505 repo.vfs.write(b'hgrc', rcmaps)
506 ui.readconfig(repo.vfs.join(b'hgrc'))
506 ui.readconfig(repo.vfs.join(b'hgrc'))
507 kwmaps = dict(ui.configitems(b'keywordmaps'))
507 kwmaps = dict(ui.configitems(b'keywordmaps'))
508 elif opts.get('default'):
508 elif opts.get('default'):
509 if svn:
509 if svn:
510 ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
510 ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
511 else:
511 else:
512 ui.status(_(b'\n\tconfiguration using default cvs keywordset\n'))
512 ui.status(_(b'\n\tconfiguration using default cvs keywordset\n'))
513 kwmaps = _defaultkwmaps(ui)
513 kwmaps = _defaultkwmaps(ui)
514 if uikwmaps:
514 if uikwmaps:
515 ui.status(_(b'\tdisabling current template maps\n'))
515 ui.status(_(b'\tdisabling current template maps\n'))
516 for k, v in kwmaps.items():
516 for k, v in kwmaps.items():
517 ui.setconfig(b'keywordmaps', k, v, b'keyword')
517 ui.setconfig(b'keywordmaps', k, v, b'keyword')
518 else:
518 else:
519 ui.status(_(b'\n\tconfiguration using current keyword template maps\n'))
519 ui.status(_(b'\n\tconfiguration using current keyword template maps\n'))
520 if uikwmaps:
520 if uikwmaps:
521 kwmaps = dict(uikwmaps)
521 kwmaps = dict(uikwmaps)
522 else:
522 else:
523 kwmaps = _defaultkwmaps(ui)
523 kwmaps = _defaultkwmaps(ui)
524
524
525 uisetup(ui)
525 uisetup(ui)
526 reposetup(ui, repo)
526 reposetup(ui, repo)
527 ui.writenoi18n(b'[extensions]\nkeyword =\n')
527 ui.writenoi18n(b'[extensions]\nkeyword =\n')
528 demoitems(b'keyword', ui.configitems(b'keyword'))
528 demoitems(b'keyword', ui.configitems(b'keyword'))
529 demoitems(b'keywordset', ui.configitems(b'keywordset'))
529 demoitems(b'keywordset', ui.configitems(b'keywordset'))
530 demoitems(b'keywordmaps', kwmaps.items())
530 demoitems(b'keywordmaps', kwmaps.items())
531 keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
531 keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
532 repo.wvfs.write(fn, keywords)
532 repo.wvfs.write(fn, keywords)
533 repo[None].add([fn])
533 repo[None].add([fn])
534 ui.note(_(b'\nkeywords written to %s:\n') % fn)
534 ui.note(_(b'\nkeywords written to %s:\n') % fn)
535 ui.note(keywords)
535 ui.note(keywords)
536 with repo.wlock():
536 with repo.wlock():
537 repo.dirstate.setbranch(b'demobranch')
537 repo.dirstate.setbranch(b'demobranch')
538 for name, cmd in ui.configitems(b'hooks'):
538 for name, cmd in ui.configitems(b'hooks'):
539 if name.split(b'.', 1)[0].find(b'commit') > -1:
539 if name.split(b'.', 1)[0].find(b'commit') > -1:
540 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
540 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
541 msg = _(b'hg keyword configuration and expansion example')
541 msg = _(b'hg keyword configuration and expansion example')
542 ui.note((b"hg ci -m '%s'\n" % msg))
542 ui.note((b"hg ci -m '%s'\n" % msg))
543 repo.commit(text=msg)
543 repo.commit(text=msg)
544 ui.status(_(b'\n\tkeywords expanded\n'))
544 ui.status(_(b'\n\tkeywords expanded\n'))
545 ui.write(repo.wread(fn))
545 ui.write(repo.wread(fn))
546 repo.wvfs.rmtree(repo.root)
546 repo.wvfs.rmtree(repo.root)
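# An abridged, hypothetical transcript of the command:
#   $ hg kwdemo -d
#   [extensions]
#   keyword =
#   [keyword]
#   demo.txt =
#   ...
# followed by the keyword template maps and the expanded demo.txt content.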
547
547
548
548
549 @command(
549 @command(
550 b'kwexpand',
550 b'kwexpand',
551 cmdutil.walkopts,
551 cmdutil.walkopts,
552 _(b'hg kwexpand [OPTION]... [FILE]...'),
552 _(b'hg kwexpand [OPTION]... [FILE]...'),
553 inferrepo=True,
553 inferrepo=True,
554 )
554 )
555 def expand(ui, repo, *pats, **opts):
555 def expand(ui, repo, *pats, **opts):
556 """expand keywords in the working directory
556 """expand keywords in the working directory
557
557
558 Run after (re)enabling keyword expansion.
558 Run after (re)enabling keyword expansion.
559
559
560 kwexpand refuses to run if given files contain local changes.
560 kwexpand refuses to run if given files contain local changes.
561 """
561 """
562 # 3rd argument sets expansion to True
562 # 3rd argument sets expansion to True
563 _kwfwrite(ui, repo, True, *pats, **opts)
563 _kwfwrite(ui, repo, True, *pats, **opts)
564
564
565
565
566 @command(
566 @command(
567 b'kwfiles',
567 b'kwfiles',
568 [
568 [
569 (b'A', b'all', None, _(b'show keyword status flags of all files')),
569 (b'A', b'all', None, _(b'show keyword status flags of all files')),
570 (b'i', b'ignore', None, _(b'show files excluded from expansion')),
570 (b'i', b'ignore', None, _(b'show files excluded from expansion')),
571 (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
571 (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
572 ]
572 ]
573 + cmdutil.walkopts,
573 + cmdutil.walkopts,
574 _(b'hg kwfiles [OPTION]... [FILE]...'),
574 _(b'hg kwfiles [OPTION]... [FILE]...'),
575 inferrepo=True,
575 inferrepo=True,
576 )
576 )
577 def files(ui, repo, *pats, **opts):
577 def files(ui, repo, *pats, **opts):
578 """show files configured for keyword expansion
578 """show files configured for keyword expansion
579
579
580 List which files in the working directory are matched by the
580 List which files in the working directory are matched by the
581 [keyword] configuration patterns.
581 [keyword] configuration patterns.
582
582
583 Useful to prevent inadvertent keyword expansion and to speed up
583 Useful to prevent inadvertent keyword expansion and to speed up
584 execution by including only files that are actual candidates for
584 execution by including only files that are actual candidates for
585 expansion.
585 expansion.
586
586
587 See :hg:`help keyword` on how to construct patterns both for
587 See :hg:`help keyword` on how to construct patterns both for
588 inclusion and exclusion of files.
588 inclusion and exclusion of files.
589
589
590 With -A/--all and -v/--verbose the codes used to show the status
590 With -A/--all and -v/--verbose the codes used to show the status
591 of files are::
591 of files are::
592
592
593 K = keyword expansion candidate
593 K = keyword expansion candidate
594 k = keyword expansion candidate (not tracked)
594 k = keyword expansion candidate (not tracked)
595 I = ignored
595 I = ignored
596 i = ignored (not tracked)
596 i = ignored (not tracked)
597 """
597 """
598 kwt = getattr(repo, '_keywordkwt', None)
598 kwt = getattr(repo, '_keywordkwt', None)
599 wctx = repo[None]
599 wctx = repo[None]
600 status = _status(ui, repo, wctx, kwt, *pats, **opts)
600 status = _status(ui, repo, wctx, kwt, *pats, **opts)
601 if pats:
601 if pats:
602 cwd = repo.getcwd()
602 cwd = repo.getcwd()
603 else:
603 else:
604 cwd = b''
604 cwd = b''
605 files = []
605 files = []
606 opts = pycompat.byteskwargs(opts)
606 opts = pycompat.byteskwargs(opts)
607 if not opts.get(b'unknown') or opts.get(b'all'):
607 if not opts.get(b'unknown') or opts.get(b'all'):
608 files = sorted(status.modified + status.added + status.clean)
608 files = sorted(status.modified + status.added + status.clean)
609 kwfiles = kwt.iskwfile(files, wctx)
609 kwfiles = kwt.iskwfile(files, wctx)
610 kwdeleted = kwt.iskwfile(status.deleted, wctx)
610 kwdeleted = kwt.iskwfile(status.deleted, wctx)
611 kwunknown = kwt.iskwfile(status.unknown, wctx)
611 kwunknown = kwt.iskwfile(status.unknown, wctx)
612 if not opts.get(b'ignore') or opts.get(b'all'):
612 if not opts.get(b'ignore') or opts.get(b'all'):
613 showfiles = kwfiles, kwdeleted, kwunknown
613 showfiles = kwfiles, kwdeleted, kwunknown
614 else:
614 else:
615 showfiles = [], [], []
615 showfiles = [], [], []
616 if opts.get(b'all') or opts.get(b'ignore'):
616 if opts.get(b'all') or opts.get(b'ignore'):
617 showfiles += (
617 showfiles += (
618 [f for f in files if f not in kwfiles],
618 [f for f in files if f not in kwfiles],
619 [f for f in status.unknown if f not in kwunknown],
619 [f for f in status.unknown if f not in kwunknown],
620 )
620 )
621 kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
621 kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
622 kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
622 kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
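# Character/state mapping: K enabled, ! deleted, k enabled but not
# tracked, I ignored, i ignored and not tracked; note that '!' is
# emitted for deleted files although the command help lists only
# K/k/I/i.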
623 fm = ui.formatter(b'kwfiles', opts)
623 fm = ui.formatter(b'kwfiles', opts)
624 fmt = b'%.0s%s\n'
624 fmt = b'%.0s%s\n'
625 if opts.get(b'all') or ui.verbose:
625 if opts.get(b'all') or ui.verbose:
626 fmt = b'%s %s\n'
626 fmt = b'%s %s\n'
627 for kwstate, char, filenames in kwstates:
627 for kwstate, char, filenames in kwstates:
628 label = b'kwfiles.' + kwstate
628 label = b'kwfiles.' + kwstate
629 for f in filenames:
629 for f in filenames:
630 fm.startitem()
630 fm.startitem()
631 fm.data(kwstatus=char, path=f)
631 fm.data(kwstatus=char, path=f)
632 fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
632 fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
633 fm.end()
633 fm.end()
634
634
635
635
636 @command(
636 @command(
637 b'kwshrink',
637 b'kwshrink',
638 cmdutil.walkopts,
638 cmdutil.walkopts,
639 _(b'hg kwshrink [OPTION]... [FILE]...'),
639 _(b'hg kwshrink [OPTION]... [FILE]...'),
640 inferrepo=True,
640 inferrepo=True,
641 )
641 )
642 def shrink(ui, repo, *pats, **opts):
642 def shrink(ui, repo, *pats, **opts):
643 """revert expanded keywords in the working directory
643 """revert expanded keywords in the working directory
644
644
645 Must be run before changing/disabling active keywords.
645 Must be run before changing/disabling active keywords.
646
646
647 kwshrink refuses to run if given files contain local changes.
647 kwshrink refuses to run if given files contain local changes.
648 """
648 """
649 # 3rd argument sets expansion to False
649 # 3rd argument sets expansion to False
650 _kwfwrite(ui, repo, False, *pats, **opts)
650 _kwfwrite(ui, repo, False, *pats, **opts)
651
651
652
652
653 # monkeypatches
653 # monkeypatches
654
654
655
655
656 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
656 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
657 """Monkeypatch/wrap patch.patchfile.__init__ to avoid
657 """Monkeypatch/wrap patch.patchfile.__init__ to avoid
658 rejects or conflicts due to expanded keywords in working dir."""
658 rejects or conflicts due to expanded keywords in working dir."""
659 orig(self, ui, gp, backend, store, eolmode)
659 orig(self, ui, gp, backend, store, eolmode)
660 kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
660 kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
661 if kwt:
661 if kwt:
662 # shrink keywords read from working dir
662 # shrink keywords read from working dir
663 self.lines = kwt.shrinklines(self.fname, self.lines)
663 self.lines = kwt.shrinklines(self.fname, self.lines)
664
664
665
665
666 def kwdiff(orig, repo, *args, **kwargs):
666 def kwdiff(orig, repo, *args, **kwargs):
667 '''Monkeypatch patch.diff to avoid expansion.'''
667 '''Monkeypatch patch.diff to avoid expansion.'''
668 kwt = getattr(repo, '_keywordkwt', None)
668 kwt = getattr(repo, '_keywordkwt', None)
669 if kwt:
669 if kwt:
670 restrict = kwt.restrict
670 restrict = kwt.restrict
671 kwt.restrict = True
671 kwt.restrict = True
672 try:
672 try:
673 for chunk in orig(repo, *args, **kwargs):
673 for chunk in orig(repo, *args, **kwargs):
674 yield chunk
674 yield chunk
675 finally:
675 finally:
676 if kwt:
676 if kwt:
677 kwt.restrict = restrict
677 kwt.restrict = restrict
678
678
679
679
680 def kwweb_skip(orig, web):
680 def kwweb_skip(orig, web):
681 '''Wraps webcommands.x turning off keyword expansion.'''
681 '''Wraps webcommands.x turning off keyword expansion.'''
682 kwt = getattr(web.repo, '_keywordkwt', None)
682 kwt = getattr(web.repo, '_keywordkwt', None)
683 if kwt:
683 if kwt:
684 origmatch = kwt.match
684 origmatch = kwt.match
685 kwt.match = util.never
685 kwt.match = util.never
686 try:
686 try:
687 for chunk in orig(web):
687 for chunk in orig(web):
688 yield chunk
688 yield chunk
689 finally:
689 finally:
690 if kwt:
690 if kwt:
691 kwt.match = origmatch
691 kwt.match = origmatch
692
692
693
693
694 def kw_amend(orig, ui, repo, old, extra, pats, opts):
694 def kw_amend(orig, ui, repo, old, extra, pats, opts):
695 '''Wraps cmdutil.amend expanding keywords after amend.'''
695 '''Wraps cmdutil.amend expanding keywords after amend.'''
696 kwt = getattr(repo, '_keywordkwt', None)
696 kwt = getattr(repo, '_keywordkwt', None)
697 if kwt is None:
697 if kwt is None:
698 return orig(ui, repo, old, extra, pats, opts)
698 return orig(ui, repo, old, extra, pats, opts)
699 with repo.wlock(), repo.dirstate.parentchange(repo):
699 with repo.wlock(), repo.dirstate.changing_parents(repo):
700 kwt.postcommit = True
700 kwt.postcommit = True
701 newid = orig(ui, repo, old, extra, pats, opts)
701 newid = orig(ui, repo, old, extra, pats, opts)
702 if newid != old.node():
702 if newid != old.node():
703 ctx = repo[newid]
703 ctx = repo[newid]
704 kwt.restrict = True
704 kwt.restrict = True
705 kwt.overwrite(ctx, ctx.files(), False, True)
705 kwt.overwrite(ctx, ctx.files(), False, True)
706 kwt.restrict = False
706 kwt.restrict = False
707 return newid
707 return newid
708
708
709
709
710 def kw_copy(orig, ui, repo, pats, opts, rename=False):
710 def kw_copy(orig, ui, repo, pats, opts, rename=False):
711 """Wraps cmdutil.copy so that copy/rename destinations do not
711 """Wraps cmdutil.copy so that copy/rename destinations do not
712 contain expanded keywords.
712 contain expanded keywords.
713 Note that the source of a regular file destination may also be a
713 Note that the source of a regular file destination may also be a
714 symlink:
714 symlink:
715 hg cp sym x -> x is symlink
715 hg cp sym x -> x is symlink
716 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
716 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
717 For the latter we have to follow the symlink to find out whether its
717 For the latter we have to follow the symlink to find out whether its
718 target is configured for expansion and we therefore must unexpand the
718 target is configured for expansion and we therefore must unexpand the
719 keywords in the destination."""
719 keywords in the destination."""
720 kwt = getattr(repo, '_keywordkwt', None)
720 kwt = getattr(repo, '_keywordkwt', None)
721 if kwt is None:
721 if kwt is None:
722 return orig(ui, repo, pats, opts, rename)
722 return orig(ui, repo, pats, opts, rename)
723 with repo.wlock():
723 with repo.wlock():
724 orig(ui, repo, pats, opts, rename)
724 orig(ui, repo, pats, opts, rename)
725 if opts.get(b'dry_run'):
725 if opts.get(b'dry_run'):
726 return
726 return
727 wctx = repo[None]
727 wctx = repo[None]
728 cwd = repo.getcwd()
728 cwd = repo.getcwd()
729
729
730 def haskwsource(dest):
730 def haskwsource(dest):
731 """Returns true if dest is a regular file and configured for
731 """Returns true if dest is a regular file and configured for
732 expansion or a symlink which points to a file configured for
732 expansion or a symlink which points to a file configured for
733 expansion."""
733 expansion."""
734 source = repo.dirstate.copied(dest)
734 source = repo.dirstate.copied(dest)
735 if b'l' in wctx.flags(source):
735 if b'l' in wctx.flags(source):
736 source = pathutil.canonpath(
736 source = pathutil.canonpath(
737 repo.root, cwd, os.path.realpath(source)
737 repo.root, cwd, os.path.realpath(source)
738 )
738 )
739 return kwt.match(source)
739 return kwt.match(source)
740
740
741 candidates = [
741 candidates = [
742 f
742 f
743 for f in repo.dirstate.copies()
743 for f in repo.dirstate.copies()
744 if b'l' not in wctx.flags(f) and haskwsource(f)
744 if b'l' not in wctx.flags(f) and haskwsource(f)
745 ]
745 ]
746 kwt.overwrite(wctx, candidates, False, False)
746 kwt.overwrite(wctx, candidates, False, False)
747
747
748
748
749 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
749 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
750 '''Wraps record.dorecord expanding keywords after recording.'''
750 '''Wraps record.dorecord expanding keywords after recording.'''
751 kwt = getattr(repo, '_keywordkwt', None)
751 kwt = getattr(repo, '_keywordkwt', None)
752 if kwt is None:
752 if kwt is None:
753 return orig(ui, repo, commitfunc, *pats, **opts)
753 return orig(ui, repo, commitfunc, *pats, **opts)
754 with repo.wlock():
754 with repo.wlock():
755 # record returns 0 even when nothing has changed
755 # record returns 0 even when nothing has changed
756 # therefore compare nodes before and after
756 # therefore compare nodes before and after
757 kwt.postcommit = True
757 kwt.postcommit = True
758 ctx = repo[b'.']
758 ctx = repo[b'.']
759 wstatus = ctx.status()
759 wstatus = ctx.status()
760 ret = orig(ui, repo, commitfunc, *pats, **opts)
760 ret = orig(ui, repo, commitfunc, *pats, **opts)
761 recctx = repo[b'.']
761 recctx = repo[b'.']
762 if ctx != recctx:
762 if ctx != recctx:
763 modified, added = _preselect(wstatus, recctx.files())
763 modified, added = _preselect(wstatus, recctx.files())
764 kwt.restrict = False
764 kwt.restrict = False
765 with repo.dirstate.parentchange(repo):
765 with repo.dirstate.changing_parents(repo):
766 kwt.overwrite(recctx, modified, False, True)
766 kwt.overwrite(recctx, modified, False, True)
767 kwt.overwrite(recctx, added, False, True, True)
767 kwt.overwrite(recctx, added, False, True, True)
768 kwt.restrict = True
768 kwt.restrict = True
769 return ret
769 return ret
770
770
771
771
772 def kwfilectx_cmp(orig, self, fctx):
772 def kwfilectx_cmp(orig, self, fctx):
773 if fctx._customcmp:
773 if fctx._customcmp:
774 return fctx.cmp(self)
774 return fctx.cmp(self)
775 kwt = getattr(self._repo, '_keywordkwt', None)
775 kwt = getattr(self._repo, '_keywordkwt', None)
776 if kwt is None:
776 if kwt is None:
777 return orig(self, fctx)
777 return orig(self, fctx)
778 # keyword affects data size, comparing wdir and filelog size does
778 # keyword affects data size, comparing wdir and filelog size does
779 # not make sense
779 # not make sense
780 if (
780 if (
781 fctx._filenode is None
781 fctx._filenode is None
782 and (
782 and (
783 self._repo._encodefilterpats
783 self._repo._encodefilterpats
784 or kwt.match(fctx.path())
784 or kwt.match(fctx.path())
785 and b'l' not in fctx.flags()
785 and b'l' not in fctx.flags()
786 or self.size() - 4 == fctx.size()
786 or self.size() - 4 == fctx.size()
787 )
787 )
788 or self.size() == fctx.size()
788 or self.size() == fctx.size()
789 ):
789 ):
790 return self._filelog.cmp(self._filenode, fctx.data())
790 return self._filelog.cmp(self._filenode, fctx.data())
791 return True
791 return True
792
792
793
793
794 def uisetup(ui):
794 def uisetup(ui):
795 """Monkeypatches dispatch._parse to retrieve user command.
795 """Monkeypatches dispatch._parse to retrieve user command.
796 Overrides file method to return kwfilelog instead of filelog
796 Overrides file method to return kwfilelog instead of filelog
797 if file matches user configuration.
797 if file matches user configuration.
798 Wraps commit to overwrite configured files with updated
798 Wraps commit to overwrite configured files with updated
799 keyword substitutions.
799 keyword substitutions.
800 Monkeypatches patch and webcommands."""
800 Monkeypatches patch and webcommands."""
801
801
802 def kwdispatch_parse(orig, ui, args):
802 def kwdispatch_parse(orig, ui, args):
803 '''Monkeypatch dispatch._parse to obtain running hg command.'''
803 '''Monkeypatch dispatch._parse to obtain running hg command.'''
804 cmd, func, args, options, cmdoptions = orig(ui, args)
804 cmd, func, args, options, cmdoptions = orig(ui, args)
805 kwtools[b'hgcmd'] = cmd
805 kwtools[b'hgcmd'] = cmd
806 return cmd, func, args, options, cmdoptions
806 return cmd, func, args, options, cmdoptions
807
807
808 extensions.wrapfunction(dispatch, b'_parse', kwdispatch_parse)
808 extensions.wrapfunction(dispatch, b'_parse', kwdispatch_parse)
809
809
810 extensions.wrapfunction(context.filectx, b'cmp', kwfilectx_cmp)
810 extensions.wrapfunction(context.filectx, b'cmp', kwfilectx_cmp)
811 extensions.wrapfunction(patch.patchfile, b'__init__', kwpatchfile_init)
811 extensions.wrapfunction(patch.patchfile, b'__init__', kwpatchfile_init)
812 extensions.wrapfunction(patch, b'diff', kwdiff)
812 extensions.wrapfunction(patch, b'diff', kwdiff)
813 extensions.wrapfunction(cmdutil, b'amend', kw_amend)
813 extensions.wrapfunction(cmdutil, b'amend', kw_amend)
814 extensions.wrapfunction(cmdutil, b'copy', kw_copy)
814 extensions.wrapfunction(cmdutil, b'copy', kw_copy)
815 extensions.wrapfunction(cmdutil, b'dorecord', kw_dorecord)
815 extensions.wrapfunction(cmdutil, b'dorecord', kw_dorecord)
816 for c in nokwwebcommands.split():
816 for c in nokwwebcommands.split():
817 extensions.wrapfunction(webcommands, c, kwweb_skip)
817 extensions.wrapfunction(webcommands, c, kwweb_skip)
818
818
819
819
820 def reposetup(ui, repo):
820 def reposetup(ui, repo):
821 '''Sets up repo as kwrepo for keyword substitution.'''
821 '''Sets up repo as kwrepo for keyword substitution.'''
822
822
823 try:
823 try:
824 if (
824 if (
825 not repo.local()
825 not repo.local()
826 or kwtools[b'hgcmd'] in nokwcommands.split()
826 or kwtools[b'hgcmd'] in nokwcommands.split()
827 or b'.hg' in util.splitpath(repo.root)
827 or b'.hg' in util.splitpath(repo.root)
828 or repo._url.startswith(b'bundle:')
828 or repo._url.startswith(b'bundle:')
829 ):
829 ):
830 return
830 return
831 except AttributeError:
831 except AttributeError:
832 pass
832 pass
833
833
834 inc, exc = [], [b'.hg*']
834 inc, exc = [], [b'.hg*']
835 for pat, opt in ui.configitems(b'keyword'):
835 for pat, opt in ui.configitems(b'keyword'):
836 if opt != b'ignore':
836 if opt != b'ignore':
837 inc.append(pat)
837 inc.append(pat)
838 else:
838 else:
839 exc.append(pat)
839 exc.append(pat)
840 if not inc:
840 if not inc:
841 return
841 return
842
842
843 kwt = kwtemplater(ui, repo, inc, exc)
843 kwt = kwtemplater(ui, repo, inc, exc)
844
844
845 class kwrepo(repo.__class__):
845 class kwrepo(repo.__class__):
846 def file(self, f):
846 def file(self, f):
847 if f[0] == b'/':
847 if f[0] == b'/':
848 f = f[1:]
848 f = f[1:]
849 return kwfilelog(self.svfs, kwt, f)
849 return kwfilelog(self.svfs, kwt, f)
850
850
851 def wread(self, filename):
851 def wread(self, filename):
852 data = super(kwrepo, self).wread(filename)
852 data = super(kwrepo, self).wread(filename)
853 return kwt.wread(filename, data)
853 return kwt.wread(filename, data)
854
854
855 def commit(self, *args, **opts):
855 def commit(self, *args, **opts):
856 # use custom commitctx for user commands
856 # use custom commitctx for user commands
857 # other extensions can still wrap repo.commitctx directly
857 # other extensions can still wrap repo.commitctx directly
858 self.commitctx = self.kwcommitctx
858 self.commitctx = self.kwcommitctx
859 try:
859 try:
860 return super(kwrepo, self).commit(*args, **opts)
860 return super(kwrepo, self).commit(*args, **opts)
861 finally:
861 finally:
862 del self.commitctx
862 del self.commitctx
863
863
864 def kwcommitctx(self, ctx, error=False, origctx=None):
864 def kwcommitctx(self, ctx, error=False, origctx=None):
865 n = super(kwrepo, self).commitctx(ctx, error, origctx)
865 n = super(kwrepo, self).commitctx(ctx, error, origctx)
866 # no lock needed, only called from repo.commit() which already locks
866 # no lock needed, only called from repo.commit() which already locks
867 if not kwt.postcommit:
867 if not kwt.postcommit:
868 restrict = kwt.restrict
868 restrict = kwt.restrict
869 kwt.restrict = True
869 kwt.restrict = True
870 kwt.overwrite(
870 kwt.overwrite(
871 self[n], sorted(ctx.added() + ctx.modified()), False, True
871 self[n], sorted(ctx.added() + ctx.modified()), False, True
872 )
872 )
873 kwt.restrict = restrict
873 kwt.restrict = restrict
874 return n
874 return n
875
875
876 def rollback(self, dryrun=False, force=False):
876 def rollback(self, dryrun=False, force=False):
877 with self.wlock():
877 with self.wlock():
878 origrestrict = kwt.restrict
878 origrestrict = kwt.restrict
879 try:
879 try:
880 if not dryrun:
880 if not dryrun:
881 changed = self[b'.'].files()
881 changed = self[b'.'].files()
882 ret = super(kwrepo, self).rollback(dryrun, force)
882 ret = super(kwrepo, self).rollback(dryrun, force)
883 if not dryrun:
883 if not dryrun:
884 ctx = self[b'.']
884 ctx = self[b'.']
885 modified, added = _preselect(ctx.status(), changed)
885 modified, added = _preselect(ctx.status(), changed)
886 kwt.restrict = False
886 kwt.restrict = False
887 kwt.overwrite(ctx, modified, True, True)
887 kwt.overwrite(ctx, modified, True, True)
888 kwt.overwrite(ctx, added, True, False)
888 kwt.overwrite(ctx, added, True, False)
889 return ret
889 return ret
890 finally:
890 finally:
891 kwt.restrict = origrestrict
891 kwt.restrict = origrestrict
892
892
893 repo.__class__ = kwrepo
893 repo.__class__ = kwrepo
894 repo._keywordkwt = kwt
894 repo._keywordkwt = kwt
@@ -1,673 +1,673 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import binascii
11 import binascii
12 import os
12 import os
13 import shutil
13 import shutil
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.node import (
16 from mercurial.node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 )
19 )
20
20
21 from mercurial import (
21 from mercurial import (
22 cmdutil,
22 cmdutil,
23 context,
23 context,
24 error,
24 error,
25 exthelper,
25 exthelper,
26 hg,
26 hg,
27 lock,
27 lock,
28 logcmdutil,
28 logcmdutil,
29 match as matchmod,
29 match as matchmod,
30 pycompat,
30 pycompat,
31 scmutil,
31 scmutil,
32 util,
32 util,
33 )
33 )
34 from mercurial.utils import hashutil
34 from mercurial.utils import hashutil
35
35
36 from ..convert import (
36 from ..convert import (
37 convcmd,
37 convcmd,
38 filemap,
38 filemap,
39 )
39 )
40
40
41 from . import lfutil, storefactory
41 from . import lfutil, storefactory
42
42
43 release = lock.release
43 release = lock.release
44
44
45 # -- Commands ----------------------------------------------------------
45 # -- Commands ----------------------------------------------------------
46
46
47 eh = exthelper.exthelper()
47 eh = exthelper.exthelper()
48
48
49
49
50 @eh.command(
50 @eh.command(
51 b'lfconvert',
51 b'lfconvert',
52 [
52 [
53 (
53 (
54 b's',
54 b's',
55 b'size',
55 b'size',
56 b'',
56 b'',
57 _(b'minimum size (MB) for files to be converted as largefiles'),
57 _(b'minimum size (MB) for files to be converted as largefiles'),
58 b'SIZE',
58 b'SIZE',
59 ),
59 ),
60 (
60 (
61 b'',
61 b'',
62 b'to-normal',
62 b'to-normal',
63 False,
63 False,
64 _(b'convert from a largefiles repo to a normal repo'),
64 _(b'convert from a largefiles repo to a normal repo'),
65 ),
65 ),
66 ],
66 ],
67 _(b'hg lfconvert SOURCE DEST [FILE ...]'),
67 _(b'hg lfconvert SOURCE DEST [FILE ...]'),
68 norepo=True,
68 norepo=True,
69 inferrepo=True,
69 inferrepo=True,
70 )
70 )
71 def lfconvert(ui, src, dest, *pats, **opts):
71 def lfconvert(ui, src, dest, *pats, **opts):
72 """convert a normal repository to a largefiles repository
72 """convert a normal repository to a largefiles repository
73
73
74 Convert repository SOURCE to a new repository DEST, identical to
74 Convert repository SOURCE to a new repository DEST, identical to
75 SOURCE except that certain files will be converted as largefiles:
75 SOURCE except that certain files will be converted as largefiles:
76 specifically, any file that matches any PATTERN *or* whose size is
76 specifically, any file that matches any PATTERN *or* whose size is
77 above the minimum size threshold is converted as a largefile. The
77 above the minimum size threshold is converted as a largefile. The
78 size used to determine whether or not to track a file as a
78 size used to determine whether or not to track a file as a
79 largefile is the size of the first version of the file. The
79 largefile is the size of the first version of the file. The
80 minimum size can be specified either with --size or in
80 minimum size can be specified either with --size or in
81 configuration as ``largefiles.size``.
81 configuration as ``largefiles.size``.
82
82
83 After running this command you will need to make sure that
83 After running this command you will need to make sure that
84 largefiles is enabled anywhere you intend to push the new
84 largefiles is enabled anywhere you intend to push the new
85 repository.
85 repository.
86
86
87 Use --to-normal to convert largefiles back to normal files; after
87 Use --to-normal to convert largefiles back to normal files; after
88 this, the DEST repository can be used without largefiles at all."""
88 this, the DEST repository can be used without largefiles at all."""
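
A minimal usage sketch (repository names and the extra pattern are
hypothetical; the flags are the ones documented above):

    # convert, treating files over 10 MB or matching the pattern as largefiles
    hg lfconvert --size 10 bigrepo bigrepo-lf '**.iso'
    # later, convert back to a plain repository
    hg lfconvert --to-normal bigrepo-lf plainrepo
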
89
89
90 opts = pycompat.byteskwargs(opts)
90 opts = pycompat.byteskwargs(opts)
91 if opts[b'to_normal']:
91 if opts[b'to_normal']:
92 tolfile = False
92 tolfile = False
93 else:
93 else:
94 tolfile = True
94 tolfile = True
95 size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
95 size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
96
96
97 if not hg.islocal(src):
97 if not hg.islocal(src):
98 raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
98 raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
99 if not hg.islocal(dest):
99 if not hg.islocal(dest):
100 raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)
100 raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)
101
101
102 rsrc = hg.repository(ui, src)
102 rsrc = hg.repository(ui, src)
103 ui.status(_(b'initializing destination %s\n') % dest)
103 ui.status(_(b'initializing destination %s\n') % dest)
104 rdst = hg.repository(ui, dest, create=True)
104 rdst = hg.repository(ui, dest, create=True)
105
105
106 success = False
106 success = False
107 dstwlock = dstlock = None
107 dstwlock = dstlock = None
108 try:
108 try:
109 # Get a list of all changesets in the source. The easy way to do this
109 # Get a list of all changesets in the source. The easy way to do this
110 # is to simply walk the changelog, using changelog.nodesbetween().
110 # is to simply walk the changelog, using changelog.nodesbetween().
111 # Take a look at mercurial/revlog.py:639 for more details.
111 # Take a look at mercurial/revlog.py:639 for more details.
112 # Use a generator instead of a list to decrease memory usage
112 # Use a generator instead of a list to decrease memory usage
113 ctxs = (
113 ctxs = (
114 rsrc[ctx]
114 rsrc[ctx]
115 for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
115 for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
116 )
116 )
117 revmap = {rsrc.nullid: rdst.nullid}
117 revmap = {rsrc.nullid: rdst.nullid}
118 if tolfile:
118 if tolfile:
119 # Lock destination to prevent modification while it is converted to.
119 # Lock destination to prevent modification while it is converted to.
120 # Don't need to lock src because we are just reading from its
120 # Don't need to lock src because we are just reading from its
121 # history which can't change.
121 # history which can't change.
122 dstwlock = rdst.wlock()
122 dstwlock = rdst.wlock()
123 dstlock = rdst.lock()
123 dstlock = rdst.lock()
124
124
125 lfiles = set()
125 lfiles = set()
126 normalfiles = set()
126 normalfiles = set()
127 if not pats:
127 if not pats:
128 pats = ui.configlist(lfutil.longname, b'patterns')
128 pats = ui.configlist(lfutil.longname, b'patterns')
129 if pats:
129 if pats:
130 matcher = matchmod.match(rsrc.root, b'', list(pats))
130 matcher = matchmod.match(rsrc.root, b'', list(pats))
131 else:
131 else:
132 matcher = None
132 matcher = None
133
133
134 lfiletohash = {}
134 lfiletohash = {}
135 with ui.makeprogress(
135 with ui.makeprogress(
136 _(b'converting revisions'),
136 _(b'converting revisions'),
137 unit=_(b'revisions'),
137 unit=_(b'revisions'),
138 total=rsrc[b'tip'].rev(),
138 total=rsrc[b'tip'].rev(),
139 ) as progress:
139 ) as progress:
140 for ctx in ctxs:
140 for ctx in ctxs:
141 progress.update(ctx.rev())
141 progress.update(ctx.rev())
142 _lfconvert_addchangeset(
142 _lfconvert_addchangeset(
143 rsrc,
143 rsrc,
144 rdst,
144 rdst,
145 ctx,
145 ctx,
146 revmap,
146 revmap,
147 lfiles,
147 lfiles,
148 normalfiles,
148 normalfiles,
149 matcher,
149 matcher,
150 size,
150 size,
151 lfiletohash,
151 lfiletohash,
152 )
152 )
153
153
154 if rdst.wvfs.exists(lfutil.shortname):
154 if rdst.wvfs.exists(lfutil.shortname):
155 rdst.wvfs.rmtree(lfutil.shortname)
155 rdst.wvfs.rmtree(lfutil.shortname)
156
156
157 for f in lfiletohash.keys():
157 for f in lfiletohash.keys():
158 if rdst.wvfs.isfile(f):
158 if rdst.wvfs.isfile(f):
159 rdst.wvfs.unlink(f)
159 rdst.wvfs.unlink(f)
160 try:
160 try:
161 rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
161 rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
162 except OSError:
162 except OSError:
163 pass
163 pass
164
164
165 # If there were any files converted to largefiles, add largefiles
165 # If there were any files converted to largefiles, add largefiles
166 # to the destination repository's requirements.
166 # to the destination repository's requirements.
167 if lfiles:
167 if lfiles:
168 rdst.requirements.add(b'largefiles')
168 rdst.requirements.add(b'largefiles')
169 scmutil.writereporequirements(rdst)
169 scmutil.writereporequirements(rdst)
170 else:
170 else:
171
171
172 class lfsource(filemap.filemap_source):
172 class lfsource(filemap.filemap_source):
173 def __init__(self, ui, source):
173 def __init__(self, ui, source):
174 super(lfsource, self).__init__(ui, source, None)
174 super(lfsource, self).__init__(ui, source, None)
175 self.filemapper.rename[lfutil.shortname] = b'.'
175 self.filemapper.rename[lfutil.shortname] = b'.'
176
176
177 def getfile(self, name, rev):
177 def getfile(self, name, rev):
178 realname, realrev = rev
178 realname, realrev = rev
179 f = super(lfsource, self).getfile(name, rev)
179 f = super(lfsource, self).getfile(name, rev)
180
180
181 if (
181 if (
182 not realname.startswith(lfutil.shortnameslash)
182 not realname.startswith(lfutil.shortnameslash)
183 or f[0] is None
183 or f[0] is None
184 ):
184 ):
185 return f
185 return f
186
186
187 # Substitute in the largefile data for the hash
187 # Substitute in the largefile data for the hash
188 hash = f[0].strip()
188 hash = f[0].strip()
189 path = lfutil.findfile(rsrc, hash)
189 path = lfutil.findfile(rsrc, hash)
190
190
191 if path is None:
191 if path is None:
192 raise error.Abort(
192 raise error.Abort(
193 _(b"missing largefile for '%s' in %s")
193 _(b"missing largefile for '%s' in %s")
194 % (realname, realrev)
194 % (realname, realrev)
195 )
195 )
196 return util.readfile(path), f[1]
196 return util.readfile(path), f[1]
197
197
198 class converter(convcmd.converter):
198 class converter(convcmd.converter):
199 def __init__(self, ui, source, dest, revmapfile, opts):
199 def __init__(self, ui, source, dest, revmapfile, opts):
200 src = lfsource(ui, source)
200 src = lfsource(ui, source)
201
201
202 super(converter, self).__init__(
202 super(converter, self).__init__(
203 ui, src, dest, revmapfile, opts
203 ui, src, dest, revmapfile, opts
204 )
204 )
205
205
206 found, missing = downloadlfiles(ui, rsrc)
206 found, missing = downloadlfiles(ui, rsrc)
207 if missing != 0:
207 if missing != 0:
208 raise error.Abort(_(b"all largefiles must be present locally"))
208 raise error.Abort(_(b"all largefiles must be present locally"))
209
209
210 orig = convcmd.converter
210 orig = convcmd.converter
211 convcmd.converter = converter
211 convcmd.converter = converter
212
212
213 try:
213 try:
214 convcmd.convert(
214 convcmd.convert(
215 ui, src, dest, source_type=b'hg', dest_type=b'hg'
215 ui, src, dest, source_type=b'hg', dest_type=b'hg'
216 )
216 )
217 finally:
217 finally:
218 convcmd.converter = orig
218 convcmd.converter = orig
219 success = True
219 success = True
220 finally:
220 finally:
221 if tolfile:
221 if tolfile:
222 rdst.dirstate.clear()
222 rdst.dirstate.clear()
223 release(dstlock, dstwlock)
223 release(dstlock, dstwlock)
224 if not success:
224 if not success:
225 # we failed, remove the new directory
225 # we failed, remove the new directory
226 shutil.rmtree(rdst.root)
226 shutil.rmtree(rdst.root)
227
227
228
228
229 def _lfconvert_addchangeset(
229 def _lfconvert_addchangeset(
230 rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
230 rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
231 ):
231 ):
232 # Convert src parents to dst parents
232 # Convert src parents to dst parents
233 parents = _convertparents(ctx, revmap)
233 parents = _convertparents(ctx, revmap)
234
234
235 # Generate list of changed files
235 # Generate list of changed files
236 files = _getchangedfiles(ctx, parents)
236 files = _getchangedfiles(ctx, parents)
237
237
238 dstfiles = []
238 dstfiles = []
239 for f in files:
239 for f in files:
240 if f not in lfiles and f not in normalfiles:
240 if f not in lfiles and f not in normalfiles:
241 islfile = _islfile(f, ctx, matcher, size)
241 islfile = _islfile(f, ctx, matcher, size)
242 # If this file was renamed or copied then copy
242 # If this file was renamed or copied then copy
243 # the largefile-ness of its predecessor
243 # the largefile-ness of its predecessor
244 if f in ctx.manifest():
244 if f in ctx.manifest():
245 fctx = ctx.filectx(f)
245 fctx = ctx.filectx(f)
246 renamed = fctx.copysource()
246 renamed = fctx.copysource()
247 if renamed is None:
247 if renamed is None:
248 # the code below assumes renamed is a boolean or a list
248 # the code below assumes renamed is a boolean or a list
249 # and won't quite work with the value None
249 # and won't quite work with the value None
250 renamed = False
250 renamed = False
251 renamedlfile = renamed and renamed in lfiles
251 renamedlfile = renamed and renamed in lfiles
252 islfile |= renamedlfile
252 islfile |= renamedlfile
253 if b'l' in fctx.flags():
253 if b'l' in fctx.flags():
254 if renamedlfile:
254 if renamedlfile:
255 raise error.Abort(
255 raise error.Abort(
256 _(b'renamed/copied largefile %s becomes symlink')
256 _(b'renamed/copied largefile %s becomes symlink')
257 % f
257 % f
258 )
258 )
259 islfile = False
259 islfile = False
260 if islfile:
260 if islfile:
261 lfiles.add(f)
261 lfiles.add(f)
262 else:
262 else:
263 normalfiles.add(f)
263 normalfiles.add(f)
264
264
265 if f in lfiles:
265 if f in lfiles:
266 fstandin = lfutil.standin(f)
266 fstandin = lfutil.standin(f)
267 dstfiles.append(fstandin)
267 dstfiles.append(fstandin)
268 # largefile in manifest if it has not been removed/renamed
268 # largefile in manifest if it has not been removed/renamed
269 if f in ctx.manifest():
269 if f in ctx.manifest():
270 fctx = ctx.filectx(f)
270 fctx = ctx.filectx(f)
271 if b'l' in fctx.flags():
271 if b'l' in fctx.flags():
272 renamed = fctx.copysource()
272 renamed = fctx.copysource()
273 if renamed and renamed in lfiles:
273 if renamed and renamed in lfiles:
274 raise error.Abort(
274 raise error.Abort(
275 _(b'largefile %s becomes symlink') % f
275 _(b'largefile %s becomes symlink') % f
276 )
276 )
277
277
278 # largefile was modified, update standins
278 # largefile was modified, update standins
279 m = hashutil.sha1(b'')
279 m = hashutil.sha1(b'')
280 m.update(ctx[f].data())
280 m.update(ctx[f].data())
281 hash = hex(m.digest())
281 hash = hex(m.digest())
282 if f not in lfiletohash or lfiletohash[f] != hash:
282 if f not in lfiletohash or lfiletohash[f] != hash:
283 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
283 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
284 executable = b'x' in ctx[f].flags()
284 executable = b'x' in ctx[f].flags()
285 lfutil.writestandin(rdst, fstandin, hash, executable)
285 lfutil.writestandin(rdst, fstandin, hash, executable)
286 lfiletohash[f] = hash
286 lfiletohash[f] = hash
287 else:
287 else:
288 # normal file
288 # normal file
289 dstfiles.append(f)
289 dstfiles.append(f)
290
290
291 def getfilectx(repo, memctx, f):
291 def getfilectx(repo, memctx, f):
292 srcfname = lfutil.splitstandin(f)
292 srcfname = lfutil.splitstandin(f)
293 if srcfname is not None:
293 if srcfname is not None:
294 # if the file isn't in the manifest then it was removed
294 # if the file isn't in the manifest then it was removed
295 # or renamed, return None to indicate this
295 # or renamed, return None to indicate this
296 try:
296 try:
297 fctx = ctx.filectx(srcfname)
297 fctx = ctx.filectx(srcfname)
298 except error.LookupError:
298 except error.LookupError:
299 return None
299 return None
300 renamed = fctx.copysource()
300 renamed = fctx.copysource()
301 if renamed:
301 if renamed:
302 # standin is always a largefile because largefile-ness
302 # standin is always a largefile because largefile-ness
303 # doesn't change after rename or copy
303 # doesn't change after rename or copy
304 renamed = lfutil.standin(renamed)
304 renamed = lfutil.standin(renamed)
305
305
306 return context.memfilectx(
306 return context.memfilectx(
307 repo,
307 repo,
308 memctx,
308 memctx,
309 f,
309 f,
310 lfiletohash[srcfname] + b'\n',
310 lfiletohash[srcfname] + b'\n',
311 b'l' in fctx.flags(),
311 b'l' in fctx.flags(),
312 b'x' in fctx.flags(),
312 b'x' in fctx.flags(),
313 renamed,
313 renamed,
314 )
314 )
315 else:
315 else:
316 return _getnormalcontext(repo, ctx, f, revmap)
316 return _getnormalcontext(repo, ctx, f, revmap)
317
317
318 # Commit
318 # Commit
319 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
319 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
320
320
321
321
322 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
322 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
323 mctx = context.memctx(
323 mctx = context.memctx(
324 rdst,
324 rdst,
325 parents,
325 parents,
326 ctx.description(),
326 ctx.description(),
327 dstfiles,
327 dstfiles,
328 getfilectx,
328 getfilectx,
329 ctx.user(),
329 ctx.user(),
330 ctx.date(),
330 ctx.date(),
331 ctx.extra(),
331 ctx.extra(),
332 )
332 )
333 ret = rdst.commitctx(mctx)
333 ret = rdst.commitctx(mctx)
334 lfutil.copyalltostore(rdst, ret)
334 lfutil.copyalltostore(rdst, ret)
335 rdst.setparents(ret)
335 rdst.setparents(ret)
336 revmap[ctx.node()] = rdst.changelog.tip()
336 revmap[ctx.node()] = rdst.changelog.tip()
337
337
338
338
339 # Generate list of changed files
339 # Generate list of changed files
340 def _getchangedfiles(ctx, parents):
340 def _getchangedfiles(ctx, parents):
341 files = set(ctx.files())
341 files = set(ctx.files())
342 if ctx.repo().nullid not in parents:
342 if ctx.repo().nullid not in parents:
343 mc = ctx.manifest()
343 mc = ctx.manifest()
344 for pctx in ctx.parents():
344 for pctx in ctx.parents():
345 for fn in pctx.manifest().diff(mc):
345 for fn in pctx.manifest().diff(mc):
346 files.add(fn)
346 files.add(fn)
347 return files
347 return files
348
348
349
349
350 # Convert src parents to dst parents
350 # Convert src parents to dst parents
351 def _convertparents(ctx, revmap):
351 def _convertparents(ctx, revmap):
352 parents = []
352 parents = []
353 for p in ctx.parents():
353 for p in ctx.parents():
354 parents.append(revmap[p.node()])
354 parents.append(revmap[p.node()])
355 while len(parents) < 2:
355 while len(parents) < 2:
356 parents.append(ctx.repo().nullid)
356 parents.append(ctx.repo().nullid)
357 return parents
357 return parents
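
A small sketch of the expected result, assuming both parents already have
entries in revmap (values hypothetical):

    # merge changeset: parents == [revmap[p1.node()], revmap[p2.node()]]
    # root changeset:  parents == [nullid, nullid], via the padding loop
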
358
358
359
359
360 # Get memfilectx for a normal file
360 # Get memfilectx for a normal file
361 def _getnormalcontext(repo, ctx, f, revmap):
361 def _getnormalcontext(repo, ctx, f, revmap):
362 try:
362 try:
363 fctx = ctx.filectx(f)
363 fctx = ctx.filectx(f)
364 except error.LookupError:
364 except error.LookupError:
365 return None
365 return None
366 renamed = fctx.copysource()
366 renamed = fctx.copysource()
367
367
368 data = fctx.data()
368 data = fctx.data()
369 if f == b'.hgtags':
369 if f == b'.hgtags':
370 data = _converttags(repo.ui, revmap, data)
370 data = _converttags(repo.ui, revmap, data)
371 return context.memfilectx(
371 return context.memfilectx(
372 repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
372 repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
373 )
373 )
374
374
375
375
376 # Remap tag data using a revision map
376 # Remap tag data using a revision map
377 def _converttags(ui, revmap, data):
377 def _converttags(ui, revmap, data):
378 newdata = []
378 newdata = []
379 for line in data.splitlines():
379 for line in data.splitlines():
380 try:
380 try:
381 id, name = line.split(b' ', 1)
381 id, name = line.split(b' ', 1)
382 except ValueError:
382 except ValueError:
383 ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
383 ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
384 continue
384 continue
385 try:
385 try:
386 newid = bin(id)
386 newid = bin(id)
387 except binascii.Error:
387 except binascii.Error:
388 ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
388 ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
389 continue
389 continue
390 try:
390 try:
391 newdata.append(b'%s %s\n' % (hex(revmap[newid]), name))
391 newdata.append(b'%s %s\n' % (hex(revmap[newid]), name))
392 except KeyError:
392 except KeyError:
393 ui.warn(_(b'no mapping for id %s\n') % id)
393 ui.warn(_(b'no mapping for id %s\n') % id)
394 continue
394 continue
395 return b''.join(newdata)
395 return b''.join(newdata)
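
A worked sketch of the remapping, using deliberately short hypothetical ids
(real ids are 40 hex digits):

    # source .hgtags line:                       b'aa11 v1.0\n'
    # with revmap == {bin(b'aa11'): bin(b'bb22')}, the rewritten line is
    #                                             b'bb22 v1.0\n'
    # malformed lines or ids, and ids absent from revmap, are skipped
    # with a warning instead of aborting the conversion
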
396
396
397
397
398 def _islfile(file, ctx, matcher, size):
398 def _islfile(file, ctx, matcher, size):
399 """Return true if file should be considered a largefile, i.e.
399 """Return true if file should be considered a largefile, i.e.
400 matcher matches it or it is larger than size."""
400 matcher matches it or it is larger than size."""
401 # never store special .hg* files as largefiles
401 # never store special .hg* files as largefiles
402 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
402 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
403 return False
403 return False
404 if matcher and matcher(file):
404 if matcher and matcher(file):
405 return True
405 return True
406 try:
406 try:
407 return ctx.filectx(file).size() >= size * 1024 * 1024
407 return ctx.filectx(file).size() >= size * 1024 * 1024
408 except error.LookupError:
408 except error.LookupError:
409 return False
409 return False
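
A quick arithmetic note on the threshold above:

    # size is in (binary) megabytes: with size=10 the cutoff is
    # 10 * 1024 * 1024 = 10,485,760 bytes, and a file at or above that
    # size is converted as a largefile
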
410
410
411
411
412 def uploadlfiles(ui, rsrc, rdst, files):
412 def uploadlfiles(ui, rsrc, rdst, files):
413 '''upload largefiles to the central store'''
413 '''upload largefiles to the central store'''
414
414
415 if not files:
415 if not files:
416 return
416 return
417
417
418 store = storefactory.openstore(rsrc, rdst, put=True)
418 store = storefactory.openstore(rsrc, rdst, put=True)
419
419
420 at = 0
420 at = 0
421 ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
421 ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
422 retval = store.exists(files)
422 retval = store.exists(files)
423 files = [h for h in files if not retval[h]]
423 files = [h for h in files if not retval[h]]
424 ui.debug(b"%d largefiles need to be uploaded\n" % len(files))
424 ui.debug(b"%d largefiles need to be uploaded\n" % len(files))
425
425
426 with ui.makeprogress(
426 with ui.makeprogress(
427 _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
427 _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
428 ) as progress:
428 ) as progress:
429 for hash in files:
429 for hash in files:
430 progress.update(at)
430 progress.update(at)
431 source = lfutil.findfile(rsrc, hash)
431 source = lfutil.findfile(rsrc, hash)
432 if not source:
432 if not source:
433 raise error.Abort(
433 raise error.Abort(
434 _(
434 _(
435 b'largefile %s missing from store'
435 b'largefile %s missing from store'
436 b' (needs to be uploaded)'
436 b' (needs to be uploaded)'
437 )
437 )
438 % hash
438 % hash
439 )
439 )
440 # XXX check for errors here
440 # XXX check for errors here
441 store.put(source, hash)
441 store.put(source, hash)
442 at += 1
442 at += 1
443
443
444
444
445 def verifylfiles(ui, repo, all=False, contents=False):
445 def verifylfiles(ui, repo, all=False, contents=False):
446 """Verify that every largefile revision in the current changeset
446 """Verify that every largefile revision in the current changeset
447 exists in the central store. With --contents, also verify that
447 exists in the central store. With --contents, also verify that
448 the contents of each local largefile file revision are correct (SHA-1 hash
448 the contents of each local largefile file revision are correct (SHA-1 hash
449 matches the revision ID). With --all, check every changeset in
449 matches the revision ID). With --all, check every changeset in
450 this repository."""
450 this repository."""
451 if all:
451 if all:
452 revs = repo.revs(b'all()')
452 revs = repo.revs(b'all()')
453 else:
453 else:
454 revs = [b'.']
454 revs = [b'.']
455
455
456 store = storefactory.openstore(repo)
456 store = storefactory.openstore(repo)
457 return store.verify(revs, contents=contents)
457 return store.verify(revs, contents=contents)
458
458
459
459
460 def cachelfiles(ui, repo, node, filelist=None):
460 def cachelfiles(ui, repo, node, filelist=None):
461 """cachelfiles ensures that all largefiles needed by the specified revision
461 """cachelfiles ensures that all largefiles needed by the specified revision
462 are present in the repository's largefile cache.
462 are present in the repository's largefile cache.
463
463
464 returns a tuple (cached, missing). cached is the list of files downloaded
464 returns a tuple (cached, missing). cached is the list of files downloaded
465 by this operation; missing is the list of files that were needed but could
465 by this operation; missing is the list of files that were needed but could
466 not be found."""
466 not be found."""
467 lfiles = lfutil.listlfiles(repo, node)
467 lfiles = lfutil.listlfiles(repo, node)
468 if filelist:
468 if filelist:
469 lfiles = set(lfiles) & set(filelist)
469 lfiles = set(lfiles) & set(filelist)
470 toget = []
470 toget = []
471
471
472 ctx = repo[node]
472 ctx = repo[node]
473 for lfile in lfiles:
473 for lfile in lfiles:
474 try:
474 try:
475 expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
475 expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
476 except FileNotFoundError:
476 except FileNotFoundError:
477 continue # node must be None and standin wasn't found in wctx
477 continue # node must be None and standin wasn't found in wctx
478 if not lfutil.findfile(repo, expectedhash):
478 if not lfutil.findfile(repo, expectedhash):
479 toget.append((lfile, expectedhash))
479 toget.append((lfile, expectedhash))
480
480
481 if toget:
481 if toget:
482 store = storefactory.openstore(repo)
482 store = storefactory.openstore(repo)
483 ret = store.get(toget)
483 ret = store.get(toget)
484 return ret
484 return ret
485
485
486 return ([], [])
486 return ([], [])
487
487
488
488
489 def downloadlfiles(ui, repo):
489 def downloadlfiles(ui, repo):
490 tonode = repo.changelog.node
490 tonode = repo.changelog.node
491 totalsuccess = 0
491 totalsuccess = 0
492 totalmissing = 0
492 totalmissing = 0
493 for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
493 for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
494 success, missing = cachelfiles(ui, repo, tonode(rev))
494 success, missing = cachelfiles(ui, repo, tonode(rev))
495 totalsuccess += len(success)
495 totalsuccess += len(success)
496 totalmissing += len(missing)
496 totalmissing += len(missing)
497 ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
497 ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
498 if totalmissing > 0:
498 if totalmissing > 0:
499 ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
499 ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
500 return totalsuccess, totalmissing
500 return totalsuccess, totalmissing
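
A note on the revset driving the loop above (as used in the code):

    # repo.revs(b'file(%s)', b'path:' + lfutil.shortname) selects every
    # revision that touches anything under .hglf/, i.e. every revision
    # introducing or changing a standin, so all referenced largefiles
    # are pulled into the cache
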
501
501
502
502
503 def updatelfiles(
503 def updatelfiles(
504 ui, repo, filelist=None, printmessage=None, normallookup=False
504 ui, repo, filelist=None, printmessage=None, normallookup=False
505 ):
505 ):
506 """Update largefiles according to standins in the working directory
506 """Update largefiles according to standins in the working directory
507
507
508 If ``printmessage`` is not ``None``, messages are forcibly
508 If ``printmessage`` is not ``None``, messages are forcibly
509 printed (for a true value) or suppressed (for a false one).
509 printed (for a true value) or suppressed (for a false one).
510 """
510 """
511 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
511 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
512 with repo.wlock():
512 with repo.wlock():
513 lfdirstate = lfutil.openlfdirstate(ui, repo)
513 lfdirstate = lfutil.openlfdirstate(ui, repo)
514 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
514 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
515
515
516 if filelist is not None:
516 if filelist is not None:
517 filelist = set(filelist)
517 filelist = set(filelist)
518 lfiles = [f for f in lfiles if f in filelist]
518 lfiles = [f for f in lfiles if f in filelist]
519
519
520 with lfdirstate.parentchange(repo):
520 with lfdirstate.changing_parents(repo):
521 update = {}
521 update = {}
522 dropped = set()
522 dropped = set()
523 updated, removed = 0, 0
523 updated, removed = 0, 0
524 wvfs = repo.wvfs
524 wvfs = repo.wvfs
525 wctx = repo[None]
525 wctx = repo[None]
526 for lfile in lfiles:
526 for lfile in lfiles:
527 lfileorig = os.path.relpath(
527 lfileorig = os.path.relpath(
528 scmutil.backuppath(ui, repo, lfile), start=repo.root
528 scmutil.backuppath(ui, repo, lfile), start=repo.root
529 )
529 )
530 standin = lfutil.standin(lfile)
530 standin = lfutil.standin(lfile)
531 standinorig = os.path.relpath(
531 standinorig = os.path.relpath(
532 scmutil.backuppath(ui, repo, standin), start=repo.root
532 scmutil.backuppath(ui, repo, standin), start=repo.root
533 )
533 )
534 if wvfs.exists(standin):
534 if wvfs.exists(standin):
535 if wvfs.exists(standinorig) and wvfs.exists(lfile):
535 if wvfs.exists(standinorig) and wvfs.exists(lfile):
536 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
536 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
537 wvfs.unlinkpath(standinorig)
537 wvfs.unlinkpath(standinorig)
538 expecthash = lfutil.readasstandin(wctx[standin])
538 expecthash = lfutil.readasstandin(wctx[standin])
539 if expecthash != b'':
539 if expecthash != b'':
540 if lfile not in wctx: # not switched to normal file
540 if lfile not in wctx: # not switched to normal file
541 if repo.dirstate.get_entry(standin).any_tracked:
541 if repo.dirstate.get_entry(standin).any_tracked:
542 wvfs.unlinkpath(lfile, ignoremissing=True)
542 wvfs.unlinkpath(lfile, ignoremissing=True)
543 else:
543 else:
544 dropped.add(lfile)
544 dropped.add(lfile)
545
545
546 # use normallookup() to allocate an entry in largefiles
546 # use normallookup() to allocate an entry in largefiles
547 # dirstate to prevent lfilesrepo.status() from reporting
547 # dirstate to prevent lfilesrepo.status() from reporting
548 # missing files as removed.
548 # missing files as removed.
549 lfdirstate.update_file(
549 lfdirstate.update_file(
550 lfile,
550 lfile,
551 p1_tracked=True,
551 p1_tracked=True,
552 wc_tracked=True,
552 wc_tracked=True,
553 possibly_dirty=True,
553 possibly_dirty=True,
554 )
554 )
555 update[lfile] = expecthash
555 update[lfile] = expecthash
556 else:
556 else:
557 # Remove lfiles for which the standin is deleted, unless the
557 # Remove lfiles for which the standin is deleted, unless the
558 # lfile is added to the repository again. This happens when a
558 # lfile is added to the repository again. This happens when a
559 # largefile is converted back to a normal file: the standin
559 # largefile is converted back to a normal file: the standin
560 # disappears, but a new (normal) file appears as the lfile.
560 # disappears, but a new (normal) file appears as the lfile.
561 if (
561 if (
562 wvfs.exists(lfile)
562 wvfs.exists(lfile)
563 and repo.dirstate.normalize(lfile) not in wctx
563 and repo.dirstate.normalize(lfile) not in wctx
564 ):
564 ):
565 wvfs.unlinkpath(lfile)
565 wvfs.unlinkpath(lfile)
566 removed += 1
566 removed += 1
567
567
568 # largefile processing might be slow and get interrupted - be prepared
568 # largefile processing might be slow and get interrupted - be prepared
569 lfdirstate.write(repo.currenttransaction())
569 lfdirstate.write(repo.currenttransaction())
570
570
571 if lfiles:
571 if lfiles:
572 lfiles = [f for f in lfiles if f not in dropped]
572 lfiles = [f for f in lfiles if f not in dropped]
573
573
574 for f in dropped:
574 for f in dropped:
575 repo.wvfs.unlinkpath(lfutil.standin(f))
575 repo.wvfs.unlinkpath(lfutil.standin(f))
576 # This needs to happen for dropped files, otherwise they stay in
576 # This needs to happen for dropped files, otherwise they stay in
577 # the M state.
577 # the M state.
578 lfdirstate._map.reset_state(f)
578 lfdirstate._map.reset_state(f)
579
579
580 statuswriter(_(b'getting changed largefiles\n'))
580 statuswriter(_(b'getting changed largefiles\n'))
581 cachelfiles(ui, repo, None, lfiles)
581 cachelfiles(ui, repo, None, lfiles)
582
582
583 with lfdirstate.parentchange(repo):
583 with lfdirstate.changing_parents(repo):
584 for lfile in lfiles:
584 for lfile in lfiles:
585 update1 = 0
585 update1 = 0
586
586
587 expecthash = update.get(lfile)
587 expecthash = update.get(lfile)
588 if expecthash:
588 if expecthash:
589 if not lfutil.copyfromcache(repo, expecthash, lfile):
589 if not lfutil.copyfromcache(repo, expecthash, lfile):
590 # failed ... but already removed and set to normallookup
590 # failed ... but already removed and set to normallookup
591 continue
591 continue
592 # Synchronize largefile dirstate to the last modified
592 # Synchronize largefile dirstate to the last modified
593 # time of the file
593 # time of the file
594 lfdirstate.update_file(
594 lfdirstate.update_file(
595 lfile, p1_tracked=True, wc_tracked=True
595 lfile, p1_tracked=True, wc_tracked=True
596 )
596 )
597 update1 = 1
597 update1 = 1
598
598
599 # copy the exec mode of largefile standin from the repository's
599 # copy the exec mode of largefile standin from the repository's
600 # dirstate to its state in the lfdirstate.
600 # dirstate to its state in the lfdirstate.
601 standin = lfutil.standin(lfile)
601 standin = lfutil.standin(lfile)
602 if wvfs.exists(standin):
602 if wvfs.exists(standin):
603 # exec is decided by the user's permissions, using mask 0o100
603 # exec is decided by the user's permissions, using mask 0o100
604 standinexec = wvfs.stat(standin).st_mode & 0o100
604 standinexec = wvfs.stat(standin).st_mode & 0o100
605 st = wvfs.stat(lfile)
605 st = wvfs.stat(lfile)
606 mode = st.st_mode
606 mode = st.st_mode
607 if standinexec != mode & 0o100:
607 if standinexec != mode & 0o100:
608 # first remove all X bits, then shift all R bits to X
608 # first remove all X bits, then shift all R bits to X
609 mode &= ~0o111
609 mode &= ~0o111
610 if standinexec:
610 if standinexec:
611 mode |= (mode >> 2) & 0o111 & ~util.umask
611 mode |= (mode >> 2) & 0o111 & ~util.umask
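# worked example for the three steps above (assuming umask 0o022): a
# largefile at mode 0o644 (rw-r--r--) with an executable standin:
#   mode &= ~0o111        -> 0o644 (no exec bits to clear)
#   (mode >> 2) & 0o111   -> 0o111 (each read bit yields an exec bit)
#   0o111 & ~0o022        -> 0o111 (this umask clears none of them)
#   mode |= 0o111         -> 0o755 (rwxr-xr-x)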
612 wvfs.chmod(lfile, mode)
612 wvfs.chmod(lfile, mode)
613 update1 = 1
613 update1 = 1
614
614
615 updated += update1
615 updated += update1
616
616
617 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
617 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
618
618
619 lfdirstate.write(repo.currenttransaction())
619 lfdirstate.write(repo.currenttransaction())
620 if lfiles:
620 if lfiles:
621 statuswriter(
621 statuswriter(
622 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
622 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
623 )
623 )
624
624
625
625
626 @eh.command(
626 @eh.command(
627 b'lfpull',
627 b'lfpull',
628 [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
628 [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
629 + cmdutil.remoteopts,
629 + cmdutil.remoteopts,
630 _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
630 _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
631 )
631 )
632 def lfpull(ui, repo, source=b"default", **opts):
632 def lfpull(ui, repo, source=b"default", **opts):
633 """pull largefiles for the specified revisions from the specified source
633 """pull largefiles for the specified revisions from the specified source
634
634
635 Pull largefiles that are referenced from local changesets but missing
635 Pull largefiles that are referenced from local changesets but missing
636 locally, pulling from a remote repository to the local cache.
636 locally, pulling from a remote repository to the local cache.
637
637
638 If SOURCE is omitted, the 'default' path will be used.
638 If SOURCE is omitted, the 'default' path will be used.
639 See :hg:`help urls` for more information.
639 See :hg:`help urls` for more information.
640
640
641 .. container:: verbose
641 .. container:: verbose
642
642
643 Some examples:
643 Some examples:
644
644
645 - pull largefiles for all branch heads::
645 - pull largefiles for all branch heads::
646
646
647 hg lfpull -r "head() and not closed()"
647 hg lfpull -r "head() and not closed()"
648
648
649 - pull largefiles on the default branch::
649 - pull largefiles on the default branch::
650
650
651 hg lfpull -r "branch(default)"
651 hg lfpull -r "branch(default)"
652 """
652 """
653 repo.lfpullsource = source
653 repo.lfpullsource = source
654
654
655 revs = opts.get('rev', [])
655 revs = opts.get('rev', [])
656 if not revs:
656 if not revs:
657 raise error.Abort(_(b'no revisions specified'))
657 raise error.Abort(_(b'no revisions specified'))
658 revs = logcmdutil.revrange(repo, revs)
658 revs = logcmdutil.revrange(repo, revs)
659
659
660 numcached = 0
660 numcached = 0
661 for rev in revs:
661 for rev in revs:
662 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
662 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
663 (cached, missing) = cachelfiles(ui, repo, rev)
663 (cached, missing) = cachelfiles(ui, repo, rev)
664 numcached += len(cached)
664 numcached += len(cached)
665 ui.status(_(b"%d largefiles cached\n") % numcached)
665 ui.status(_(b"%d largefiles cached\n") % numcached)
666
666
667
667
668 @eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
668 @eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
669 def debuglfput(ui, repo, filepath, **kwargs):
669 def debuglfput(ui, repo, filepath, **kwargs):
670 hash = lfutil.hashfile(filepath)
670 hash = lfutil.hashfile(filepath)
671 storefactory.openstore(repo).put(filepath, hash)
671 storefactory.openstore(repo).put(filepath, hash)
672 ui.write(b'%s\n' % hash)
672 ui.write(b'%s\n' % hash)
673 return 0
673 return 0
@@ -1,797 +1,797 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import contextlib
11 import contextlib
12 import copy
12 import copy
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from mercurial.pycompat import open
18 from mercurial.pycompat import open
19
19
20 from mercurial import (
20 from mercurial import (
21 dirstate,
21 dirstate,
22 encoding,
22 encoding,
23 error,
23 error,
24 httpconnection,
24 httpconnection,
25 match as matchmod,
25 match as matchmod,
26 pycompat,
26 pycompat,
27 requirements,
27 requirements,
28 scmutil,
28 scmutil,
29 sparse,
29 sparse,
30 util,
30 util,
31 vfs as vfsmod,
31 vfs as vfsmod,
32 )
32 )
33 from mercurial.utils import hashutil
33 from mercurial.utils import hashutil
34 from mercurial.dirstateutils import timestamp
34 from mercurial.dirstateutils import timestamp
35
35
36 shortname = b'.hglf'
36 shortname = b'.hglf'
37 shortnameslash = shortname + b'/'
37 shortnameslash = shortname + b'/'
38 longname = b'largefiles'
38 longname = b'largefiles'
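
A hypothetical example of the layout these constants imply:

    # the standin for b'data/huge.bin' is tracked at b'.hglf/data/huge.bin';
    # its content is the hash of the largefile, so the normal Mercurial
    # machinery only ever versions that small standin file
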
39
39
40 # -- Private worker functions ------------------------------------------
40 # -- Private worker functions ------------------------------------------
41
41
42
42
43 @contextlib.contextmanager
43 @contextlib.contextmanager
44 def lfstatus(repo, value=True):
44 def lfstatus(repo, value=True):
45 oldvalue = getattr(repo, 'lfstatus', False)
45 oldvalue = getattr(repo, 'lfstatus', False)
46 repo.lfstatus = value
46 repo.lfstatus = value
47 try:
47 try:
48 yield
48 yield
49 finally:
49 finally:
50 repo.lfstatus = oldvalue
50 repo.lfstatus = oldvalue
51
51
52
52
53 def getminsize(ui, assumelfiles, opt, default=10):
53 def getminsize(ui, assumelfiles, opt, default=10):
54 lfsize = opt
54 lfsize = opt
55 if not lfsize and assumelfiles:
55 if not lfsize and assumelfiles:
56 lfsize = ui.config(longname, b'minsize', default=default)
56 lfsize = ui.config(longname, b'minsize', default=default)
57 if lfsize:
57 if lfsize:
58 try:
58 try:
59 lfsize = float(lfsize)
59 lfsize = float(lfsize)
60 except ValueError:
60 except ValueError:
61 raise error.Abort(
61 raise error.Abort(
62 _(b'largefiles: size must be number (not %s)\n') % lfsize
62 _(b'largefiles: size must be a number (not %s)\n') % lfsize
62 _(b'largefiles: size must be a number (not %s)\n') % lfsize
63 )
64 if lfsize is None:
64 if lfsize is None:
65 raise error.Abort(_(b'minimum size for largefiles must be specified'))
65 raise error.Abort(_(b'minimum size for largefiles must be specified'))
66 return lfsize
66 return lfsize
67
67
68
68
69 def link(src, dest):
69 def link(src, dest):
70 """Try to create hardlink - if that fails, efficiently make a copy."""
70 """Try to create hardlink - if that fails, efficiently make a copy."""
71 util.makedirs(os.path.dirname(dest))
71 util.makedirs(os.path.dirname(dest))
72 try:
72 try:
73 util.oslink(src, dest)
73 util.oslink(src, dest)
74 except OSError:
74 except OSError:
75 # if hardlinks fail, fallback on atomic copy
75 # if hardlinks fail, fallback on atomic copy
76 with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
76 with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
77 for chunk in util.filechunkiter(srcf):
77 for chunk in util.filechunkiter(srcf):
78 dstf.write(chunk)
78 dstf.write(chunk)
79 os.chmod(dest, os.stat(src).st_mode)
79 os.chmod(dest, os.stat(src).st_mode)
80
80
81
81
82 def usercachepath(ui, hash):
82 def usercachepath(ui, hash):
83 """Return the correct location in the "global" largefiles cache for a file
83 """Return the correct location in the "global" largefiles cache for a file
84 with the given hash.
84 with the given hash.
85 This cache is used for sharing of largefiles across repositories - both
85 This cache is used for sharing of largefiles across repositories - both
86 to preserve download bandwidth and storage space."""
86 to preserve download bandwidth and storage space."""
87 return os.path.join(_usercachedir(ui), hash)
87 return os.path.join(_usercachedir(ui), hash)
88
88
89
89
90 def _usercachedir(ui, name=longname):
90 def _usercachedir(ui, name=longname):
91 '''Return the location of the "global" largefiles cache.'''
91 '''Return the location of the "global" largefiles cache.'''
92 path = ui.configpath(name, b'usercache')
92 path = ui.configpath(name, b'usercache')
93 if path:
93 if path:
94 return path
94 return path
95
95
96 hint = None
96 hint = None
97
97
98 if pycompat.iswindows:
98 if pycompat.iswindows:
99 appdata = encoding.environ.get(
99 appdata = encoding.environ.get(
100 b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
100 b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
101 )
101 )
102 if appdata:
102 if appdata:
103 return os.path.join(appdata, name)
103 return os.path.join(appdata, name)
104
104
105 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
105 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
106 b"LOCALAPPDATA",
106 b"LOCALAPPDATA",
107 b"APPDATA",
107 b"APPDATA",
108 name,
108 name,
109 )
109 )
110 elif pycompat.isdarwin:
110 elif pycompat.isdarwin:
111 home = encoding.environ.get(b'HOME')
111 home = encoding.environ.get(b'HOME')
112 if home:
112 if home:
113 return os.path.join(home, b'Library', b'Caches', name)
113 return os.path.join(home, b'Library', b'Caches', name)
114
114
115 hint = _(b"define %s in the environment, or set %s.usercache") % (
115 hint = _(b"define %s in the environment, or set %s.usercache") % (
116 b"HOME",
116 b"HOME",
117 name,
117 name,
118 )
118 )
119 elif pycompat.isposix:
119 elif pycompat.isposix:
120 path = encoding.environ.get(b'XDG_CACHE_HOME')
120 path = encoding.environ.get(b'XDG_CACHE_HOME')
121 if path:
121 if path:
122 return os.path.join(path, name)
122 return os.path.join(path, name)
123 home = encoding.environ.get(b'HOME')
123 home = encoding.environ.get(b'HOME')
124 if home:
124 if home:
125 return os.path.join(home, b'.cache', name)
125 return os.path.join(home, b'.cache', name)
126
126
127 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
127 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
128 b"XDG_CACHE_HOME",
128 b"XDG_CACHE_HOME",
129 b"HOME",
129 b"HOME",
130 name,
130 name,
131 )
131 )
132 else:
132 else:
133 raise error.Abort(
133 raise error.Abort(
134 _(b'unknown operating system: %s\n') % pycompat.osname
134 _(b'unknown operating system: %s\n') % pycompat.osname
135 )
135 )
136
136
137 raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
137 raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138
138
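For reference, the locations the function above resolves to (read straight
off its branches; an explicit config path always wins):

    # [largefiles] usercache=PATH -> PATH
    # Windows: %LOCALAPPDATA%\largefiles (falling back to %APPDATA%)
    # macOS:   $HOME/Library/Caches/largefiles
    # POSIX:   $XDG_CACHE_HOME/largefiles, else $HOME/.cache/largefiles
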
139
139
140 def inusercache(ui, hash):
140 def inusercache(ui, hash):
141 path = usercachepath(ui, hash)
141 path = usercachepath(ui, hash)
142 return os.path.exists(path)
142 return os.path.exists(path)
143
143
144
144
145 def findfile(repo, hash):
145 def findfile(repo, hash):
146 """Return store path of the largefile with the specified hash.
146 """Return store path of the largefile with the specified hash.
147 As a side effect, the file might be linked from user cache.
147 As a side effect, the file might be linked from user cache.
148 Return None if the file can't be found locally."""
148 Return None if the file can't be found locally."""
149 path, exists = findstorepath(repo, hash)
149 path, exists = findstorepath(repo, hash)
150 if exists:
150 if exists:
151 repo.ui.note(_(b'found %s in store\n') % hash)
151 repo.ui.note(_(b'found %s in store\n') % hash)
152 return path
152 return path
153 elif inusercache(repo.ui, hash):
153 elif inusercache(repo.ui, hash):
154 repo.ui.note(_(b'found %s in system cache\n') % hash)
154 repo.ui.note(_(b'found %s in system cache\n') % hash)
155 path = storepath(repo, hash)
155 path = storepath(repo, hash)
156 link(usercachepath(repo.ui, hash), path)
156 link(usercachepath(repo.ui, hash), path)
157 return path
157 return path
158 return None
158 return None
159
159
160
160
161 class largefilesdirstate(dirstate.dirstate):
161 class largefilesdirstate(dirstate.dirstate):
162 def __getitem__(self, key):
162 def __getitem__(self, key):
163 return super(largefilesdirstate, self).__getitem__(unixpath(key))
163 return super(largefilesdirstate, self).__getitem__(unixpath(key))
164
164
165 def set_tracked(self, f):
165 def set_tracked(self, f):
166 return super(largefilesdirstate, self).set_tracked(unixpath(f))
166 return super(largefilesdirstate, self).set_tracked(unixpath(f))
167
167
168 def set_untracked(self, f):
168 def set_untracked(self, f):
169 return super(largefilesdirstate, self).set_untracked(unixpath(f))
169 return super(largefilesdirstate, self).set_untracked(unixpath(f))
170
170
171 def normal(self, f, parentfiledata=None):
171 def normal(self, f, parentfiledata=None):
172 # not sure if we should pass the `parentfiledata` down or throw it
172 # not sure if we should pass the `parentfiledata` down or throw it
173 # away. So throwing it away to stay on the safe side.
173 # away. So throwing it away to stay on the safe side.
174 return super(largefilesdirstate, self).normal(unixpath(f))
174 return super(largefilesdirstate, self).normal(unixpath(f))
175
175
176 def remove(self, f):
176 def remove(self, f):
177 return super(largefilesdirstate, self).remove(unixpath(f))
177 return super(largefilesdirstate, self).remove(unixpath(f))
178
178
179 def add(self, f):
179 def add(self, f):
180 return super(largefilesdirstate, self).add(unixpath(f))
180 return super(largefilesdirstate, self).add(unixpath(f))
181
181
182 def drop(self, f):
182 def drop(self, f):
183 return super(largefilesdirstate, self).drop(unixpath(f))
183 return super(largefilesdirstate, self).drop(unixpath(f))
184
184
185 def forget(self, f):
185 def forget(self, f):
186 return super(largefilesdirstate, self).forget(unixpath(f))
186 return super(largefilesdirstate, self).forget(unixpath(f))
187
187
188 def normallookup(self, f):
188 def normallookup(self, f):
189 return super(largefilesdirstate, self).normallookup(unixpath(f))
189 return super(largefilesdirstate, self).normallookup(unixpath(f))
190
190
191 def _ignore(self, f):
191 def _ignore(self, f):
192 return False
192 return False
193
193
194 def write(self, tr):
194 def write(self, tr):
195 # (1) disable PENDING mode always
195 # (1) disable PENDING mode always
196 # (lfdirstate isn't yet managed as a part of the transaction)
196 # (lfdirstate isn't yet managed as a part of the transaction)
197 # (2) avoid develwarn 'use dirstate.write with ....'
197 # (2) avoid develwarn 'use dirstate.write with ....'
198 if tr:
198 if tr:
199 tr.addbackup(b'largefiles/dirstate', location=b'plain')
199 tr.addbackup(b'largefiles/dirstate', location=b'plain')
200 super(largefilesdirstate, self).write(None)
200 super(largefilesdirstate, self).write(None)
201
201
202
202
203 def openlfdirstate(ui, repo, create=True):
203 def openlfdirstate(ui, repo, create=True):
204 """
204 """
205 Return a dirstate object that tracks largefiles: i.e. its root is
205 Return a dirstate object that tracks largefiles: i.e. its root is
206 the repo root, but it is saved in .hg/largefiles/dirstate.
206 the repo root, but it is saved in .hg/largefiles/dirstate.
207 """
207 """
208 vfs = repo.vfs
208 vfs = repo.vfs
209 lfstoredir = longname
209 lfstoredir = longname
210 opener = vfsmod.vfs(vfs.join(lfstoredir))
210 opener = vfsmod.vfs(vfs.join(lfstoredir))
211 use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
211 use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
212 lfdirstate = largefilesdirstate(
212 lfdirstate = largefilesdirstate(
213 opener,
213 opener,
214 ui,
214 ui,
215 repo.root,
215 repo.root,
216 repo.dirstate._validate,
216 repo.dirstate._validate,
217 lambda: sparse.matcher(repo),
217 lambda: sparse.matcher(repo),
218 repo.nodeconstants,
218 repo.nodeconstants,
219 use_dirstate_v2,
219 use_dirstate_v2,
220 )
220 )
221
221
222 # If the largefiles dirstate does not exist, populate and create
222 # If the largefiles dirstate does not exist, populate and create
223 # it. This ensures that we create it on the first meaningful
223 # it. This ensures that we create it on the first meaningful
224 # largefiles operation in a new clone.
224 # largefiles operation in a new clone.
225 if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
225 if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
226 matcher = getstandinmatcher(repo)
226 matcher = getstandinmatcher(repo)
227 standins = repo.dirstate.walk(
227 standins = repo.dirstate.walk(
228 matcher, subrepos=[], unknown=False, ignored=False
228 matcher, subrepos=[], unknown=False, ignored=False
229 )
229 )
230
230
231 if len(standins) > 0:
231 if len(standins) > 0:
232 vfs.makedirs(lfstoredir)
232 vfs.makedirs(lfstoredir)
233
233
234 with lfdirstate.parentchange(repo):
234 with lfdirstate.changing_parents(repo):
235 for standin in standins:
235 for standin in standins:
236 lfile = splitstandin(standin)
236 lfile = splitstandin(standin)
237 lfdirstate.update_file(
237 lfdirstate.update_file(
238 lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
238 lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
239 )
239 )
240 return lfdirstate
240 return lfdirstate
241
241
242
242
243 def lfdirstatestatus(lfdirstate, repo):
243 def lfdirstatestatus(lfdirstate, repo):
244 pctx = repo[b'.']
244 pctx = repo[b'.']
245 match = matchmod.always()
245 match = matchmod.always()
246 unsure, s, mtime_boundary = lfdirstate.status(
246 unsure, s, mtime_boundary = lfdirstate.status(
247 match, subrepos=[], ignored=False, clean=False, unknown=False
247 match, subrepos=[], ignored=False, clean=False, unknown=False
248 )
248 )
249 modified, clean = s.modified, s.clean
249 modified, clean = s.modified, s.clean
250 wctx = repo[None]
250 wctx = repo[None]
251 for lfile in unsure:
251 for lfile in unsure:
252 try:
252 try:
253 fctx = pctx[standin(lfile)]
253 fctx = pctx[standin(lfile)]
254 except LookupError:
254 except LookupError:
255 fctx = None
255 fctx = None
256 if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
256 if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
257 modified.append(lfile)
257 modified.append(lfile)
258 else:
258 else:
259 clean.append(lfile)
259 clean.append(lfile)
260 st = wctx[lfile].lstat()
260 st = wctx[lfile].lstat()
261 mode = st.st_mode
261 mode = st.st_mode
262 size = st.st_size
262 size = st.st_size
263 mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
263 mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
264 if mtime is not None:
264 if mtime is not None:
265 cache_data = (mode, size, mtime)
265 cache_data = (mode, size, mtime)
266 lfdirstate.set_clean(lfile, cache_data)
266 lfdirstate.set_clean(lfile, cache_data)
267 return s
267 return s
268
268
269
269
270 def listlfiles(repo, rev=None, matcher=None):
270 def listlfiles(repo, rev=None, matcher=None):
271 """return a list of largefiles in the working copy or the
271 """return a list of largefiles in the working copy or the
272 specified changeset"""
272 specified changeset"""
273
273
274 if matcher is None:
274 if matcher is None:
275 matcher = getstandinmatcher(repo)
275 matcher = getstandinmatcher(repo)
276
276
277 # ignore unknown files in working directory
277 # ignore unknown files in working directory
278 return [
278 return [
279 splitstandin(f)
279 splitstandin(f)
280 for f in repo[rev].walk(matcher)
280 for f in repo[rev].walk(matcher)
281 if rev is not None or repo.dirstate.get_entry(f).any_tracked
281 if rev is not None or repo.dirstate.get_entry(f).any_tracked
282 ]
282 ]
283
283
284
284
285 def instore(repo, hash, forcelocal=False):
285 def instore(repo, hash, forcelocal=False):
286 '''Return true if a largefile with the given hash exists in the store'''
286 '''Return true if a largefile with the given hash exists in the store'''
287 return os.path.exists(storepath(repo, hash, forcelocal))
287 return os.path.exists(storepath(repo, hash, forcelocal))
288
288
289
289
290 def storepath(repo, hash, forcelocal=False):
290 def storepath(repo, hash, forcelocal=False):
291 """Return the correct location in the repository largefiles store for a
291 """Return the correct location in the repository largefiles store for a
292 file with the given hash."""
292 file with the given hash."""
293 if not forcelocal and repo.shared():
293 if not forcelocal and repo.shared():
294 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
294 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
295 return repo.vfs.join(longname, hash)
295 return repo.vfs.join(longname, hash)
296
296
297
297
298 def findstorepath(repo, hash):
298 def findstorepath(repo, hash):
299 """Search through the local store path(s) to find the file for the given
299 """Search through the local store path(s) to find the file for the given
300 hash. If the file is not found, its path in the primary store is returned.
300 hash. If the file is not found, its path in the primary store is returned.
301 The return value is a tuple of (path, exists(path)).
301 The return value is a tuple of (path, exists(path)).
302 """
302 """
303 # For shared repos, the primary store is in the share source. But for
303 # For shared repos, the primary store is in the share source. But for
304 # backward compatibility, force a lookup in the local store if it wasn't
304 # backward compatibility, force a lookup in the local store if it wasn't
305 # found in the share source.
305 # found in the share source.
306 path = storepath(repo, hash, False)
306 path = storepath(repo, hash, False)
307
307
308 if instore(repo, hash):
308 if instore(repo, hash):
309 return (path, True)
309 return (path, True)
310 elif repo.shared() and instore(repo, hash, True):
310 elif repo.shared() and instore(repo, hash, True):
311 return storepath(repo, hash, True), True
311 return storepath(repo, hash, True), True
312
312
313 return (path, False)
313 return (path, False)
314
314
315
315
316 def copyfromcache(repo, hash, filename):
316 def copyfromcache(repo, hash, filename):
317 """Copy the specified largefile from the repo or system cache to
317 """Copy the specified largefile from the repo or system cache to
318 filename in the repository. Return true on success or false if the
318 filename in the repository. Return true on success or false if the
319 file was not found in either cache (which should not happened:
319 file was not found in either cache (which should not happened:
320 this is meant to be called only after ensuring that the needed
320 this is meant to be called only after ensuring that the needed
321 largefile exists in the cache)."""
321 largefile exists in the cache)."""
322 wvfs = repo.wvfs
322 wvfs = repo.wvfs
323 path = findfile(repo, hash)
323 path = findfile(repo, hash)
324 if path is None:
324 if path is None:
325 return False
325 return False
326 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
326 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
327 # The write may fail before the file is fully written, but we
327 # The write may fail before the file is fully written, but we
328 # don't use atomic writes in the working copy.
328 # don't use atomic writes in the working copy.
329 with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
329 with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
330 gothash = copyandhash(util.filechunkiter(srcfd), destfd)
330 gothash = copyandhash(util.filechunkiter(srcfd), destfd)
331 if gothash != hash:
331 if gothash != hash:
332 repo.ui.warn(
332 repo.ui.warn(
333 _(b'%s: data corruption in %s with hash %s\n')
333 _(b'%s: data corruption in %s with hash %s\n')
334 % (filename, path, gothash)
334 % (filename, path, gothash)
335 )
335 )
336 wvfs.unlink(filename)
336 wvfs.unlink(filename)
337 return False
337 return False
338 return True
338 return True
339
339
340
340
341 def copytostore(repo, ctx, file, fstandin):
341 def copytostore(repo, ctx, file, fstandin):
342 wvfs = repo.wvfs
342 wvfs = repo.wvfs
343 hash = readasstandin(ctx[fstandin])
343 hash = readasstandin(ctx[fstandin])
344 if instore(repo, hash):
344 if instore(repo, hash):
345 return
345 return
346 if wvfs.exists(file):
346 if wvfs.exists(file):
347 copytostoreabsolute(repo, wvfs.join(file), hash)
347 copytostoreabsolute(repo, wvfs.join(file), hash)
348 else:
348 else:
349 repo.ui.warn(
349 repo.ui.warn(
350 _(b"%s: largefile %s not available from local store\n")
350 _(b"%s: largefile %s not available from local store\n")
351 % (file, hash)
351 % (file, hash)
352 )
352 )
353
353
354
354
355 def copyalltostore(repo, node):
355 def copyalltostore(repo, node):
356 '''Copy all largefiles in a given revision to the store'''
356 '''Copy all largefiles in a given revision to the store'''
357
357
358 ctx = repo[node]
358 ctx = repo[node]
359 for filename in ctx.files():
359 for filename in ctx.files():
360 realfile = splitstandin(filename)
360 realfile = splitstandin(filename)
361 if realfile is not None and filename in ctx.manifest():
361 if realfile is not None and filename in ctx.manifest():
362 copytostore(repo, ctx, realfile, filename)
362 copytostore(repo, ctx, realfile, filename)
363
363
364
364
365 def copytostoreabsolute(repo, file, hash):
365 def copytostoreabsolute(repo, file, hash):
366 if inusercache(repo.ui, hash):
366 if inusercache(repo.ui, hash):
367 link(usercachepath(repo.ui, hash), storepath(repo, hash))
367 link(usercachepath(repo.ui, hash), storepath(repo, hash))
368 else:
368 else:
369 util.makedirs(os.path.dirname(storepath(repo, hash)))
369 util.makedirs(os.path.dirname(storepath(repo, hash)))
370 with open(file, b'rb') as srcf:
370 with open(file, b'rb') as srcf:
371 with util.atomictempfile(
371 with util.atomictempfile(
372 storepath(repo, hash), createmode=repo.store.createmode
372 storepath(repo, hash), createmode=repo.store.createmode
373 ) as dstf:
373 ) as dstf:
374 for chunk in util.filechunkiter(srcf):
374 for chunk in util.filechunkiter(srcf):
375 dstf.write(chunk)
375 dstf.write(chunk)
376 linktousercache(repo, hash)
376 linktousercache(repo, hash)
377
377
378
378
379 def linktousercache(repo, hash):
379 def linktousercache(repo, hash):
380 """Link / copy the largefile with the specified hash from the store
380 """Link / copy the largefile with the specified hash from the store
381 to the cache."""
381 to the cache."""
382 path = usercachepath(repo.ui, hash)
382 path = usercachepath(repo.ui, hash)
383 link(storepath(repo, hash), path)
383 link(storepath(repo, hash), path)
384
384
385
385
386 def getstandinmatcher(repo, rmatcher=None):
386 def getstandinmatcher(repo, rmatcher=None):
387 '''Return a match object that applies rmatcher to the standin directory'''
387 '''Return a match object that applies rmatcher to the standin directory'''
388 wvfs = repo.wvfs
388 wvfs = repo.wvfs
389 standindir = shortname
389 standindir = shortname
390
390
391 # no warnings about missing files or directories
391 # no warnings about missing files or directories
392 badfn = lambda f, msg: None
392 badfn = lambda f, msg: None
393
393
394 if rmatcher and not rmatcher.always():
394 if rmatcher and not rmatcher.always():
395 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
395 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
396 if not pats:
396 if not pats:
397 pats = [wvfs.join(standindir)]
397 pats = [wvfs.join(standindir)]
398 match = scmutil.match(repo[None], pats, badfn=badfn)
398 match = scmutil.match(repo[None], pats, badfn=badfn)
399 else:
399 else:
400 # no patterns: relative to repo root
400 # no patterns: relative to repo root
401 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
401 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
402 return match
402 return match
403
403
404
404
405 def composestandinmatcher(repo, rmatcher):
405 def composestandinmatcher(repo, rmatcher):
406 """Return a matcher that accepts standins corresponding to the
406 """Return a matcher that accepts standins corresponding to the
407 files accepted by rmatcher. Pass the list of files in the matcher
407 files accepted by rmatcher. Pass the list of files in the matcher
408 as the paths specified by the user."""
408 as the paths specified by the user."""
409 smatcher = getstandinmatcher(repo, rmatcher)
409 smatcher = getstandinmatcher(repo, rmatcher)
410 isstandin = smatcher.matchfn
410 isstandin = smatcher.matchfn
411
411
412 def composedmatchfn(f):
412 def composedmatchfn(f):
413 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
413 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
414
414
415 smatcher.matchfn = composedmatchfn
415 smatcher.matchfn = composedmatchfn
416
416
417 return smatcher
417 return smatcher
418
418
419
419
420 def standin(filename):
420 def standin(filename):
421 """Return the repo-relative path to the standin for the specified big
421 """Return the repo-relative path to the standin for the specified big
422 file."""
422 file."""
423 # Notes:
423 # Notes:
424 # 1) Some callers want an absolute path, but for instance addlargefiles
424 # 1) Some callers want an absolute path, but for instance addlargefiles
425 # needs it repo-relative so it can be passed to repo[None].add(). So
425 # needs it repo-relative so it can be passed to repo[None].add(). So
426 # leave it up to the caller to use repo.wjoin() to get an absolute path.
426 # leave it up to the caller to use repo.wjoin() to get an absolute path.
427 # 2) Join with '/' because that's what dirstate always uses, even on
427 # 2) Join with '/' because that's what dirstate always uses, even on
428 # Windows. Change existing separator to '/' first in case we are
428 # Windows. Change existing separator to '/' first in case we are
429 # passed filenames from an external source (like the command line).
429 # passed filenames from an external source (like the command line).
430 return shortnameslash + util.pconvert(filename)
430 return shortnameslash + util.pconvert(filename)
431
431
432
432
433 def isstandin(filename):
433 def isstandin(filename):
434 """Return true if filename is a big file standin. filename must be
434 """Return true if filename is a big file standin. filename must be
435 in Mercurial's internal form (slash-separated)."""
435 in Mercurial's internal form (slash-separated)."""
436 return filename.startswith(shortnameslash)
436 return filename.startswith(shortnameslash)
437
437
438
438
439 def splitstandin(filename):
439 def splitstandin(filename):
440 # Split on / because that's what dirstate always uses, even on Windows.
440 # Split on / because that's what dirstate always uses, even on Windows.
441 # Change local separator to / first just in case we are passed filenames
441 # Change local separator to / first just in case we are passed filenames
442 # from an external source (like the command line).
442 # from an external source (like the command line).
443 bits = util.pconvert(filename).split(b'/', 1)
443 bits = util.pconvert(filename).split(b'/', 1)
444 if len(bits) == 2 and bits[0] == shortname:
444 if len(bits) == 2 and bits[0] == shortname:
445 return bits[1]
445 return bits[1]
446 else:
446 else:
447 return None
447 return None
448
448
449
449
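
# Illustrative examples (assuming, consistent with this module's usage,
# that shortname is b'.hglf' and shortnameslash is b'.hglf/'):
#
#   standin(b'sub/big.bin')          -> b'.hglf/sub/big.bin'
#   isstandin(b'.hglf/sub/big.bin')  -> True
#   splitstandin(b'.hglf/big.bin')   -> b'big.bin'
#   splitstandin(b'big.bin')         -> None
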
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored in the storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url
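
# Illustrative example: the inner join() is folded over every argument,
# normalizing to exactly one slash at each boundary:
#
#   urljoin(b'http://host/store', b'/a/', b'b') -> b'http://host/store/a/b'
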
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the given
    file-like object"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())
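
# Illustrative example: the SHA-1 of empty input is the well-known
# constant, so hashing an empty file yields:
#
#   hexsha1(fd) -> b'da39a3ee5e6b4b0d3255bfef95601890afd80709'
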
def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
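
# The return value is a list of (largefile, hash) pairs, one per walked
# standin, with hash None when the standin could not be read, e.g.
# (illustrative):
#
#   [(b'big.bin', b'da39a3ee...'), (b'data/blob.bin', None)]
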
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
    else:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.changing_parents(repo):  # was: lfdirstate.parentchange(repo)
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
        lfdirstate.write(repo.currenttransaction())

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup inside copyalltostore(), but lets us omit the redundant
    # check for files coming from the 2nd parent, which should already
    # exist in the store after a merge.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist
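
# Illustrative example: pairs that differ between the two snapshots end
# up in the symmetric difference, and each affected name is listed once:
#
#   old = [(b'a.bin', b'11..'), (b'b.bin', b'22..')]
#   new = [(b'a.bin', b'33..'), (b'b.bin', b'22..')]
#   getlfilestoupdate(old, new) -> [b'a.bin']
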
def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s, mtime_boundary = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook:
    """Stateful hook to update standins at the 1st commit after resuming

    For efficiency, updating standins in the working directory should
    be avoided during automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit after resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match
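
# Sketch of how such a hook is consumed (illustrative; this assumes the
# largefiles reposetup maintains repo._lfcommithooks, a list of callables
# invoked with (repo, match) ahead of each commit):
#
#   repo._lfcommithooks.append(automatedcommithook(resuming=True))
#   ...
#   for hook in repo._lfcommithooks:
#       match = hook(repo, match)
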
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE

@@ -1,1867 +1,1867 @@

# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''

import copy
import os

from mercurial.i18n import _

from mercurial.pycompat import open

from mercurial.hgweb import webcommands

from mercurial import (
    archival,
    cmdutil,
    copies as copiesmod,
    error,
    exchange,
    extensions,
    exthelper,
    filemerge,
    hg,
    logcmdutil,
    match as matchmod,
    merge,
    mergestate as mergestatemod,
    pathutil,
    pycompat,
    scmutil,
    smartset,
    subrepo,
    url as urlmod,
    util,
)

from mercurial.upgrade_utils import (
    actions as upgrade_actions,
)

from . import (
    lfcommands,
    lfutil,
    storefactory,
)

ACTION_ADD = mergestatemod.ACTION_ADD
ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
ACTION_GET = mergestatemod.ACTION_GET
ACTION_KEEP = mergestatemod.ACTION_KEEP
ACTION_REMOVE = mergestatemod.ACTION_REMOVE

eh = exthelper.exthelper()

lfstatus = lfutil.lfstatus

MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')

# -- Utility functions: commonly/repeatedly needed functionality ---------------


def composelargefilematcher(match, manifest):
    """create a matcher that matches only the largefiles in the original
    matcher"""
    m = copy.copy(match)
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = [lf for lf in m._files if lfile(lf)]
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m
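
# Illustrative example: with a matcher over [b'big.bin', b'small.txt'] and
# a manifest containing b'.hglf/big.bin' but no b'.hglf/small.txt', the
# composed matcher keeps only the largefile:
#
#   m = composelargefilematcher(match, repo[None].manifest())
#   m.files() -> [b'big.bin']
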
def composenormalfilematcher(match, manifest, exclude=None):
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    notlfile = lambda f: not (
        lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
    )
    m._files = [lf for lf in m._files if notlfile(lf)]
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m


def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                lfdirstate.set_tracked(f)
            lfdirstate.write(repo.currenttransaction())
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

        added = [f for f in lfnames if f not in bad]
    return added, bad


def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(remove)

        for f in remove:
            lfdirstate.set_untracked(lfutil.splitstandin(f))

        lfdirstate.write(repo.currenttransaction())

    return result


# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    return lfutil.splitstandin(path) or path
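
# Illustrative example: hgweb paths inside the standin directory are mapped
# back to the user-visible name, anything else passes through unchanged:
#
#   decodepath(b'.hglf/sub/big.bin') -> b'sub/big.bin'
#   decodepath(b'README')            -> b'README'
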
# -- Wrappers: modify existing commands --------------------------------


@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    if opts.get('normal') and opts.get('large'):
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)


@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)

    bad.extend(f for f in lbad)
    return bad


@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    return (
        removelargefiles(
            ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
        )
        or result
    )


@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    with lfstatus(repo._repo):
        return orig(repo, rev2, **opts)


@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    with lfstatus(repo):
        return orig(ui, repo, *pats, **opts)


@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    with lfstatus(repo._repo):
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)


@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
453 @eh.wrapcommand(
453 @eh.wrapcommand(
454 b'verify',
454 b'verify',
455 opts=[
455 opts=[
456 (
456 (
457 b'',
457 b'',
458 b'large',
458 b'large',
459 None,
459 None,
460 _(b'verify that all largefiles in current revision exists'),
460 _(b'verify that all largefiles in current revision exists'),
461 ),
461 ),
462 (
462 (
463 b'',
463 b'',
464 b'lfa',
464 b'lfa',
465 None,
465 None,
466 _(b'verify largefiles in all revisions, not just current'),
466 _(b'verify largefiles in all revisions, not just current'),
467 ),
467 ),
468 (
468 (
469 b'',
469 b'',
470 b'lfc',
470 b'lfc',
471 None,
471 None,
472 _(b'verify local largefile contents, not just existence'),
472 _(b'verify local largefile contents, not just existence'),
473 ),
473 ),
474 ],
474 ],
475 )
475 )
476 def overrideverify(orig, ui, repo, *pats, **opts):
476 def overrideverify(orig, ui, repo, *pats, **opts):
477 large = opts.pop('large', False)
477 large = opts.pop('large', False)
478 all = opts.pop('lfa', False)
478 all = opts.pop('lfa', False)
479 contents = opts.pop('lfc', False)
479 contents = opts.pop('lfc', False)
480
480
481 result = orig(ui, repo, *pats, **opts)
481 result = orig(ui, repo, *pats, **opts)
482 if large or all or contents:
482 if large or all or contents:
483 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
483 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
484 return result
484 return result
485
485
486
486
487 @eh.wrapcommand(
487 @eh.wrapcommand(
488 b'debugstate',
488 b'debugstate',
489 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
489 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
490 )
490 )
491 def overridedebugstate(orig, ui, repo, *pats, **opts):
491 def overridedebugstate(orig, ui, repo, *pats, **opts):
492 large = opts.pop('large', False)
492 large = opts.pop('large', False)
493 if large:
493 if large:
494
494
495 class fakerepo:
495 class fakerepo:
496 dirstate = lfutil.openlfdirstate(ui, repo)
496 dirstate = lfutil.openlfdirstate(ui, repo)
497
497
498 orig(ui, fakerepo, *pats, **opts)
498 orig(ui, fakerepo, *pats, **opts)
499 else:
499 else:
500 orig(ui, repo, *pats, **opts)
500 orig(ui, repo, *pats, **opts)
501
501
502
502
503 # Before starting the manifest merge, merge.updates will call
503 # Before starting the manifest merge, merge.updates will call
504 # _checkunknownfile to check if there are any files in the merged-in
504 # _checkunknownfile to check if there are any files in the merged-in
505 # changeset that collide with unknown files in the working copy.
505 # changeset that collide with unknown files in the working copy.
506 #
506 #
507 # The largefiles are seen as unknown, so this prevents us from merging
507 # The largefiles are seen as unknown, so this prevents us from merging
508 # in a file 'foo' if we already have a largefile with the same name.
508 # in a file 'foo' if we already have a largefile with the same name.
509 #
509 #
510 # The overridden function filters the unknown files by removing any
510 # The overridden function filters the unknown files by removing any
511 # largefiles. This makes the merge proceed and we can then handle this
511 # largefiles. This makes the merge proceed and we can then handle this
512 # case further in the overridden calculateupdates function below.
512 # case further in the overridden calculateupdates function below.
513 @eh.wrapfunction(merge, b'_checkunknownfile')
513 @eh.wrapfunction(merge, b'_checkunknownfile')
514 def overridecheckunknownfile(
514 def overridecheckunknownfile(
515 origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
515 origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
516 ):
516 ):
    if lfutil.standin(dirstate.normalize(f)) in wctx:
        return False
    return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)


# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        return mresult

    # Collect the largefile names affected by this merge.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

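    # 'lfiles' now holds the real largefile names (not standins) present in
    # p1 that this merge touches; below, each one's action is reconciled
    # with the action recorded for its standin.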
    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))

        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
            if sm == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(
                    lfile, ACTION_REMOVE, None, b'replaced by standin'
                )
                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        ACTION_KEEP,
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        ACTION_REMOVE,
                        None,
                        b'replaced by non-standin',
                    )
        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
            if lm == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        ACTION_KEEP,
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(
                        standin, ACTION_KEEP, None, b'replaces standin'
                    )
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    ACTION_REMOVE,
                    None,
                    b'replaced by non-standin',
                )

    return mresult


@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        with lfdirstate.changing_parents(repo):
            for lfile, args, msg in actions[
                MERGE_ACTION_LARGEFILE_MARK_REMOVED
            ]:
                # this should be executed before 'orig', to execute 'remove'
                # before all other actions
                repo.dirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=False
                )
                # make sure lfile doesn't get synclfdirstate'd as normal
                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
            lfdirstate.write(repo.currenttransaction())

    return orig(repo, actions, branchmerge, getfiledata)


# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
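    # A standin's content is just the largefile's hash, so comparing these
    # hashes is enough to detect identical edits without reading the
    # largefiles themselves.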
    if (
        ohash != ahash
        and ohash != dhash
        and (
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
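    # 0 reports a successful merge; the second value is presumably the
    # 'deleted' flag expected from filemerge's return contract.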
    return 0, False

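# Path-copy records refer to standins; translate both ends of each copy
# back to the largefile names users actually see.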
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    copies = orig(ctx1, ctx2, match=match)
    updated = {}

    for k, v in copies.items():
        updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v

    return updated


# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile so that it checks
# whether the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            if e.message != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

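        # util.copyfile is wrapped so that copies landing in the standin
        # area fail early when the destination largefile already exists
        # (unless --force), and so every (src, dest) pair gets recorded
        # for the dirstate fixups below.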
        def overridecopyfile(orig, src, dest, *args, **kwargs):
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.set_untracked(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                lfdirstate.set_tracked(destlfile)
        lfdirstate.write(repo.currenttransaction())
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result


# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

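        # Snapshot the standins' state so that, once the real revert has run
        # against them, only the largefiles whose standins actually changed
        # are refreshed at the end.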
        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )


# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result


@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        opargs = kwargs.setdefault('opargs', {})
        opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)


@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    lfrevs = kwargs.pop('lfrevs', None)
    pushop = orig(*args, **kwargs)
    pushop.lfrevs = lfrevs
    return pushop


@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])


@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % d
        )

    return orig(ui, source, dest, **opts)


@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # Caching is implicitly limited to the 'rev' option, since the dest
        # repo was truncated at that point. The user may expect a download
        # count with this option, so attempt the download whether or not this
        # is a largefile repo.
        if opts.get(b'all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo)

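            # downloadlfiles reports how many largefiles could not be
            # fetched; treat any shortfall as a failed clone.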
            if missing != 0:
                return None

    return result


@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
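    # Install the automated commit hook and silence largefiles status
    # output while rebase rewrites changesets; in-memory rebase is forced
    # off below because largefiles needs a real working copy to sync
    # standins.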
    try:
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()


@eh.extsetup
def overriderebase(ui):
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        pass
    else:

        def _dorebase(orig, *args, **kwargs):
            kwargs['inmemory'] = False
            return orig(*args, **kwargs)

        extensions.wrapfunction(rebase, b'_dorebase', _dorebase)


@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    with lfstatus(repo.unfiltered()):
        return orig(ui, repo.unfiltered(), dest, **opts)


@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    with lfstatus(web.repo):
        return orig(web)


@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
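    # Pre-populate the local largefile store for the archived revision so
    # that every standin encountered below can be resolved to real content.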
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive. That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()


@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)


# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        s = repo.status()
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_(b'uncommitted changes'))


@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)


@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]
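    # At this point 'forget' contains only true largefiles (their standins
    # are tracked in the manifest); warn about and reject any that are
    # already untracked.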

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot


def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pair of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)


def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []  # pytype: disable=unsupported-operands
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b' %s\n' % lfhash)

        else:
            toupload = set()
1451
1451
1452 def addfunc(fn, lfhash):
1452 def addfunc(fn, lfhash):
1453 toupload.add(fn)
1453 toupload.add(fn)
1454 lfhashes.add(lfhash)
1454 lfhashes.add(lfhash)
1455
1455
1456 def showhashes(fn):
1456 def showhashes(fn):
1457 pass
1457 pass
1458
1458
1459 _getoutgoings(repo, other, missing, addfunc)
1459 _getoutgoings(repo, other, missing, addfunc)
1460
1460
1461 if not toupload:
1461 if not toupload:
1462 ui.status(_(b'largefiles: no files to upload\n'))
1462 ui.status(_(b'largefiles: no files to upload\n'))
1463 else:
1463 else:
1464 ui.status(
1464 ui.status(
1465 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1465 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1466 )
1466 )
1467 for file in sorted(toupload):
1467 for file in sorted(toupload):
1468 ui.status(lfutil.splitstandin(file) + b'\n')
1468 ui.status(lfutil.splitstandin(file) + b'\n')
1469 showhashes(file)
1469 showhashes(file)
1470 ui.status(b'\n')
1470 ui.status(b'\n')
1471
1471
1472
1472
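# This hook is reached through the --large flag registered just below;
# e.g. 'hg outgoing --large' lists the largefiles a push would upload,
# and with --debug each largefile hash is printed as well (an illustrative
# description of the ui.status/ui.debug calls above).

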
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    # Nothing to do here other than add the extra help option; the hook above
    # processes it.
    return orig(*args, **kwargs)


def summaryremotehook(ui, repo, opts, changes):
    largeopt = opts.get(b'large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )


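# Illustrative output of the branch above (values are hypothetical):
#
#   $ hg summary --large
#   ...
#   largefiles: 3 entities for 2 files to upload

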
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)


@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s, mtime_boundary = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but let the original addremove handle
    # removing the standin. Monkey patching here makes sure we don't remove
    # the standin in the largefiles code, preventing a very confused state
    # later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be.
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)


# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only look up attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class, the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a workaround we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [
            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
        ]
        ignored = [
            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
        ]
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus


@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    with repo.wlock():
        before = repo.dirstate.parents()
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate.get_entry(f).removed:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

    return result


@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result


@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err


@eh.wrapfunction(merge, b'_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s, mtime_boundary = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.set_possibly_dirty(lfile)
        lfdirstate.write(repo.currenttransaction())

        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        with lfdirstate.changing_parents(repo):
            result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
        lfdirstate.write(repo.currenttransaction())

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

        return result


@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    result = orig(repo, files, *args, **kwargs)

    filelist = []
    for f in files:
        lf = lfutil.splitstandin(f)
        if lf is not None:
            filelist.append(lf)
    if filelist:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=filelist,
            printmessage=False,
            normallookup=True,
        )

    return result


@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if b'largefiles' in repo.requirements:
        reqs.add(b'largefiles')
    return reqs


_lfscheme = b'largefile://'


@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None, **kwargs):
    if url_.startswith(_lfscheme):
        if data:
            msg = b"cannot use data on a 'largefile://' url"
            raise error.ProgrammingError(msg)
        lfid = url_[len(_lfscheme) :]
        return storefactory.getlfile(ui, lfid)
    else:
        return orig(ui, url_, data=data, **kwargs)
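

# Illustrative use of the 'largefile://' scheme handled above (a sketch;
# the identifier after the scheme is assumed to be a largefile hash known
# to the store):
#
#   fileobj = urlmod.open(ui, b'largefile://' + lfhash)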
@@ -1,4303 +1,4303 b''
# mq.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''manage a stack of patches

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (a subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use :hg:`help COMMAND` for more details)::

  create new patch                          qnew
  import existing patch                     qimport

  print patch series                        qseries
  print applied patches                     qapplied

  add known patch to applied stack          qpush
  remove patch from applied stack           qpop
  refresh contents of top applied patch     qrefresh

By default, mq will automatically use git patches when required to
avoid losing file mode changes, copy records, binary files or empty
file creations or deletions. This behavior can be configured with::

  [mq]
  git = auto/keep/yes/no

If set to 'keep', mq will obey the [diff] section configuration while
preserving existing git patches upon qrefresh. If set to 'yes' or
'no', mq will override the [diff] section and always generate git or
regular patches, possibly losing data in the second case.

It may be desirable for mq changesets to be kept in the secret phase (see
:hg:`help phases`), which can be enabled with the following setting::

  [mq]
  secret = True

You will by default be managing a patch queue named "patches". You can
create other, independent patch queues with the :hg:`qqueue` command.

If the working directory contains uncommitted files, qpush, qpop and
qgoto abort immediately. If -f/--force is used, the changes are
discarded. Setting::

  [mq]
  keepchanges = True

makes them behave as if --keep-changes were passed, and non-conflicting
local changes will be tolerated and preserved. If incompatible options
such as -f/--force or --exact are passed, this setting is ignored.

This extension used to provide a strip command. This command now lives
in the strip extension.
'''
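
# An example configuration combining the options documented in the
# docstring above (illustrative values only):
#
#   [mq]
#   git = keep
#   secret = True
#   keepchanges = True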


import os
import re
import shutil
import sys
from mercurial.i18n import _
from mercurial.node import (
    bin,
    hex,
    nullrev,
    short,
)
from mercurial.pycompat import (
    delattr,
    getattr,
    open,
)
from mercurial import (
    cmdutil,
    commands,
    dirstateguard,
    encoding,
    error,
    extensions,
    hg,
    localrepo,
    lock as lockmod,
    logcmdutil,
    patch as patchmod,
    phases,
    pycompat,
    registrar,
    revsetlang,
    scmutil,
    smartset,
    strip,
    subrepoutil,
    util,
    vfs as vfsmod,
)
from mercurial.utils import (
    dateutil,
    stringutil,
    urlutil,
)

release = lockmod.release
seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))]

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'mq',
    b'git',
    default=b'auto',
)
configitem(
    b'mq',
    b'keepchanges',
    default=False,
)
configitem(
    b'mq',
    b'plain',
    default=False,
)
configitem(
    b'mq',
    b'secret',
    default=False,
)

# force load strip extension formerly included in mq and import some utility
try:
    extensions.find(b'strip')
except KeyError:
    # note: load is lazy so we could avoid the try-except,
    # but I (marmoute) prefer this explicit code.
    class dummyui:
        def debug(self, msg):
            pass

        def log(self, event, msgfmt, *msgargs, **opts):
            pass

    extensions.load(dummyui(), b'strip', b'')

strip = strip.strip


def checksubstate(repo, baserev=None):
    """return list of subrepos at a different revision than substate.
    Abort if any subrepos have uncommitted changes."""
    inclsubs = []
    wctx = repo[None]
    if baserev:
        bctx = repo[baserev]
    else:
        bctx = wctx.p1()
    for s in sorted(wctx.substate):
        wctx.sub(s).bailifchanged(True)
        if s not in bctx.substate or bctx.sub(s).dirty():
            inclsubs.append(s)
    return inclsubs


# Patch names look like unix file names.
# They must be joinable with the queue directory and result in the patch path.
normname = util.normpath


class statusentry:
    def __init__(self, node, name):
        self.node, self.name = node, name

    def __bytes__(self):
        return hex(self.node) + b':' + self.name

    __str__ = encoding.strmethod(__bytes__)
    __repr__ = encoding.strmethod(__bytes__)


# The order of the headers in 'hg export' HG patches:
HGHEADERS = [
    # '# HG changeset patch',
    b'# User ',
    b'# Date ',
    b'# ',
    b'# Branch ',
    b'# Node ID ',
    b'# Parent ',  # can occur twice for merges - but that is not relevant for mq
]
# The order of headers in plain 'mail style' patches:
PLAINHEADERS = {
    b'from': 0,
    b'date': 1,
    b'subject': 2,
}


def inserthgheader(lines, header, value):
    """Assuming lines contains an HG patch header, add a header line with value.
    >>> try: inserthgheader([], b'# Date ', b'z')
    ... except ValueError as inst: print("oops")
    oops
    >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '']
    >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
    ['# HG changeset patch', '# User y', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
    ...                b'# User ', b'z')
    ['# HG changeset patch', '# Date x', '# User z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '', '# Date y']
    >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '# Parent y']
    """
    start = lines.index(b'# HG changeset patch') + 1
    newindex = HGHEADERS.index(header)
    bestpos = len(lines)
    for i in range(start, len(lines)):
        line = lines[i]
        if not line.startswith(b'# '):
            bestpos = min(bestpos, i)
            break
        for lineindex, h in enumerate(HGHEADERS):
            if line.startswith(h):
                if lineindex == newindex:
                    lines[i] = header + value
                    return lines
                if lineindex > newindex:
                    bestpos = min(bestpos, i)
                break  # next line
    lines.insert(bestpos, header + value)
    return lines


def insertplainheader(lines, header, value):
    """For lines containing a plain patch header, add a header line with value.
    >>> insertplainheader([], b'Date', b'z')
    ['Date: z']
    >>> insertplainheader([b''], b'Date', b'z')
    ['Date: z', '']
    >>> insertplainheader([b'x'], b'Date', b'z')
    ['Date: z', '', 'x']
    >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
    ['From: y', 'Date: z', '', 'x']
    >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
    [' date : x', 'From: z', '']
    >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
    ['Date: z', '', 'Date: y']
    >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
    ['From: y', 'foo: bar', 'DATE: z', '', 'x']
    """
    newprio = PLAINHEADERS[header.lower()]
    bestpos = len(lines)
    for i, line in enumerate(lines):
        if b':' in line:
            lheader = line.split(b':', 1)[0].strip().lower()
            lprio = PLAINHEADERS.get(lheader, newprio + 1)
            if lprio == newprio:
                lines[i] = b'%s: %s' % (header, value)
                return lines
            if lprio > newprio and i < bestpos:
                bestpos = i
        else:
            if line:
                lines.insert(i, b'')
            if i < bestpos:
                bestpos = i
            break
    lines.insert(bestpos, b'%s: %s' % (header, value))
    return lines


class patchheader:
    def __init__(self, pf, plainmode=False):
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (
                    l.startswith(b"diff -")
                    or l.startswith(b"Index:")
                    or l.startswith(b"===========")
                ):
                    del lines[-1]
                else:
                    break

        def eatempty(lines):
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        branch = None
        nodeid = None
        diffstart = 0

        for line in open(pf, b'rb'):
            line = line.rstrip()
            if line.startswith(b'diff --git') or (
                diffstart and line.startswith(b'+++ ')
            ):
                diffstart = 2
                break
            diffstart = 0  # reset
            if line.startswith(b"--- "):
                diffstart = 1
                continue
            elif format == b"hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith(b"# User "):
                    user = line[7:]
                elif line.startswith(b"# Date "):
                    date = line[7:]
                elif line.startswith(b"# Parent "):
                    parent = line[9:].lstrip()  # handle double trailing space
                elif line.startswith(b"# Branch "):
                    branch = line[9:]
                elif line.startswith(b"# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith(b"# ") and line:
                    message.append(line)
                    format = None
            elif line == b'# HG changeset patch':
                message = []
                format = b"hgpatch"
            elif format != b"tagdone" and (
                line.startswith(b"Subject: ") or line.startswith(b"subject: ")
            ):
                subject = line[9:]
                format = b"tag"
            elif format != b"tagdone" and (
                line.startswith(b"From: ") or line.startswith(b"from: ")
            ):
                user = line[6:]
                format = b"tag"
            elif format != b"tagdone" and (
                line.startswith(b"Date: ") or line.startswith(b"date: ")
            ):
                date = line[6:]
                format = b"tag"
            elif format == b"tag" and line == b"":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = b"tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith(b"tag") and subject:
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = (
            plainmode
            or b'# HG changeset patch' not in self.comments
            and any(
                c.startswith(b'Date: ') or c.startswith(b'From: ')
                for c in self.comments
            )
        )

    def setuser(self, user):
        try:
            inserthgheader(self.comments, b'# User ', user)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, b'From', user)
            else:
                tmp = [b'# HG changeset patch', b'# User ' + user]
                self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        try:
            inserthgheader(self.comments, b'# Date ', date)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, b'Date', date)
            else:
                tmp = [b'# HG changeset patch', b'# Date ' + date]
                self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        try:
            inserthgheader(self.comments, b'# Parent ', parent)
        except ValueError:
            if not self.plainmode:
                tmp = [b'# HG changeset patch', b'# Parent ' + parent]
                self.comments = tmp + self.comments
        self.parent = parent

    def setmessage(self, message):
        if self.comments:
            self._delmsg()
        self.message = [message]
        if message:
            if self.plainmode and self.comments and self.comments[-1]:
                self.comments.append(b'')
            self.comments.append(message)

    def __bytes__(self):
        s = b'\n'.join(self.comments).rstrip()
        if not s:
            return b''
        return s + b'\n\n'

    __str__ = encoding.strmethod(__bytes__)

    def _delmsg(self):
        """Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line."""
        if self.message:
            subj = b'subject: ' + self.message[0].lower()
            for i in range(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]


def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensuring a commit respects the mq.secret setting

    It should be used instead of repo.commit inside the mq source for
    operations creating new changesets.
    """
    repo = repo.unfiltered()
    if phase is None:
        if repo.ui.configbool(b'mq', b'secret'):
            phase = phases.secret
    overrides = {(b'ui', b'allowemptycommit'): True}
    if phase is not None:
        overrides[(b'phases', b'new-commit')] = phase
    with repo.ui.configoverride(overrides, b'mq'):
        repo.ui.setconfig(b'ui', b'allowemptycommit', True)
        return repo.commit(*args, **kwargs)


class AbortNoCleanup(error.Abort):
    pass


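# Illustrative call of newcommit() (a sketch; the positional arguments are
# forwarded to repo.commit, and phase=None falls back to the [mq] secret
# handling shown above):
#
#   n = newcommit(repo, None, message, user, date, match=match)

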
class queue:
    def __init__(self, ui, baseui, path, patchdir=None):
        self.basepath = path
        try:
            with open(os.path.join(path, b'patches.queue'), 'rb') as fh:
                cur = fh.read().rstrip()

            if not cur:
                curpath = os.path.join(path, b'patches')
            else:
                curpath = os.path.join(path, b'patches-' + cur)
        except IOError:
            curpath = os.path.join(path, b'patches')
        self.path = patchdir or curpath
        self.opener = vfsmod.vfs(self.path)
        self.ui = ui
        self.baseui = baseui
        self.applieddirty = False
        self.seriesdirty = False
        self.added = []
        self.seriespath = b"series"
        self.statuspath = b"status"
        self.guardspath = b"guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
        gitmode = ui.config(b'mq', b'git').lower()
        boolmode = stringutil.parsebool(gitmode)
        if boolmode is not None:
            if boolmode:
                gitmode = b'yes'
            else:
                gitmode = b'no'
        self.gitmode = gitmode
        # deprecated config: mq.plain
        self.plainmode = ui.configbool(b'mq', b'plain')
        self.checkapplied = True

    @util.propertycache
    def applied(self):
        def parselines(lines):
            for l in lines:
                entry = l.split(b':', 1)
                if len(entry) > 1:
                    n, name = entry
                    yield statusentry(bin(n), name)
                elif l.strip():
                    self.ui.warn(
                        _(b'malformed mq status line: %s\n')
                        % stringutil.pprint(entry)
                    )
                # else we ignore empty lines

        try:
            lines = self.opener.read(self.statuspath).splitlines()
            return list(parselines(lines))
        except FileNotFoundError:
            return []

    @util.propertycache
    def fullseries(self):
        try:
            return self.opener.read(self.seriespath).splitlines()
        except FileNotFoundError:
            return []

    @util.propertycache
    def series(self):
        self.parseseries()
        return self.series

    @util.propertycache
    def seriesguards(self):
        self.parseseries()
        return self.seriesguards

    def invalidate(self):
        for a in 'applied fullseries series seriesguards'.split():
            if a in self.__dict__:
                delattr(self, a)
        self.applieddirty = False
        self.seriesdirty = False
        self.guardsdirty = False
        self.activeguards = None

    def diffopts(self, opts=None, patchfn=None, plain=False):
        """Return diff options tweaked for this mq use, possibly upgrading to
        git format, and possibly plain and without lossy options."""
        diffopts = patchmod.difffeatureopts(
            self.ui,
            opts,
            git=True,
            whitespace=not plain,
            formatchanging=not plain,
        )
        if self.gitmode == b'auto':
            diffopts.upgrade = True
        elif self.gitmode == b'keep':
            pass
        elif self.gitmode in (b'yes', b'no'):
            diffopts.git = self.gitmode == b'yes'
        else:
            raise error.Abort(
                _(b'mq.git option can be auto/keep/yes/no, got %s')
                % self.gitmode
            )
        if patchfn:
            diffopts = self.patchopts(diffopts, patchfn)
        return diffopts
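
    # Editor's note: the mq.git modes handled above map to configuration
    # such as the following hgrc sketch (any other value aborts):
    #
    #     [mq]
    #     # one of: auto (upgrade to git patches when necessary),
    #     # keep (preserve each patch's existing format), yes, no
    #     git = auto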

    def patchopts(self, diffopts, *patches):
        """Return a copy of input diff options with git set to true if
        referenced patch is a git patch and should be preserved as such.
        """
        diffopts = diffopts.copy()
        if not diffopts.git and self.gitmode == b'keep':
            for patchfn in patches:
                patchf = self.opener(patchfn, b'r')
                # if the patch was a git patch, refresh it as a git patch
                diffopts.git = any(
                    line.startswith(b'diff --git') for line in patchf
                )
                patchf.close()
        return diffopts

    def join(self, *p):
        return os.path.join(self.path, *p)

    def findseries(self, patch):
        def matchpatch(l):
            l = l.split(b'#', 1)[0]
            return l.strip() == patch

        for index, l in enumerate(self.fullseries):
            if matchpatch(l):
                return index
        return None

    guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parseseries(self):
        self.series = []
        self.seriesguards = []
        for l in self.fullseries:
            h = l.find(b'#')
            if h == -1:
                patch = l
                comment = b''
            elif h == 0:
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise error.Abort(
                        _(b'%s appears more than once in %s')
                        % (patch, self.join(self.seriespath))
                    )
                self.series.append(patch)
                self.seriesguards.append(self.guard_re.findall(comment))
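
    # Editor's example of the series-file syntax parsed above: anything
    # after '#' is a comment, and guard_re extracts '#+guard'/'#-guard'
    # tokens from it. Given a series file containing
    #
    #     fix-crash.patch
    #     experimental.patch  #+devel #-stable
    #
    # parseseries() produces series == [b'fix-crash.patch',
    # b'experimental.patch'] and seriesguards == [[], [b'+devel',
    # b'-stable']].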

    def checkguard(self, guard):
        if not guard:
            return _(b'guard cannot be an empty string')
        bad_chars = b'# \t\r\n\f'
        first = guard[0]
        if first in b'-+':
            return _(b'guard %r starts with invalid character: %r') % (
                guard,
                first,
            )
        for c in bad_chars:
            if c in guard:
                return _(b'invalid character in guard %r: %r') % (guard, c)

    def setactive(self, guards):
        for guard in guards:
            bad = self.checkguard(guard)
            if bad:
                raise error.Abort(bad)
        guards = sorted(set(guards))
        self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
        self.activeguards = guards
        self.guardsdirty = True

    def active(self):
        if self.activeguards is None:
            self.activeguards = []
            try:
                guards = self.opener.read(self.guardspath).split()
            except FileNotFoundError:
                guards = []
            for i, guard in enumerate(guards):
                bad = self.checkguard(guard)
                if bad:
                    self.ui.warn(
                        b'%s:%d: %s\n'
                        % (self.join(self.guardspath), i + 1, bad)
                    )
                else:
                    self.activeguards.append(guard)
        return self.activeguards

    def setguards(self, idx, guards):
        for g in guards:
            if len(g) < 2:
                raise error.Abort(_(b'guard %r too short') % g)
            if g[0] not in b'-+':
                raise error.Abort(_(b'guard %r starts with invalid char') % g)
            bad = self.checkguard(g[1:])
            if bad:
                raise error.Abort(bad)
        drop = self.guard_re.sub(b'', self.fullseries[idx])
        self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
        self.parseseries()
        self.seriesdirty = True

    def pushable(self, idx):
        if isinstance(idx, bytes):
            idx = self.series.index(idx)
        patchguards = self.seriesguards[idx]
        if not patchguards:
            return True, None
        guards = self.active()
        exactneg = [
            g for g in patchguards if g.startswith(b'-') and g[1:] in guards
        ]
        if exactneg:
            return False, stringutil.pprint(exactneg[0])
        pos = [g for g in patchguards if g.startswith(b'+')]
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, stringutil.pprint(exactpos[0])
            return False, b' '.join([stringutil.pprint(p) for p in pos])
        return True, b''
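
    # Editor's example of the guard evaluation above: with active guards
    # {devel}, a patch guarded '#-devel' is not pushable (an exact
    # negative match wins), a patch guarded only '#+stable' is not
    # pushable either (it has positive guards, none of which match),
    # while a patch guarded '#+devel' or carrying no guards at all is
    # pushable.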

    def explainpushable(self, idx, all_patches=False):
        if all_patches:
            write = self.ui.write
        else:
            write = self.ui.warn

        if all_patches or self.ui.verbose:
            if isinstance(idx, bytes):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(
                        _(b'allowing %s - no guards in effect\n')
                        % self.series[idx]
                    )
                else:
                    if not why:
                        write(
                            _(b'allowing %s - no matching negative guards\n')
                            % self.series[idx]
                        )
                    else:
                        write(
                            _(b'allowing %s - guarded by %s\n')
                            % (self.series[idx], why)
                        )
            if not pushable:
                if why:
                    write(
                        _(b'skipping %s - guarded by %s\n')
                        % (self.series[idx], why)
                    )
                else:
                    write(
                        _(b'skipping %s - no matching guards\n')
                        % self.series[idx]
                    )

    def savedirty(self):
        def writelist(items, path):
            fp = self.opener(path, b'wb')
            for i in items:
                fp.write(b"%s\n" % i)
            fp.close()

        if self.applieddirty:
            writelist(map(bytes, self.applied), self.statuspath)
            self.applieddirty = False
        if self.seriesdirty:
            writelist(self.fullseries, self.seriespath)
            self.seriesdirty = False
        if self.guardsdirty:
            writelist(self.activeguards, self.guardspath)
            self.guardsdirty = False
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []

    def removeundo(self, repo):
        undo = repo.sjoin(b'undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError as inst:
            self.ui.warn(
                _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
            )

    def backup(self, repo, files, copy=False):
        # backup local changes in --force case
        for f in sorted(files):
            absf = repo.wjoin(f)
            if os.path.lexists(absf):
                absorig = scmutil.backuppath(self.ui, repo, f)
                self.ui.note(
                    _(b'saving current version of %s as %s\n')
                    % (f, os.path.relpath(absorig))
                )

                if copy:
                    util.copyfile(absf, absorig)
                else:
                    util.rename(absf, absorig)

    def printdiff(
        self,
        repo,
        diffopts,
        node1,
        node2=None,
        files=None,
        fp=None,
        changes=None,
        opts=None,
    ):
        if opts is None:
            opts = {}
        stat = opts.get(b'stat')
        m = scmutil.match(repo[node1], files, opts)
        logcmdutil.diffordiffstat(
            self.ui,
            repo,
            diffopts,
            repo[node1],
            repo[node2],
            m,
            changes,
            stat,
            fp,
        )

    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        # first try just applying the patch
        (err, n) = self.apply(
            repo, [patch], update_status=False, strict=True, merge=rev
        )

        if err == 0:
            return (err, n)

        if n is None:
            raise error.Abort(_(b"apply failed for patch %s") % patch)

        self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        strip(self.ui, repo, [n], update=False, backup=False)

        ctx = repo[rev]
        ret = hg.merge(ctx, remind=False)
        if ret:
            raise error.Abort(_(b"update returned %d") % ret)
        n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
        if n is None:
            raise error.Abort(_(b"repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except Exception:
            raise error.Abort(_(b"unable to read %s") % patch)

        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, b"w")
        comments = bytes(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)

    def qparents(self, repo, rev=None):
        """return the mq-handled parent or p1

        In some cases mq can end up being the parent of a merge (e.g. an
        in-progress merge started with mq disabled); the appropriate
        parent may then be p2.

        If no parents are managed by mq, p1 is returned.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == repo.nullid:
                return p1
            if not self.applied:
                return None
            return self.applied[-1].node
        p1, p2 = repo.changelog.parents(rev)
        if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
            return p2
        return p1
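
    # Editor's summary sketch: with rev=None, a working directory whose
    # second parent is null simply yields p1; otherwise the node of the
    # last applied patch is returned, or None when nothing is applied.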

    def mergepatch(self, repo, mergeq, series, diffopts):
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = b".hg.patches.merge.marker"
            n = newcommit(repo, None, b'[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = True

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_(b"patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_(b"patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = True
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)

    def patch(self, repo, patchfile):
        """Apply patchfile to the working directory.
        patchfile: name of patch file"""
        files = set()
        try:
            fuzz = patchmod.patch(
                self.ui, repo, patchfile, strip=1, files=files, eolmode=None
            )
            return (True, list(files), fuzz)
        except Exception as inst:
            self.ui.note(stringutil.forcebytestr(inst) + b'\n')
            if not self.ui.verbose:
                self.ui.warn(_(b"patch failed, unable to continue (try -v)\n"))
            self.ui.traceback()
            return (False, list(files), False)

    def apply(
        self,
        repo,
        series,
        list=False,
        update_status=True,
        strict=False,
        patchdir=None,
        merge=None,
        all_files=None,
        tobackup=None,
        keepchanges=False,
    ):
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction(b"qpush")
            try:
                ret = self._apply(
                    repo,
                    series,
                    list,
                    update_status,
                    strict,
                    patchdir,
                    merge,
                    all_files=all_files,
                    tobackup=tobackup,
                    keepchanges=keepchanges,
                )
                tr.close()
                self.savedirty()
                return ret
            except AbortNoCleanup:
                tr.close()
                self.savedirty()
                raise
            except:  # re-raises
                try:
                    tr.abort()
                finally:
                    self.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)
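
    # Editor's note on the locking discipline above: the working-copy
    # lock is taken before the store lock, the transaction is opened
    # last, and release() unwinds them in reverse order. An equivalent
    # context-manager sketch (ignoring the AbortNoCleanup special case):
    #
    #     with repo.wlock(), repo.lock(), repo.transaction(b"qpush") as tr:
    #         ...  # mutate the repository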

    def _apply(
        self,
        repo,
        series,
        list=False,
        update_status=True,
        strict=False,
        patchdir=None,
        merge=None,
        all_files=None,
        tobackup=None,
        keepchanges=False,
    ):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_(b"applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_(b"unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = b"imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append(b"\nimported patch %s" % patchname)
                message = b'\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _(b"conflicting local changes found"),
                            hint=_(b"did you forget to qrefresh?"),
                        )
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_(b"patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                with repo.dirstate.changing_parents(repo):
                    for f in files:
                        repo.dirstate.update_file_p1(f, p1_tracked=True)
                    p1 = repo.dirstate.p1()
                    repo.setparents(p1, merge)

            if all_files and b'.hgsubstate' in all_files:
                wctx = repo[None]
                pctx = repo[b'.']
                overwrite = False
                mergedsubstate = subrepoutil.submerge(
                    repo, pctx, wctx, wctx, overwrite
                )
                files += mergedsubstate.keys()

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo.changelog.tip()
            n = newcommit(
                repo, None, message, ph.user, ph.date, match=match, force=True
            )
            if repo.changelog.tip() == oldtip:
                raise error.Abort(
                    _(b"qpush exactly duplicates child changeset")
                )
            if n is None:
                raise error.Abort(_(b"repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(
                    _(b"patch failed, rejects left in working directory\n")
                )
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_(b"fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
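
    # Editor's example of consuming the (error, hash) contract documented
    # in _apply(): callers typically branch on the error code, e.g.
    #
    #     err, n = self._apply(repo, series)
    #     if err == 1:
    #         ...  # a patch could not be read
    #     elif err == 2:
    #         ...  # a patch failed to apply; rejects were left behind
    #     elif err == 3:
    #         ...  # fuzz was found while applying in strict mode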

    def _cleanup(self, patches, numrevs, keep=False):
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                try:
                    os.unlink(self.join(p))
                except FileNotFoundError:
                    pass

        qfinished = []
        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True

        unknown = []

        sortedseries = []
        for p in patches:
            idx = self.findseries(p)
            if idx is None:
                sortedseries.append((-1, p))
            else:
                sortedseries.append((idx, p))

        sortedseries.sort(reverse=True)
        for (i, p) in sortedseries:
            if i != -1:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                rev = {entry.name: entry.node for entry in qfinished}
                for p in unknown:
                    msg = _(b'revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _(b'unknown patches: %s\n')
                raise error.Abort(b''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]

    def _revpatches(self, repo, revs):
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise error.Abort(_(b'revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                msg = _(b'cannot delete revision %d above applied patches')
                raise error.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in (b'[mq]: %s', b'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _(b'patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches

    def finish(self, repo, revs):
        # Manually trigger phase computation to ensure phasedefaults is
        # executed before we remove the patches.
        repo._phasecache
        patches = self._revpatches(repo, sorted(revs))
        qfinished = self._cleanup(patches, len(patches))
        if qfinished and repo.ui.configbool(b'mq', b'secret'):
            # only use this logic when the secret option is added
            oldqbase = repo[qfinished[0]]
            tphase = phases.newcommitphase(repo.ui)
            if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
                with repo.transaction(b'qfinish') as tr:
                    phases.advanceboundary(repo, tr, tphase, qfinished)

    def delete(self, repo, patches, opts):
        if not patches and not opts.get(b'rev'):
            raise error.Abort(
                _(b'qdelete requires at least one revision or patch name')
            )

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise error.Abort(_(b"cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise error.Abort(_(b"patch %s not in series file") % patch)
            if patch not in realpatches:
                realpatches.append(patch)

        numrevs = 0
        if opts.get(b'rev'):
            if not self.applied:
                raise error.Abort(_(b'no patches applied'))
            revs = logcmdutil.revrange(repo, opts.get(b'rev'))
            revs.sort()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get(b'keep'))

    def checktoppatch(self, repo):
        '''check that working directory is at qtip'''
        if self.applied:
            top = self.applied[-1].node
            patch = self.applied[-1].name
            if repo.dirstate.p1() != top:
                raise error.Abort(_(b"working directory revision is not qtip"))
            return top, patch
        return None, None

    def putsubstate2changes(self, substatestate, changes):
        if isinstance(changes, list):
            mar = changes[:3]
        else:
            mar = (changes.modified, changes.added, changes.removed)
        if any((b'.hgsubstate' in files for files in mar)):
            return  # already listed
        # not yet listed
        if substatestate.added or not substatestate.any_tracked:
            mar[1].append(b'.hgsubstate')
        elif substatestate.removed:
            mar[2].append(b'.hgsubstate')
        else:  # modified
            mar[0].append(b'.hgsubstate')

    def checklocalchanges(self, repo, force=False, refresh=True):
        excsuffix = b''
        if refresh:
            excsuffix = b', qrefresh first'
            # plain versions for i18n tool to detect them
            _(b"local changes found, qrefresh first")
            _(b"local changed subrepos found, qrefresh first")

        s = repo.status()
        if not force:
            cmdutil.checkunfinished(repo)
            if s.modified or s.added or s.removed or s.deleted:
                _(b"local changes found")  # i18n tool detection
                raise error.Abort(_(b"local changes found" + excsuffix))
            if checksubstate(repo):
                _(b"local changed subrepos found")  # i18n tool detection
                raise error.Abort(
                    _(b"local changed subrepos found" + excsuffix)
                )
        else:
            cmdutil.checkunfinished(repo, skipmerge=True)
        return s

    _reserved = (b'series', b'status', b'guards', b'.', b'..')

    def checkreservedname(self, name):
        if name in self._reserved:
            raise error.Abort(
                _(b'"%s" cannot be used as the name of a patch') % name
            )
        if name != name.strip():
            # whitespace is stripped by parseseries()
            raise error.Abort(
                _(b'patch name cannot begin or end with whitespace')
            )
        for prefix in (b'.hg', b'.mq'):
            if name.startswith(prefix):
                raise error.Abort(
                    _(b'patch name cannot begin with "%s"') % prefix
                )
        for c in (b'#', b':', b'\r', b'\n'):
            if c in name:
                raise error.Abort(
                    _(b'%r cannot be used in the name of a patch')
                    % pycompat.bytestr(c)
                )

    def checkpatchname(self, name, force=False):
        self.checkreservedname(name)
        if not force and os.path.exists(self.join(name)):
            if os.path.isdir(self.join(name)):
                raise error.Abort(
                    _(b'"%s" already exists as a directory') % name
                )
            else:
                raise error.Abort(_(b'patch "%s" already exists') % name)

    def makepatchname(self, title, fallbackname):
        """Return a suitable filename for title, adding a suffix to make
        it unique in the existing list"""
        namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
        namebase = namebase[:75]  # avoid too long name (issue5117)
        if namebase:
            try:
                self.checkreservedname(namebase)
            except error.Abort:
                namebase = fallbackname
        else:
            namebase = fallbackname
        name = namebase
        i = 0
        while True:
            if name not in self.fullseries:
                try:
                    self.checkpatchname(name)
                    break
                except error.Abort:
                    pass
            i += 1
            name = b'%s__%d' % (namebase, i)
        return name
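
    # Editor's example of makepatchname() behaviour: a title of
    # b'Fix issue: crash on push!' becomes b'fix_issue_crash_on_push',
    # and if that name is already taken the loop above appends a suffix,
    # yielding b'fix_issue_crash_on_push__1', then b'..__2', and so on.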

    def checkkeepchanges(self, keepchanges, force):
        if force and keepchanges:
            raise error.Abort(_(b'cannot use both --force and --keep-changes'))

    def new(self, repo, patchfn, *pats, **opts):
        """options:
        msg: a string or a no-argument function returning a string
        """
        opts = pycompat.byteskwargs(opts)
        msg = opts.get(b'msg')
        edit = opts.get(b'edit')
        editform = opts.get(b'editform', b'mq.qnew')
        user = opts.get(b'user')
        date = opts.get(b'date')
        if date:
            date = dateutil.parsedate(date)
        diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True)
        if opts.get(b'checkname', True):
            self.checkpatchname(patchfn)
        inclsubs = checksubstate(repo)
        if inclsubs:
            substatestate = repo.dirstate.get_entry(b'.hgsubstate')
        if opts.get(b'include') or opts.get(b'exclude') or pats:
            # detect missing files in pats
            def badfn(f, msg):
                if f != b'.hgsubstate':  # .hgsubstate is auto-created
                    raise error.Abort(b'%s: %s' % (f, msg))

            match = scmutil.match(repo[None], pats, opts, badfn=badfn)
            changes = repo.status(match=match)
        else:
            changes = self.checklocalchanges(repo, force=True)
        commitfiles = list(inclsubs)
        commitfiles.extend(changes.modified)
        commitfiles.extend(changes.added)
        commitfiles.extend(changes.removed)
        match = scmutil.matchfiles(repo, commitfiles)
        if len(repo[None].parents()) > 1:
            raise error.Abort(_(b'cannot manage merge changesets'))
        self.checktoppatch(repo)
        insert = self.fullseriesend()
        with repo.wlock():
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, b"w")
            except IOError as e:
                raise error.Abort(
                    _(b'cannot write patch "%s": %s')
                    % (patchfn, encoding.strtolocal(e.strerror))
                )
            try:
                defaultmsg = b"[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:

                    def finishdesc(desc):
                        if desc.rstrip():
                            return desc
                        else:
                            return defaultmsg

                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _(b'Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(
                        finishdesc=finishdesc,
                        extramsg=extramsg,
                        editform=editform,
                    )
                    commitmsg = msg
                else:
                    commitmsg = msg or defaultmsg

                n = newcommit(
                    repo,
                    None,
                    commitmsg,
                    user,
                    date,
                    match=match,
                    force=True,
                    editor=editor,
                )
                if n is None:
                    raise error.Abort(_(b"repo commit failed"))
                try:
                    self.fullseries[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parseseries()
                    self.seriesdirty = True
                    self.applieddirty = True
                    nctx = repo[n]
                    ph = patchheader(self.join(patchfn), self.plainmode)
                    if user:
                        ph.setuser(user)
                    if date:
                        ph.setdate(b'%d %d' % date)
                    ph.setparent(hex(nctx.p1().node()))
                    msg = nctx.description().strip()
                    if msg == defaultmsg.strip():
                        msg = b''
                    ph.setmessage(msg)
                    p.write(bytes(ph))
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        if inclsubs:
                            self.putsubstate2changes(substatestate, changes)
                        chunks = patchmod.diff(
                            repo,
                            node1=parent,
                            node2=n,
                            changes=changes,
                            opts=diffopts,
                        )
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except:  # re-raises
                    repo.rollback()
                    raise
            except Exception:
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except OSError:
                    self.ui.warn(_(b'error unlinking %s\n') % patchpath)
                raise
        self.removeundo(repo)

    def isapplied(self, patch):
        """returns (index, rev, patch)"""
        for i, a in enumerate(self.applied):
            if a.name == patch:
                return (i, a.node, a.name)
        return None

    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number (as string) to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        def partialname(s):
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(b' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == b'qtip':
                    return self.series[self.seriesend(True) - 1]
                if s == b'qbase':
                    return self.series[0]
            return None

        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

            if not strict:
                res = partialname(patch)
                if res:
                    return res
                minus = patch.rfind(b'-')
                if minus >= 0:
                    res = partialname(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus + 1 :] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                plus = patch.rfind(b'+')
                if plus >= 0:
                    res = partialname(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus + 1 :] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise error.Abort(_(b"patch %s not in series") % patch)

    def push(
        self,
        repo,
        patch=None,
        force=False,
        list=False,
        mergeq=None,
        all=False,
        move=False,
        exact=False,
        nobackup=False,
        keepchanges=False,
    ):
        self.checkkeepchanges(keepchanges, force)
        diffopts = self.diffopts()
        with repo.wlock():
            heads = []
            for hs in repo.branchmap().iterheads():
                heads.extend(hs)
            if not heads:
                heads = [repo.nullid]
            if repo.dirstate.p1() not in heads and not exact:
                self.ui.status(_(b"(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_(b'no patches in series\n'))
                return 0

            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward);
            # qpush B is a NOP (no change); qpush A is an error (can't
            # go backwards with qpush).
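            #
            # Illustrative summary of the checks below (hypothetical names),
            # with series = [A, B, C] and applied = [A, B]:
            #   qpush C  -> applies C
            #   qpush B  -> warns "qpush: B is already at the top", returns 0
            #   qpush A  -> aborts with "cannot push to a previous patch: A"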
            if patch:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _(b'qpush: %s is already at the top\n') % patch
                    )
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise error.Abort(
                            _(b"cannot push to a previous patch: %s") % patch
                        )
                else:
                    if reason:
                        reason = _(b'guarded by %s') % reason
                    else:
                        reason = _(b'no matching guards')
                    self.ui.warn(
                        _(b"cannot push '%s' - %s\n") % (patch, reason)
                    )
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_(b'all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.seriesend()
            if start == len(self.series):
                self.ui.warn(_(b'patch series already fully applied\n'))
                return 1
            if not force and not keepchanges:
                self.checklocalchanges(repo, refresh=self.applied)

            if exact:
                if keepchanges:
                    raise error.Abort(
                        _(b"cannot use --exact and --keep-changes together")
                    )
                if move:
                    raise error.Abort(
                        _(b'cannot use --exact and --move together')
                    )
                if self.applied:
                    raise error.Abort(
                        _(b'cannot push --exact with applied patches')
                    )
                root = self.series[start]
                target = patchheader(self.join(root), self.plainmode).parent
                if not target:
                    raise error.Abort(
                        _(b"%s does not have a parent recorded") % root
                    )
                if not repo[target] == repo[b'.']:
                    hg.update(repo, target)

            if move:
                if not patch:
                    raise error.Abort(_(b"please specify the patch to move"))
                for fullstart, rpn in enumerate(self.fullseries):
                    # strip markers for patch guards
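                    # (a guarded series entry looks like, hypothetically,
                    # b'fix.diff #+stable'; guard_re.split() keeps just
                    # b'fix.diff' for the comparison)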
                    if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                        break
                for i, rpn in enumerate(self.fullseries[fullstart:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = fullstart + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(fullstart, fullpatch)
                self.parseseries()
                self.seriesdirty = True

            self.applieddirty = True
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            tobackup = set()
            if (not nobackup and force) or keepchanges:
                status = self.checklocalchanges(repo, force=True)
                if keepchanges:
                    tobackup.update(
                        status.modified
                        + status.added
                        + status.removed
                        + status.deleted
                    )
                else:
                    tobackup.update(status.modified + status.added)

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(
                        repo,
                        s,
                        list,
                        all_files=all_files,
                        tobackup=tobackup,
                        keepchanges=keepchanges,
                    )
            except AbortNoCleanup:
                raise
            except:  # re-raises
                self.ui.warn(_(b'cleaning up working directory...\n'))
                cmdutil.revert(
                    self.ui,
                    repo,
                    repo[b'.'],
                    no_backup=True,
                )
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                self.ui.warn(_(b'done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _(b"errors during apply, please fix and qrefresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_(b"now at: %s\n") % top)
            return ret[0]

    def pop(
        self,
        repo,
        patch=None,
        force=False,
        update=True,
        all=False,
        nobackup=False,
        keepchanges=False,
    ):
        self.checkkeepchanges(keepchanges, force)
        with repo.wlock():
            if patch:
                # (index, node, name)
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise error.Abort(_(b"patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_(b"no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
                return

            if not update:
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_(b"qpop: forcing dirstate update\n"))
                        update = True
            else:
                parents = [p.node() for p in repo[None].parents()]
                update = any(
                    entry.node in parents for entry in self.applied[start:]
                )

            tobackup = set()
            if update:
                s = self.checklocalchanges(repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(s.modified + s.added)
                elif keepchanges:
                    tobackup.update(
                        s.modified + s.added + s.removed + s.deleted
                    )

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise error.Abort(_(b'trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise error.Abort(
                    _(
                        b"popping would remove a revision not "
                        b"managed by this patch queue"
                    )
                )
            if not repo[self.applied[-1].node].mutable():
                raise error.Abort(
                    _(b"popping would remove a public revision"),
                    hint=_(b"see 'hg help phases' for details"),
                )

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
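            # in outline: delete files the popped patches added, restore
            # modified/removed files from the new parent qp, then move the
            # dirstate parents back to qp -- a plain working-copy update
            # with no merge logic needed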
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                st = repo.status(qp, b'.')
                m, a, r, d = st.modified, st.added, st.removed, st.deleted
                if d:
                    raise error.Abort(_(b"deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    raise error.Abort(_(b"local changes found, qrefresh first"))
                self.backup(repo, tobackup)
-                with repo.dirstate.parentchange(repo):
+                with repo.dirstate.changing_parents(repo):
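                    # this changeset only renames the context manager:
                    # dirstate.parentchange() is now
                    # dirstate.changing_parents(); the dirstate and parent
                    # mutations below stay inside it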
                    for f in a:
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                        repo.dirstate.update_file(
                            f, p1_tracked=False, wc_tracked=False
                        )
                    for f in m + r:
                        fctx = ctx[f]
                        repo.wwrite(f, fctx.data(), fctx.flags())
                        repo.dirstate.update_file(
                            f, p1_tracked=True, wc_tracked=True
                        )
                    repo.setparents(qp, repo.nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_(b"popping %s\n") % patch.name)
            del self.applied[start:end]
            strip(self.ui, repo, [rev], update=False, backup=False)
            for s, state in repo[b'.'].substate.items():
                repo[b'.'].sub(s).get(state)
            if self.applied:
                self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_(b"patch queue now empty\n"))

    def diff(self, repo, pats, opts):
        top, patch = self.checktoppatch(repo)
        if not top:
            self.ui.write(_(b"no patches applied\n"))
            return
        qp = self.qparents(repo, top)
        if opts.get(b'reverse'):
            node1, node2 = None, qp
        else:
            node1, node2 = qp, None
        diffopts = self.diffopts(opts, patch)
        self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)

    def refresh(self, repo, pats=None, **opts):
        opts = pycompat.byteskwargs(opts)
        if not self.applied:
            self.ui.write(_(b"no patches applied\n"))
            return 1
        msg = opts.get(b'msg', b'').rstrip()
        edit = opts.get(b'edit')
        editform = opts.get(b'editform', b'mq.qrefresh')
        newuser = opts.get(b'user')
        newdate = opts.get(b'date')
        if newdate:
            newdate = b'%d %d' % dateutil.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise error.Abort(
                    _(b"cannot qrefresh a revision with children")
                )
            if not repo[top].mutable():
                raise error.Abort(
                    _(b"cannot qrefresh public revision"),
                    hint=_(b"see 'hg help phases' for details"),
                )

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)

            inclsubs = checksubstate(repo, patchparent)
            if inclsubs:
                substatestate = repo.dirstate.get_entry(b'.hgsubstate')

            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts(
                {b'git': opts.get(b'git')}, patchfn, plain=True
            )
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, b'w', atomictemp=True)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   st = repo.status(top, patchparent)
            # but we do it backwards to take advantage of manifest/changelog
            # caching against the next repo.status call
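            # (with the arguments in this order, st describes what the qtip
            # commit itself did: st.added lists files the patch added and
            # st.removed the files it removed, hence the aa/dd names below)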
            st = repo.status(patchparent, top)
            mm, aa, dd = st.modified, st.added, st.removed
            ctx = repo[top]
            aaa = aa[:]
            match1 = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get(b'short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
                # filter with include/exclude options
                match1 = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            stb = repo.status(match=match)
            m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
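            # (e.g., hypothetically, a file this patch added and that was
            # then edited in the working copy appears in m; keeping it out
            # of mm leaves it classified as added rather than modified)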
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)

            # create 'match' that includes the files to be recommitted.
            # apply match1 via repo.status to ensure correct case handling.
            st = repo.status(patchparent, match=match1)
            cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted
            allmatches = set(cm + ca + cr + cd)
            refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]

            files = set(inclsubs)
            for x in refreshchanges:
                files.update(x)
            match = scmutil.matchfiles(repo, files)

            bmlist = repo[top].bookmarks()

-            with repo.dirstate.parentchange(repo):
+            with repo.dirstate.changing_parents(repo):
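                # same rename as in pop() above: the dirstate rewrite and
                # the strip of the old qtip happen inside changing_parents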
                # XXX do we actually need the dirstateguard
                dsguard = None
                try:
                    dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
                    if diffopts.git or diffopts.upgrade:
                        copies = {}
                        for dst in a:
                            src = repo.dirstate.copied(dst)
                            # during qfold, the source file for copies may
                            # be removed. Treat this as a simple add.
                            if src is not None and src in repo.dirstate:
                                copies.setdefault(src, []).append(dst)
                            repo.dirstate.update_file(
                                dst, p1_tracked=False, wc_tracked=True
                            )
                        # remember the copies between patchparent and qtip
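                        # (this chains copies across the fold: if,
                        # hypothetically, qtip copied a->b and the working
                        # copy then copied b->c, the refreshed patch records
                        # the single copy a->c)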
                        for dst in aaa:
                            src = ctx[dst].copysource()
                            if src:
                                copies.setdefault(src, []).extend(
                                    copies.get(dst, [])
                                )
                                if dst in a:
                                    copies[src].append(dst)
                            # we can't copy a file created by the patch itself
                            if dst in copies:
                                del copies[dst]
                        for src, dsts in copies.items():
                            for dst in dsts:
                                repo.dirstate.copy(src, dst)
                    else:
                        for dst in a:
                            repo.dirstate.update_file(
                                dst, p1_tracked=False, wc_tracked=True
                            )
                        # Drop useless copy information
                        for f in list(repo.dirstate.copies()):
                            repo.dirstate.copy(None, f)
                    for f in r:
                        repo.dirstate.update_file_p1(f, p1_tracked=True)
                    # if the patch excludes a modified file, mark that
                    # file with mtime=0 so status can see it.
                    mm = []
                    for i in range(len(m) - 1, -1, -1):
                        if not match1(m[i]):
                            mm.append(m[i])
                            del m[i]
                    for f in m:
                        repo.dirstate.update_file_p1(f, p1_tracked=True)
                    for f in mm:
                        repo.dirstate.update_file_p1(f, p1_tracked=True)
                    for f in forget:
                        repo.dirstate.update_file_p1(f, p1_tracked=False)

                    user = ph.user or ctx.user()

                    oldphase = repo[top].phase()

                    # assumes strip can roll itself back if interrupted
                    repo.setparents(*cparents)
                    self.applied.pop()
                    self.applieddirty = True
                    strip(self.ui, repo, [top], update=False, backup=False)
                    dsguard.close()
                finally:
                    release(dsguard)

            try:
                # might be nice to attempt to roll back strip after this

                defaultmsg = b"[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:

                    def finishdesc(desc):
                        if desc.rstrip():
                            ph.setmessage(desc)
                            return desc
                        return defaultmsg

                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _(b'Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(
                        finishdesc=finishdesc,
                        extramsg=extramsg,
                        editform=editform,
                    )
                    message = msg or b"\n".join(ph.message)
                elif not msg:
                    if not ph.message:
                        message = defaultmsg
                    else:
                        message = b"\n".join(ph.message)
                else:
                    message = msg
                    ph.setmessage(msg)

                # Ensure we create a new changeset in the same phase as
                # the old one.
                lock = tr = None
                try:
                    lock = repo.lock()
                    tr = repo.transaction(b'mq')
                    n = newcommit(
                        repo,
                        oldphase,
                        message,
                        user,
                        ph.date,
                        match=match,
                        force=True,
                        editor=editor,
                    )
                    # only write patch after a successful commit
                    c = [list(x) for x in refreshchanges]
                    if inclsubs:
                        self.putsubstate2changes(substatestate, c)
                    chunks = patchmod.diff(
                        repo, patchparent, changes=c, opts=diffopts
                    )
                    comments = bytes(ph)
                    if comments:
                        patchf.write(comments)
                    for chunk in chunks:
                        patchf.write(chunk)
                    patchf.close()

                    marks = repo._bookmarks
                    marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
                    tr.close()

                    self.applied.append(statusentry(n, patchfn))
                finally:
                    lockmod.release(tr, lock)
            except:  # re-raises
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(
                    _(
                        b'qrefresh interrupted while patch was popped! '
                        b'(revert --all, qpush to recover)\n'
                    )
                )
                raise
        finally:
            wlock.release()
            self.removeundo(repo)

    def init(self, repo, create=False):
        if not create and os.path.isdir(self.path):
            raise error.Abort(_(b"patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except FileExistsError:
            if not create:
                raise
        if create:
            return self.qrepo(create=True)

    def unapplied(self, repo, patch=None):
        if patch and patch not in self.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        if not patch:
            start = self.seriesend()
        else:
            start = self.series.index(patch) + 1
        unapplied = []
        for i in range(start, len(self.series)):
            pushable, reason = self.pushable(i)
            if pushable:
                unapplied.append((i, self.series[i]))
            self.explainpushable(i)
        return unapplied

    def qseries(
        self,
        repo,
        missing=None,
        start=0,
        length=None,
        status=None,
        summary=False,
    ):
        def displayname(pfx, patchname, state):
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                if ph.message:
                    msg = ph.message[0]
                else:
                    msg = b''

                if self.ui.formatted():
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = stringutil.ellipsis(msg, width)
                    else:
                        msg = b''
                self.ui.write(patchname, label=b'qseries.' + state)
                self.ui.write(b': ')
                self.ui.write(msg, label=b'qseries.message.' + state)
            else:
                self.ui.write(patchname, label=b'qseries.' + state)
            self.ui.write(b'\n')

        applied = {p.name for p in self.applied}
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(b"%d" % (start + length - 1))
            for i in range(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = b'A', b'applied'
                elif self.pushable(i)[0]:
                    char, state = b'U', b'unapplied'
                else:
                    char, state = b'G', b'guarded'
                pfx = b''
                if self.ui.verbose:
                    pfx = b'%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    continue
                displayname(pfx, patch, state)
        else:
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1 :]
                for f in files:
                    fl = os.path.join(d, f)
                    if (
                        fl not in self.series
                        and fl
                        not in (
                            self.statuspath,
                            self.seriespath,
                            self.guardspath,
                        )
                        and not fl.startswith(b'.')
                    ):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = b'D ' if self.ui.verbose else b''
                displayname(pfx, x, b'missing')

    def issaveline(self, l):
        return l.name == b'.hg.patches.save.line'

    def qrepo(self, create=False):
        ui = self.baseui.copy()
        # copy back attributes set by ui.pager()
        if self.ui.pageractive and not ui.pageractive:
            ui.pageractive = self.ui.pageractive
            # internal config: ui.formatted
            ui.setconfig(
                b'ui',
                b'formatted',
                self.ui.config(b'ui', b'formatted'),
                b'mqpager',
            )
            ui.setconfig(
                b'ui',
                b'interactive',
                self.ui.config(b'ui', b'interactive'),
                b'mqpager',
            )
        if create or os.path.isdir(self.join(b".hg")):
            return hg.repository(ui, path=self.path, create=create)

    def restore(self, repo, rev, delete=None, qupdate=None):
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == b'Patch Data:':
                datastart = i + 1
            elif line.startswith(b'Dirstate:'):
                l = line.rstrip()
                l = l[10:].split(b' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                l = line.rstrip()
                n, name = l.split(b':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_(b"no saved patch data found\n"))
            return 1
        self.ui.warn(_(b"restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn(_(b"save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_(b"removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                strip(self.ui, repo, [rev], update=update, backup=False)
        if qpp:
            self.ui.warn(
                _(b"saved queue repository parents: %s %s\n")
                % (short(qpp[0]), short(qpp[1]))
            )
            if qupdate:
                self.ui.status(_(b"updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_(b"unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])

    def save(self, repo, msg=None):
        if not self.applied:
            self.ui.warn(_(b"save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_(b"status is already saved\n"))
            return 1

        if not msg:
            msg = _(b"hg patches saved state")
        else:
            msg = b"hg patches: " + msg.rstrip(b'\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += b"\n\nPatch Data:\n"
        msg += b''.join(b'%s\n' % x for x in self.applied)
        msg += b''.join(b':%s\n' % x for x in self.fullseries)
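        # the description built above is exactly what restore() parses;
        # with hypothetical names it looks like:
        #   hg patches saved state
        #   Dirstate: <p1-hex> <p2-hex>
        #
        #   Patch Data:
        #   <node-hex>:applied.diff
        #   :series-entry.diff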
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_(b"repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, b'.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)

    def fullseriesend(self):
        if self.applied:
            p = self.applied[-1].name
            end = self.findseries(p)
            if end is None:
                return len(self.fullseries)
            return end + 1
        return 0

    def seriesend(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
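        # e.g. (hypothetical) series = [a, b, c] with only a applied:
        # seriesend() -> 1 if b is pushable (2 if b is guarded off), while
        # seriesend(True) -> 1 regardless of guards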
        end = 0

        def nextpatch(start):
            if all_patches or start >= len(self.series):
                return start
            for i in range(start, len(self.series)):
                p, reason = self.pushable(i)
                if p:
                    return i
                self.explainpushable(i)
            return len(self.series)

        if self.applied:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                return 0
            return nextpatch(end + 1)
        return nextpatch(end)

    def appliedname(self, index):
        pname = self.applied[index].name
        if not self.ui.verbose:
            p = pname
        else:
            p = (b"%d" % self.series.index(pname)) + b" " + pname
        return p

    def qimport(
        self,
        repo,
        files,
        patchname=None,
        rev=None,
        existing=None,
        force=None,
        git=False,
    ):
        def checkseries(patchname):
            if patchname in self.series:
                raise error.Abort(
                    _(b'patch %s is already in the series file') % patchname
                )

        if rev:
            if files:
                raise error.Abort(
                    _(b'option "-r" not valid when importing files')
                )
            rev = logcmdutil.revrange(repo, rev)
            rev.sort(reverse=True)
        elif not files:
            raise error.Abort(_(b'no files or revisions specified'))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise error.Abort(
                _(b'option "-n" not valid when importing multiple patches')
            )
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
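            # e.g. importing --rev for a hypothetical linear run x -> y -> z
            # (z being the parent of qbase, or a head when nothing is
            # applied) is accepted; skipping y, or importing a revision
            # with other descendants, aborts below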
2424 heads = repo.changelog.heads(repo.changelog.node(rev.first()))
2424 heads = repo.changelog.heads(repo.changelog.node(rev.first()))
2425 if len(heads) > 1:
2425 if len(heads) > 1:
2426 raise error.Abort(
2426 raise error.Abort(
2427 _(b'revision %d is the root of more than one branch')
2427 _(b'revision %d is the root of more than one branch')
2428 % rev.last()
2428 % rev.last()
2429 )
2429 )
2430 if self.applied:
2430 if self.applied:
2431 base = repo.changelog.node(rev.first())
2431 base = repo.changelog.node(rev.first())
2432 if base in [n.node for n in self.applied]:
2432 if base in [n.node for n in self.applied]:
2433 raise error.Abort(
2433 raise error.Abort(
2434 _(b'revision %d is already managed') % rev.first()
2434 _(b'revision %d is already managed') % rev.first()
2435 )
2435 )
2436 if heads != [self.applied[-1].node]:
2436 if heads != [self.applied[-1].node]:
2437 raise error.Abort(
2437 raise error.Abort(
2438 _(b'revision %d is not the parent of the queue')
2438 _(b'revision %d is not the parent of the queue')
2439 % rev.first()
2439 % rev.first()
2440 )
2440 )
2441 base = repo.changelog.rev(self.applied[0].node)
2441 base = repo.changelog.rev(self.applied[0].node)
2442 lastparent = repo.changelog.parentrevs(base)[0]
2442 lastparent = repo.changelog.parentrevs(base)[0]
2443 else:
2443 else:
2444 if heads != [repo.changelog.node(rev.first())]:
2444 if heads != [repo.changelog.node(rev.first())]:
2445 raise error.Abort(
2445 raise error.Abort(
2446 _(b'revision %d has unmanaged children') % rev.first()
2446 _(b'revision %d has unmanaged children') % rev.first()
2447 )
2447 )
2448 lastparent = None
2448 lastparent = None
2449
2449
2450 diffopts = self.diffopts({b'git': git})
2450 diffopts = self.diffopts({b'git': git})
2451 with repo.transaction(b'qimport') as tr:
2451 with repo.transaction(b'qimport') as tr:
2452 for r in rev:
2452 for r in rev:
2453 if not repo[r].mutable():
2453 if not repo[r].mutable():
2454 raise error.Abort(
2454 raise error.Abort(
2455 _(b'revision %d is not mutable') % r,
2455 _(b'revision %d is not mutable') % r,
2456 hint=_(b"see 'hg help phases' " b'for details'),
2456 hint=_(b"see 'hg help phases' " b'for details'),
2457 )
2457 )
2458 p1, p2 = repo.changelog.parentrevs(r)
2458 p1, p2 = repo.changelog.parentrevs(r)
2459 n = repo.changelog.node(r)
2459 n = repo.changelog.node(r)
2460 if p2 != nullrev:
                    if p2 != nullrev:
                        raise error.Abort(
                            _(b'cannot import merge revision %d') % r
                        )
                    if lastparent and lastparent != r:
                        raise error.Abort(
                            _(b'revision %d is not the parent of %d')
                            % (r, lastparent)
                        )
                    lastparent = p1

                    if not patchname:
                        patchname = self.makepatchname(
                            repo[r].description().split(b'\n', 1)[0],
                            b'%d.diff' % r,
                        )
                    checkseries(patchname)
                    self.checkpatchname(patchname, force)
                    self.fullseries.insert(0, patchname)

                    with self.opener(patchname, b"w") as fp:
                        cmdutil.exportfile(repo, [n], fp, opts=diffopts)

                    se = statusentry(n, patchname)
                    self.applied.insert(0, se)

                    self.added.append(patchname)
                    imported.append(patchname)
                    patchname = None
                if rev and repo.ui.configbool(b'mq', b'secret'):
                    # if we added anything with --rev, move the secret root
                    phases.retractboundary(repo, tr, phases.secret, [n])
                self.parseseries()
                self.applieddirty = True
                self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                if filename == b'-':
                    raise error.Abort(
                        _(b'-e is incompatible with import from -')
                    )
                filename = normname(filename)
                self.checkreservedname(filename)
                if urlutil.url(filename).islocal():
                    originpath = self.join(filename)
                    if not os.path.isfile(originpath):
                        raise error.Abort(
                            _(b"patch %s does not exist") % filename
                        )

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(
                        _(b'renaming %s to %s\n') % (filename, patchname)
                    )
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                if filename == b'-' and not patchname:
                    raise error.Abort(
                        _(b'need --name to import a patch from -')
                    )
                elif not patchname:
                    patchname = normname(
                        os.path.basename(filename.rstrip(b'/'))
                    )
                self.checkpatchname(patchname, force)
                try:
                    if filename == b'-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise error.Abort(_(b"unable to read file %s") % filename)
                patchf = self.opener(patchname, b"w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
            self.parseseries()
            self.seriesdirty = True
            self.ui.warn(_(b"adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported


def fixkeepchangesopts(ui, opts):
    if (
        not ui.configbool(b'mq', b'keepchanges')
        or opts.get(b'force')
        or opts.get(b'exact')
    ):
        return opts
    opts = dict(opts)
    opts[b'keep_changes'] = True
    return opts

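# Example (illustrative, not part of the upstream source): with
# "keepchanges = True" in the [mq] configuration section and neither
# --force nor --exact given, fixkeepchangesopts() turns
#
#   {b'force': None, b'exact': None}
#
# into the same dict with b'keep_changes' set to True, so the command
# behaves as if --keep-changes had been passed explicitly.
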
@command(
    b"qdelete|qremove|qrm",
    [
        (b'k', b'keep', None, _(b'keep patch file')),
        (
            b'r',
            b'rev',
            [],
            _(b'stop managing a revision (DEPRECATED)'),
            _(b'REV'),
        ),
    ],
    _(b'hg qdelete [-k] [PATCH]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    q = repo.mq
    q.delete(repo, patches, pycompat.byteskwargs(opts))
    q.savedirty()
    return 0


@command(
    b"qapplied",
    [(b'1', b'last', None, _(b'show only the preceding applied patch'))]
    + seriesopts,
    _(b'hg qapplied [-1] [-s] [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)

    if patch:
        if patch not in q.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    if opts.get(b'last') and not end:
        ui.write(_(b"no patches applied\n"))
        return 1
    elif opts.get(b'last') and end == 1:
        ui.write(_(b"only one patch applied\n"))
        return 1
    elif opts.get(b'last'):
        start = end - 2
        end = 1
    else:
        start = 0

    q.qseries(
        repo, length=end, start=start, status=b'A', summary=opts.get(b'summary')
    )

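# Example (illustrative; patch names are hypothetical): with p0, p1 and p2
# applied, `hg qapplied` lists all three, while `hg qapplied --last` prints
# only p1 -- the patch just below the top of the stack (start = end - 2
# above).
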
@command(
    b"qunapplied",
    [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts,
    _(b'hg qunapplied [-1] [-s] [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    if patch:
        if patch not in q.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if start == len(q.series) and opts.get(b'first'):
        ui.write(_(b"all patches applied\n"))
        return 1

    if opts.get(b'first'):
        length = 1
    else:
        length = None
    q.qseries(
        repo,
        start=start,
        length=length,
        status=b'U',
        summary=opts.get(b'summary'),
    )


@command(
    b"qimport",
    [
        (b'e', b'existing', None, _(b'import file in patch directory')),
        (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')),
        (b'f', b'force', None, _(b'overwrite existing files')),
        (
            b'r',
            b'rev',
            [],
            _(b'place existing revisions under mq control'),
            _(b'REV'),
        ),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (b'P', b'push', None, _(b'qpush after importing')),
    ],
    _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev . -n patch will place the current revision
    under mq control). With -g/--git, patches imported with --rev will
    use the git diff format. See the diffs help topic for information
    on why this is important for preserving rename/copy information
    and permission changes. Use :hg:`qfinish` to remove changesets
    from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    opts = pycompat.byteskwargs(opts)
    with repo.lock():  # because this may move phase
        q = repo.mq
        try:
            imported = q.qimport(
                repo,
                filename,
                patchname=opts.get(b'name'),
                existing=opts.get(b'existing'),
                force=opts.get(b'force'),
                rev=opts.get(b'rev'),
                git=opts.get(b'git'),
            )
        finally:
            q.savedirty()

    if imported and opts.get(b'push') and not opts.get(b'rev'):
        return q.push(repo, imported[-1])
    return 0

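# Example (illustrative): importing a patch from standard input requires an
# explicit name, e.g.
#
#   hg export tip | hg qimport -n tip.patch -
#
# which writes the exported text into the patch directory as tip.patch and
# inserts the name into the series after the last applied patch.
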
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if r:
        if not os.path.exists(r.wjoin(b'.hgignore')):
            fp = r.wvfs(b'.hgignore', b'w')
            fp.write(b'^\\.hg\n')
            fp.write(b'^\\.mq\n')
            fp.write(b'syntax: glob\n')
            fp.write(b'status\n')
            fp.write(b'guards\n')
            fp.close()
        if not os.path.exists(r.wjoin(b'series')):
            r.wvfs(b'series', b'w').close()
        r[None].add([b'.hgignore', b'series'])
        commands.add(ui, r)
    return 0

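# For reference, the .hgignore written above looks like this (the first two
# lines are regexps, the rest are globs after the "syntax: glob" switch):
#
#   ^\.hg
#   ^\.mq
#   syntax: glob
#   status
#   guards
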
@command(
    b"qinit",
    [(b'c', b'create-repo', None, _(b'create queue repository'))],
    _(b'hg qinit [-c]'),
    helpcategory=command.CATEGORY_REPO_CREATION,
    helpbasic=True,
)
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    return qinit(ui, repo, create=opts.get('create_repo'))


@command(
    b"qclone",
    [
        (b'', b'pull', None, _(b'use pull protocol to copy metadata')),
        (
            b'U',
            b'noupdate',
            None,
            _(b'do not update the new working directories'),
        ),
        (
            b'',
            b'uncompressed',
            None,
            _(b'use uncompressed transfer (fast over LAN)'),
        ),
        (
            b'p',
            b'patches',
            b'',
            _(b'location of source patch repository'),
            _(b'REPO'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'hg qclone [OPTION]... SOURCE [DEST]'),
    helpcategory=command.CATEGORY_REPO_CREATION,
    norepo=True,
)
def clone(ui, source, dest=None, **opts):
    """clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command cannot check whether patches are
    applied in source, so it cannot guarantee that patches are not
    applied in destination. If you clone a remote repository, make
    sure beforehand that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith(b'/'):
            url = url[:-1]
        return url + b'/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    source_path = urlutil.get_clone_path_obj(ui, source)
    sr = hg.peer(ui, opts, source_path)

    # patches repo (source only)
    if opts.get(b'patches'):
        patches_path = urlutil.get_clone_path_obj(ui, opts.get(b'patches'))
    else:
        # XXX path: we should turn this into a path object
        patches_path = patchdir(sr)
    try:
        hg.peer(ui, opts, patches_path)
    except error.RepoError:
        raise error.Abort(
            _(b'versioned patch repository not found (see init --mq)')
        )
    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable(b'lookup'):
        try:
            qbase = sr.lookup(b'qbase')
        except error.RepoError:
            pass

    ui.note(_(b'cloning main repository\n'))
    sr, dr = hg.clone(
        ui,
        opts,
        sr.url(),
        dest,
        pull=opts.get(b'pull'),
        revs=destrev,
        update=False,
        stream=opts.get(b'uncompressed'),
    )

    ui.note(_(b'cloning patch repository\n'))
    hg.clone(
        ui,
        opts,
        opts.get(b'patches') or patchdir(sr),
        patchdir(dr),
        pull=opts.get(b'pull'),
        update=not opts.get(b'noupdate'),
        stream=opts.get(b'uncompressed'),
    )

    if dr.local():
        repo = dr.local()
        if qbase:
            ui.note(
                _(
                    b'stripping applied patches from destination '
                    b'repository\n'
                )
            )
            strip(ui, repo, [qbase], update=False, backup=None)
        if not opts.get(b'noupdate'):
            ui.note(_(b'updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())

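# Example (illustrative; the URL is hypothetical):
#
#   hg qclone http://example.com/repo local-copy
#
# clones the main repository into local-copy and its patch repository into
# local-copy/.hg/patches, the default location computed by patchdir() above.
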
@command(
    b"qcommit|qci",
    commands.table[b"commit|ci"][1],
    _(b'hg qcommit [OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    inferrepo=True,
)
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        raise error.Abort(b'no queue repository')
    commands.commit(r.ui, r, *pats, **opts)


@command(
    b"qseries",
    [
        (b'm', b'missing', None, _(b'print patches not in series')),
    ]
    + seriesopts,
    _(b'hg qseries [-ms]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    repo.mq.qseries(
        repo, missing=opts.get('missing'), summary=opts.get('summary')
    )
    return 0


@command(
    b"qtop",
    seriesopts,
    _(b'hg qtop [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    if q.applied:
        t = q.seriesend(True)
    else:
        t = 0

    if t:
        q.qseries(
            repo,
            start=t - 1,
            length=1,
            status=b'A',
            summary=opts.get('summary'),
        )
    else:
        ui.write(_(b"no patches applied\n"))
        return 1


@command(
    b"qnext",
    seriesopts,
    _(b'hg qnext [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    end = q.seriesend()
    if end == len(q.series):
        ui.write(_(b"all patches applied\n"))
        return 1
    q.qseries(repo, start=end, length=1, summary=opts.get('summary'))


@command(
    b"qprev",
    seriesopts,
    _(b'hg qprev [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    l = len(q.applied)
    if l == 1:
        ui.write(_(b"only one patch applied\n"))
        return 1
    if not l:
        ui.write(_(b"no patches applied\n"))
        return 1
    idx = q.series.index(q.applied[-2].name)
    q.qseries(
        repo, start=idx, length=1, status=b'A', summary=opts.get('summary')
    )

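# Example (illustrative; patch names are hypothetical): with series
# [a.patch, b.patch, c.patch] and a.patch, b.patch applied:
#
#   hg qtop   -> b.patch  (top of the applied stack)
#   hg qnext  -> c.patch  (next pushable patch)
#   hg qprev  -> a.patch  (applied patch below the top)
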
def setupheaderopts(ui, opts):
    if not opts.get(b'user') and opts.get(b'currentuser'):
        opts[b'user'] = ui.username()
    if not opts.get(b'date') and opts.get(b'currentdate'):
        opts[b'date'] = b"%d %d" % dateutil.makedate()

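# Example (illustrative): dateutil.makedate() returns a (unixtime, offset)
# pair, so -D/--currentdate stamps the patch with something like
# b"1700000000 -3600" -- seconds since the epoch plus the timezone offset
# in seconds (the value here assumes UTC+1 and Mercurial's sign convention).
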
@command(
    b"qnew",
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')),
        (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')),
        (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')),
        (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')),
    ]
    + cmdutil.walkopts
    + cmdutil.commitopts,
    _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
    inferrepo=True,
)
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    opts = pycompat.byteskwargs(opts)
    msg = cmdutil.logmessage(ui, opts)
    q = repo.mq
    opts[b'msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **pycompat.strkwargs(opts))
    q.savedirty()
    return 0

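# Example (illustrative; names are hypothetical): start a patch containing
# only the current changes to one file, stamped with the current user and
# date:
#
#   hg qnew -U -D -m "frob the widget" frob.patch src/widget.py
#
# Other working-directory changes stay behind as uncommitted modifications.
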
@command(
    b"qrefresh",
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (
            b's',
            b'short',
            None,
            _(b'refresh only files already in the patch and specified files'),
        ),
        (
            b'U',
            b'currentuser',
            None,
            _(b'add/update author field in patch with current user'),
        ),
        (
            b'u',
            b'user',
            b'',
            _(b'add/update author field in patch with given user'),
            _(b'USER'),
        ),
        (
            b'D',
            b'currentdate',
            None,
            _(b'add/update date field in patch with current date'),
        ),
        (
            b'd',
            b'date',
            b'',
            _(b'add/update date field in patch with given date'),
            _(b'DATE'),
        ),
    ]
    + cmdutil.walkopts
    + cmdutil.commitopts,
    _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
    inferrepo=True,
)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    with repo.wlock():
        ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
        q.savedirty()
        return ret

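# Example (illustrative; the file name is hypothetical):
#
#   hg qrefresh src/widget.py
#
# keeps only the changes to src/widget.py in the current patch; all other
# modifications remain in the working directory, as described above.
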
@command(
    b"qdiff",
    cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
    _(b'hg qdiff [OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_FILE_CONTENTS,
    helpbasic=True,
    inferrepo=True,
)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    ui.pager(b'qdiff')
    repo.mq.diff(repo, pats, pycompat.byteskwargs(opts))
    return 0


@command(
    b'qfold',
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'k', b'keep', None, _(b'keep folded patch files')),
    ]
    + cmdutil.commitopts,
    _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    if not files:
        raise error.Abort(_(b'qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_(b'no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup(b'qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_(b'skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise error.Abort(
                _(b'qfold cannot fold already applied patch %s') % p
            )
        patches.append(p)

    for p in patches:
        if not message:
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_(b'error folding patch %s') % p)

    if not message:
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append(b'* * *')
                message.extend(msg)
        message = b'\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    with repo.wlock():
        q.refresh(
            repo,
            msg=message,
            git=diffopts.git,
            edit=opts.get(b'edit'),
            editform=b'mq.qfold',
        )
        q.delete(repo, patches, opts)
        q.savedirty()

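# Example (illustrative; patch names are hypothetical): with fix-1.patch
# applied and fix-2.patch still unapplied,
#
#   hg qfold fix-2.patch
#
# applies fix-2.patch on top, refreshes fix-1.patch with the cumulative
# diff, joins the two headers with a "* * *" line, and deletes the folded
# patch file (use -k/--keep to retain it).
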
@command(
    b"qgoto",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'overwrite any local changes')),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qgoto [OPTION]... PATCH'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def goto(ui, repo, patch, **opts):
    """push or pop patches until named patch is at top of stack

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get(b'no_backup')
    keepchanges = opts.get(b'keep_changes')
    if q.isapplied(patch):
        ret = q.pop(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    else:
        ret = q.push(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    q.savedirty()
    return ret


@command(
    b"qguard",
    [
        (b'l', b'list', None, _(b'list all patches and guards')),
        (b'n', b'none', None, _(b'drop all guards')),
    ],
    _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def guard(ui, repo, *args, **opts):
    """set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    """

    def status(idx):
        guards = q.seriesguards[idx] or [b'unguarded']
        if q.series[idx] in applied:
            state = b'applied'
        elif q.pushable(idx)[0]:
            state = b'unapplied'
        else:
            state = b'guarded'
        label = b'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write(b'%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith(b'+'):
                ui.write(guard, label=b'qguard.positive')
            elif guard.startswith(b'-'):
                ui.write(guard, label=b'qguard.negative')
            else:
                ui.write(guard, label=b'qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(b' ')
        ui.write(b'\n')

    q = repo.mq
    applied = {p.name for p in q.applied}
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise error.Abort(
                _(b'cannot mix -l/--list with options or arguments')
            )
        for i in range(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in b'-+':
        if not q.applied:
            raise error.Abort(_(b'no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in b'-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_(b'no patch to work with'))
    if args or opts.get('none'):
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_(b'no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        status(q.series.index(q.lookup(patch)))

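# Example (illustrative; guard and patch names are hypothetical): only push
# a patch when the "stable" guard is selected:
#
#   hg qguard backport.patch -- +stable
#   hg qselect stable    # qpush will now apply backport.patch
#   hg qselect --none    # guards deactivated; the patch is skipped again
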
@command(
    b"qheader",
    [],
    _(b'hg qheader [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_(b'no patches applied\n'))
            return 1
        patch = q.lookup(b'qtip')
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write(b'\n'.join(ph.message) + b'\n')


def lastsavename(path):
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    namere = re.compile(b"%s.([0-9]+)" % base)
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)


def savename(path):
    (last, index) = lastsavename(path)
    if last is None:
        index = 0
    newpath = path + b".%d" % (index + 1)
    return newpath

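# Example (illustrative): with saved queues patches.1 and patches.2 already
# on disk, lastsavename(b'.hg/patches') returns (b'.hg/patches.2', 2) and
# savename(b'.hg/patches') returns b'.hg/patches.3', the next free name.
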
@command(
    b"qpush",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'apply on top of local changes')),
        (
            b'e',
            b'exact',
            None,
            _(b'apply the target patch to its recorded parent'),
        ),
        (b'l', b'list', None, _(b'list patch name in commit text')),
        (b'a', b'all', None, _(b'apply all patches')),
        (b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')),
        (b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')),
        (
            b'',
            b'move',
            None,
            _(b'reorder patch series and apply only the patch'),
        ),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True,
)
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    if opts.get(b'merge'):
        if opts.get(b'name'):
            newpath = repo.vfs.join(opts.get(b'name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_(b"no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_(b"merging with queue at: %s\n") % mergeq.path)
    ret = q.push(
        repo,
        patch,
        force=opts.get(b'force'),
        list=opts.get(b'list'),
        mergeq=mergeq,
        all=opts.get(b'all'),
        move=opts.get(b'move'),
        exact=opts.get(b'exact'),
        nobackup=opts.get(b'no_backup'),
        keepchanges=opts.get(b'keep_changes'),
    )
    return ret

3528 @command(
3528 @command(
3529 b"qpop",
3529 b"qpop",
3530 [
3530 [
3531 (b'a', b'all', None, _(b'pop all patches')),
3531 (b'a', b'all', None, _(b'pop all patches')),
3532 (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
3532 (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
3533 (
3533 (
3534 b'',
3534 b'',
3535 b'keep-changes',
3535 b'keep-changes',
3536 None,
3536 None,
3537 _(b'tolerate non-conflicting local changes'),
3537 _(b'tolerate non-conflicting local changes'),
3538 ),
3538 ),
3539 (b'f', b'force', None, _(b'forget any local changes to patched files')),
3539 (b'f', b'force', None, _(b'forget any local changes to patched files')),
3540 (b'', b'no-backup', None, _(b'do not save backup copies of files')),
3540 (b'', b'no-backup', None, _(b'do not save backup copies of files')),
3541 ],
3541 ],
3542 _(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
3542 _(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
3543 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3543 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3544 helpbasic=True,
3544 helpbasic=True,
3545 )
3545 )
3546 def pop(ui, repo, patch=None, **opts):
3546 def pop(ui, repo, patch=None, **opts):
3547 """pop the current patch off the stack
3547 """pop the current patch off the stack
3548
3548
3549 Without argument, pops off the top of the patch stack. If given a
3549 Without argument, pops off the top of the patch stack. If given a
3550 patch name, keeps popping off patches until the named patch is at
3550 patch name, keeps popping off patches until the named patch is at
3551 the top of the stack.
3551 the top of the stack.
3552
3552
3553 By default, abort if the working directory contains uncommitted
3553 By default, abort if the working directory contains uncommitted
3554 changes. With --keep-changes, abort only if the uncommitted files
3554 changes. With --keep-changes, abort only if the uncommitted files
3555 overlap with patched files. With -f/--force, backup and discard
3555 overlap with patched files. With -f/--force, backup and discard
3556 changes made to such files.
3556 changes made to such files.
3557
3557
3558 Return 0 on success.
3558 Return 0 on success.
3559 """
3559 """
3560 opts = pycompat.byteskwargs(opts)
3560 opts = pycompat.byteskwargs(opts)
3561 opts = fixkeepchangesopts(ui, opts)
3561 opts = fixkeepchangesopts(ui, opts)
3562 localupdate = True
3562 localupdate = True
3563 if opts.get(b'name'):
3563 if opts.get(b'name'):
3564 q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
3564 q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
3565 ui.warn(_(b'using patch queue: %s\n') % q.path)
3565 ui.warn(_(b'using patch queue: %s\n') % q.path)
3566 localupdate = False
3566 localupdate = False
3567 else:
3567 else:
3568 q = repo.mq
3568 q = repo.mq
3569 ret = q.pop(
3569 ret = q.pop(
3570 repo,
3570 repo,
3571 patch,
3571 patch,
3572 force=opts.get(b'force'),
3572 force=opts.get(b'force'),
3573 update=localupdate,
3573 update=localupdate,
3574 all=opts.get(b'all'),
3574 all=opts.get(b'all'),
3575 nobackup=opts.get(b'no_backup'),
3575 nobackup=opts.get(b'no_backup'),
3576 keepchanges=opts.get(b'keep_changes'),
3576 keepchanges=opts.get(b'keep_changes'),
3577 )
3577 )
3578 q.savedirty()
3578 q.savedirty()
3579 return ret
3579 return ret
3580
3580
3581
3581
3582 @command(
3582 @command(
3583 b"qrename|qmv",
3583 b"qrename|qmv",
3584 [],
3584 [],
3585 _(b'hg qrename PATCH1 [PATCH2]'),
3585 _(b'hg qrename PATCH1 [PATCH2]'),
3586 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3586 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3587 )
3587 )
3588 def rename(ui, repo, patch, name=None, **opts):
3588 def rename(ui, repo, patch, name=None, **opts):
3589 """rename a patch
3589 """rename a patch
3590
3590
3591 With one argument, renames the current patch to PATCH1.
3591 With one argument, renames the current patch to PATCH1.
3592 With two arguments, renames PATCH1 to PATCH2.
3592 With two arguments, renames PATCH1 to PATCH2.
3593
3593
3594 Returns 0 on success."""
3594 Returns 0 on success."""
3595 q = repo.mq
3595 q = repo.mq
3596 if not name:
3596 if not name:
3597 name = patch
3597 name = patch
3598 patch = None
3598 patch = None
3599
3599
3600 if patch:
3600 if patch:
3601 patch = q.lookup(patch)
3601 patch = q.lookup(patch)
3602 else:
3602 else:
3603 if not q.applied:
3603 if not q.applied:
3604 ui.write(_(b'no patches applied\n'))
3604 ui.write(_(b'no patches applied\n'))
3605 return
3605 return
3606 patch = q.lookup(b'qtip')
3606 patch = q.lookup(b'qtip')
3607 absdest = q.join(name)
3607 absdest = q.join(name)
3608 if os.path.isdir(absdest):
3608 if os.path.isdir(absdest):
3609 name = normname(os.path.join(name, os.path.basename(patch)))
3609 name = normname(os.path.join(name, os.path.basename(patch)))
3610 absdest = q.join(name)
3610 absdest = q.join(name)
3611 q.checkpatchname(name)
3611 q.checkpatchname(name)
3612
3612
3613 ui.note(_(b'renaming %s to %s\n') % (patch, name))
3613 ui.note(_(b'renaming %s to %s\n') % (patch, name))
3614 i = q.findseries(patch)
3614 i = q.findseries(patch)
3615 guards = q.guard_re.findall(q.fullseries[i])
3615 guards = q.guard_re.findall(q.fullseries[i])
3616 q.fullseries[i] = name + b''.join([b' #' + g for g in guards])
3616 q.fullseries[i] = name + b''.join([b' #' + g for g in guards])
3617 q.parseseries()
3617 q.parseseries()
3618 q.seriesdirty = True
3618 q.seriesdirty = True
3619
3619
3620 info = q.isapplied(patch)
3620 info = q.isapplied(patch)
3621 if info:
3621 if info:
3622 q.applied[info[0]] = statusentry(info[1], name)
3622 q.applied[info[0]] = statusentry(info[1], name)
3623 q.applieddirty = True
3623 q.applieddirty = True
3624
3624
3625 destdir = os.path.dirname(absdest)
3625 destdir = os.path.dirname(absdest)
3626 if not os.path.isdir(destdir):
3626 if not os.path.isdir(destdir):
3627 os.makedirs(destdir)
3627 os.makedirs(destdir)
3628 util.rename(q.join(patch), absdest)
3628 util.rename(q.join(patch), absdest)
3629 r = q.qrepo()
3629 r = q.qrepo()
3630 if r and patch in r.dirstate:
3630 if r and patch in r.dirstate:
3631 wctx = r[None]
3631 wctx = r[None]
3632 with r.wlock():
3632 with r.wlock():
3633 if r.dirstate.get_entry(patch).added:
3633 if r.dirstate.get_entry(patch).added:
3634 r.dirstate.set_untracked(patch)
3634 r.dirstate.set_untracked(patch)
3635 r.dirstate.set_tracked(name)
3635 r.dirstate.set_tracked(name)
3636 else:
3636 else:
3637 wctx.copy(patch, name)
3637 wctx.copy(patch, name)
3638 wctx.forget([patch])
3638 wctx.forget([patch])
3639
3639
3640 q.savedirty()
3640 q.savedirty()
3641
3641
3642
3642
3643 @command(
3643 @command(
3644 b"qrestore",
3644 b"qrestore",
3645 [
3645 [
3646 (b'd', b'delete', None, _(b'delete save entry')),
3646 (b'd', b'delete', None, _(b'delete save entry')),
3647 (b'u', b'update', None, _(b'update queue working directory')),
3647 (b'u', b'update', None, _(b'update queue working directory')),
3648 ],
3648 ],
3649 _(b'hg qrestore [-d] [-u] REV'),
3649 _(b'hg qrestore [-d] [-u] REV'),
3650 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3650 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3651 )
3651 )
3652 def restore(ui, repo, rev, **opts):
3652 def restore(ui, repo, rev, **opts):
3653 """restore the queue state saved by a revision (DEPRECATED)
3653 """restore the queue state saved by a revision (DEPRECATED)
3654
3654
3655 This command is deprecated, use :hg:`rebase` instead."""
3655 This command is deprecated, use :hg:`rebase` instead."""
3656 rev = repo.lookup(rev)
3656 rev = repo.lookup(rev)
3657 q = repo.mq
3657 q = repo.mq
3658 q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update'))
3658 q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update'))
3659 q.savedirty()
3659 q.savedirty()
3660 return 0
3660 return 0
3661
3661
3662
3662
3663 @command(
3663 @command(
3664 b"qsave",
3664 b"qsave",
3665 [
3665 [
3666 (b'c', b'copy', None, _(b'copy patch directory')),
3666 (b'c', b'copy', None, _(b'copy patch directory')),
3667 (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
3667 (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
3668 (b'e', b'empty', None, _(b'clear queue status file')),
3668 (b'e', b'empty', None, _(b'clear queue status file')),
3669 (b'f', b'force', None, _(b'force copy')),
3669 (b'f', b'force', None, _(b'force copy')),
3670 ]
3670 ]
3671 + cmdutil.commitopts,
3671 + cmdutil.commitopts,
3672 _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
3672 _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
3673 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3673 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3674 )
3674 )
3675 def save(ui, repo, **opts):
3675 def save(ui, repo, **opts):
3676 """save current queue state (DEPRECATED)
3676 """save current queue state (DEPRECATED)
3677
3677
3678 This command is deprecated, use :hg:`rebase` instead."""
3678 This command is deprecated, use :hg:`rebase` instead."""
3679 q = repo.mq
3679 q = repo.mq
3680 opts = pycompat.byteskwargs(opts)
3680 opts = pycompat.byteskwargs(opts)
3681 message = cmdutil.logmessage(ui, opts)
3681 message = cmdutil.logmessage(ui, opts)
3682 ret = q.save(repo, msg=message)
3682 ret = q.save(repo, msg=message)
3683 if ret:
3683 if ret:
3684 return ret
3684 return ret
3685 q.savedirty() # save to .hg/patches before copying
3685 q.savedirty() # save to .hg/patches before copying
3686 if opts.get(b'copy'):
3686 if opts.get(b'copy'):
3687 path = q.path
3687 path = q.path
3688 if opts.get(b'name'):
3688 if opts.get(b'name'):
3689 newpath = os.path.join(q.basepath, opts.get(b'name'))
3689 newpath = os.path.join(q.basepath, opts.get(b'name'))
3690 if os.path.exists(newpath):
3690 if os.path.exists(newpath):
3691 if not os.path.isdir(newpath):
3691 if not os.path.isdir(newpath):
3692 raise error.Abort(
3692 raise error.Abort(
3693 _(b'destination %s exists and is not a directory')
3693 _(b'destination %s exists and is not a directory')
3694 % newpath
3694 % newpath
3695 )
3695 )
3696 if not opts.get(b'force'):
3696 if not opts.get(b'force'):
3697 raise error.Abort(
3697 raise error.Abort(
3698 _(b'destination %s exists, use -f to force') % newpath
3698 _(b'destination %s exists, use -f to force') % newpath
3699 )
3699 )
3700 else:
3700 else:
3701 newpath = savename(path)
3701 newpath = savename(path)
3702 ui.warn(_(b"copy %s to %s\n") % (path, newpath))
3702 ui.warn(_(b"copy %s to %s\n") % (path, newpath))
3703 util.copyfiles(path, newpath)
3703 util.copyfiles(path, newpath)
3704 if opts.get(b'empty'):
3704 if opts.get(b'empty'):
3705 del q.applied[:]
3705 del q.applied[:]
3706 q.applieddirty = True
3706 q.applieddirty = True
3707 q.savedirty()
3707 q.savedirty()
3708 return 0
3708 return 0
3709
3709
3710
3710
3711 @command(
3711 @command(
3712 b"qselect",
3712 b"qselect",
3713 [
3713 [
3714 (b'n', b'none', None, _(b'disable all guards')),
3714 (b'n', b'none', None, _(b'disable all guards')),
3715 (b's', b'series', None, _(b'list all guards in series file')),
3715 (b's', b'series', None, _(b'list all guards in series file')),
3716 (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
3716 (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
3717 (b'', b'reapply', None, _(b'pop, then reapply patches')),
3717 (b'', b'reapply', None, _(b'pop, then reapply patches')),
3718 ],
3718 ],
3719 _(b'hg qselect [OPTION]... [GUARD]...'),
3719 _(b'hg qselect [OPTION]... [GUARD]...'),
3720 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3720 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3721 )
3721 )
3722 def select(ui, repo, *args, **opts):
3722 def select(ui, repo, *args, **opts):
3723 """set or print guarded patches to push
3723 """set or print guarded patches to push
3724
3724
3725 Use the :hg:`qguard` command to set or print guards on a patch, then use
3725 Use the :hg:`qguard` command to set or print guards on a patch, then use
3726 qselect to tell mq which guards to use. A patch will be pushed if
3726 qselect to tell mq which guards to use. A patch will be pushed if
3727 it has no guards or any positive guards match the currently
3727 it has no guards or any positive guards match the currently
3728 selected guard, but will not be pushed if any negative guards
3728 selected guard, but will not be pushed if any negative guards
3729 match the current guard. For example::
3729 match the current guard. For example::
3730
3730
3731 qguard foo.patch -- -stable (negative guard)
3731 qguard foo.patch -- -stable (negative guard)
3732 qguard bar.patch +stable (positive guard)
3732 qguard bar.patch +stable (positive guard)
3733 qselect stable
3733 qselect stable
3734
3734
3735 This activates the "stable" guard. mq will skip foo.patch (because
3735 This activates the "stable" guard. mq will skip foo.patch (because
3736 it has a negative match) but push bar.patch (because it has a
3736 it has a negative match) but push bar.patch (because it has a
3737 positive match).
3737 positive match).
3738
3738
3739 With no arguments, prints the currently active guards.
3739 With no arguments, prints the currently active guards.
3740 With one argument, sets the active guard.
3740 With one argument, sets the active guard.
3741
3741
3742 Use -n/--none to deactivate guards (no other arguments needed).
3742 Use -n/--none to deactivate guards (no other arguments needed).
3743 When no guards are active, patches with positive guards are
3743 When no guards are active, patches with positive guards are
3744 skipped and patches with negative guards are pushed.
3744 skipped and patches with negative guards are pushed.
3745
3745
3746 qselect can change the guards on applied patches. It does not pop
3746 qselect can change the guards on applied patches. It does not pop
3747 guarded patches by default. Use --pop to pop back to the last
3747 guarded patches by default. Use --pop to pop back to the last
3748 applied patch that is not guarded. Use --reapply (which implies
3748 applied patch that is not guarded. Use --reapply (which implies
3749 --pop) to push back to the current patch afterwards, but skip
3749 --pop) to push back to the current patch afterwards, but skip
3750 guarded patches.
3750 guarded patches.
3751
3751
3752 Use -s/--series to print a list of all guards in the series file
3752 Use -s/--series to print a list of all guards in the series file
3753 (no other arguments needed). Use -v for more information.
3753 (no other arguments needed). Use -v for more information.
3754
3754
3755 Returns 0 on success."""
3755 Returns 0 on success."""
3756
3756
3757 q = repo.mq
3757 q = repo.mq
3758 opts = pycompat.byteskwargs(opts)
3758 opts = pycompat.byteskwargs(opts)
3759 guards = q.active()
3759 guards = q.active()
3760 pushable = lambda i: q.pushable(q.applied[i].name)[0]
3760 pushable = lambda i: q.pushable(q.applied[i].name)[0]
3761 if args or opts.get(b'none'):
3761 if args or opts.get(b'none'):
3762 old_unapplied = q.unapplied(repo)
3762 old_unapplied = q.unapplied(repo)
3763 old_guarded = [i for i in range(len(q.applied)) if not pushable(i)]
3763 old_guarded = [i for i in range(len(q.applied)) if not pushable(i)]
3764 q.setactive(args)
3764 q.setactive(args)
3765 q.savedirty()
3765 q.savedirty()
3766 if not args:
3766 if not args:
3767 ui.status(_(b'guards deactivated\n'))
3767 ui.status(_(b'guards deactivated\n'))
3768 if not opts.get(b'pop') and not opts.get(b'reapply'):
3768 if not opts.get(b'pop') and not opts.get(b'reapply'):
3769 unapplied = q.unapplied(repo)
3769 unapplied = q.unapplied(repo)
3770 guarded = [i for i in range(len(q.applied)) if not pushable(i)]
3770 guarded = [i for i in range(len(q.applied)) if not pushable(i)]
3771 if len(unapplied) != len(old_unapplied):
3771 if len(unapplied) != len(old_unapplied):
3772 ui.status(
3772 ui.status(
3773 _(
3773 _(
3774 b'number of unguarded, unapplied patches has '
3774 b'number of unguarded, unapplied patches has '
3775 b'changed from %d to %d\n'
3775 b'changed from %d to %d\n'
3776 )
3776 )
3777 % (len(old_unapplied), len(unapplied))
3777 % (len(old_unapplied), len(unapplied))
3778 )
3778 )
3779 if len(guarded) != len(old_guarded):
3779 if len(guarded) != len(old_guarded):
3780 ui.status(
3780 ui.status(
3781 _(
3781 _(
3782 b'number of guarded, applied patches has changed '
3782 b'number of guarded, applied patches has changed '
3783 b'from %d to %d\n'
3783 b'from %d to %d\n'
3784 )
3784 )
3785 % (len(old_guarded), len(guarded))
3785 % (len(old_guarded), len(guarded))
3786 )
3786 )
3787 elif opts.get(b'series'):
3787 elif opts.get(b'series'):
3788 guards = {}
3788 guards = {}
3789 noguards = 0
3789 noguards = 0
3790 for gs in q.seriesguards:
3790 for gs in q.seriesguards:
3791 if not gs:
3791 if not gs:
3792 noguards += 1
3792 noguards += 1
3793 for g in gs:
3793 for g in gs:
3794 guards.setdefault(g, 0)
3794 guards.setdefault(g, 0)
3795 guards[g] += 1
3795 guards[g] += 1
3796 if ui.verbose:
3796 if ui.verbose:
3797 guards[b'NONE'] = noguards
3797 guards[b'NONE'] = noguards
3798 guards = list(guards.items())
3798 guards = list(guards.items())
3799 guards.sort(key=lambda x: x[0][1:])
3799 guards.sort(key=lambda x: x[0][1:])
3800 if guards:
3800 if guards:
3801 ui.note(_(b'guards in series file:\n'))
3801 ui.note(_(b'guards in series file:\n'))
3802 for guard, count in guards:
3802 for guard, count in guards:
3803 ui.note(b'%2d ' % count)
3803 ui.note(b'%2d ' % count)
3804 ui.write(guard, b'\n')
3804 ui.write(guard, b'\n')
3805 else:
3805 else:
3806 ui.note(_(b'no guards in series file\n'))
3806 ui.note(_(b'no guards in series file\n'))
3807 else:
3807 else:
3808 if guards:
3808 if guards:
3809 ui.note(_(b'active guards:\n'))
3809 ui.note(_(b'active guards:\n'))
3810 for g in guards:
3810 for g in guards:
3811 ui.write(g, b'\n')
3811 ui.write(g, b'\n')
3812 else:
3812 else:
3813 ui.write(_(b'no active guards\n'))
3813 ui.write(_(b'no active guards\n'))
3814 reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
3814 reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
3815 popped = False
3815 popped = False
3816 if opts.get(b'pop') or opts.get(b'reapply'):
3816 if opts.get(b'pop') or opts.get(b'reapply'):
3817 for i in range(len(q.applied)):
3817 for i in range(len(q.applied)):
3818 if not pushable(i):
3818 if not pushable(i):
3819 ui.status(_(b'popping guarded patches\n'))
3819 ui.status(_(b'popping guarded patches\n'))
3820 popped = True
3820 popped = True
3821 if i == 0:
3821 if i == 0:
3822 q.pop(repo, all=True)
3822 q.pop(repo, all=True)
3823 else:
3823 else:
3824 q.pop(repo, q.applied[i - 1].name)
3824 q.pop(repo, q.applied[i - 1].name)
3825 break
3825 break
3826 if popped:
3826 if popped:
3827 try:
3827 try:
3828 if reapply:
3828 if reapply:
3829 ui.status(_(b'reapplying unguarded patches\n'))
3829 ui.status(_(b'reapplying unguarded patches\n'))
3830 q.push(repo, reapply)
3830 q.push(repo, reapply)
3831 finally:
3831 finally:
3832 q.savedirty()
3832 q.savedirty()
3833
3833
3834
3834
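The guard semantics documented in qselect above reduce to a small predicate: a patch with no guards is always pushable, a matching negative guard always blocks it, and if it carries positive guards at least one of them must match the active set. A minimal sketch of that rule, assuming guards are plain strings such as '+stable' and '-stable' (illustrative only, not mq's actual pushable implementation):

    def guard_pushable(patch_guards, active_guards):
        # No guards at all: always pushable.
        if not patch_guards:
            return True
        active = set(active_guards)
        # Any matching negative guard blocks the patch outright.
        if any(g[1:] in active for g in patch_guards if g.startswith('-')):
            return False
        positives = [g[1:] for g in patch_guards if g.startswith('+')]
        # With positive guards, at least one must match the active set;
        # with only non-matching negative guards, the patch is pushed.
        return not positives or any(p in active for p in positives)

    # guard_pushable(['-stable'], ['stable']) == False
    # guard_pushable(['+stable'], ['stable']) == True
    # guard_pushable(['+stable'], [])         == False
    # guard_pushable(['-stable'], [])         == True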
3835 @command(
3835 @command(
3836 b"qfinish",
3836 b"qfinish",
3837 [(b'a', b'applied', None, _(b'finish all applied changesets'))],
3837 [(b'a', b'applied', None, _(b'finish all applied changesets'))],
3838 _(b'hg qfinish [-a] [REV]...'),
3838 _(b'hg qfinish [-a] [REV]...'),
3839 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3839 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3840 )
3840 )
3841 def finish(ui, repo, *revrange, **opts):
3841 def finish(ui, repo, *revrange, **opts):
3842 """move applied patches into repository history
3842 """move applied patches into repository history
3843
3843
3844 Finishes the specified revisions (corresponding to applied
3844 Finishes the specified revisions (corresponding to applied
3845 patches) by moving them out of mq control into regular repository
3845 patches) by moving them out of mq control into regular repository
3846 history.
3846 history.
3847
3847
3848 Accepts a revision range or the -a/--applied option. If --applied
3848 Accepts a revision range or the -a/--applied option. If --applied
3849 is specified, all applied mq revisions are removed from mq
3849 is specified, all applied mq revisions are removed from mq
3850 control. Otherwise, the given revisions must be at the base of the
3850 control. Otherwise, the given revisions must be at the base of the
3851 stack of applied patches.
3851 stack of applied patches.
3852
3852
3853 This can be especially useful if your changes have been applied to
3853 This can be especially useful if your changes have been applied to
3854 an upstream repository, or if you are about to push your changes
3854 an upstream repository, or if you are about to push your changes
3855 to upstream.
3855 to upstream.
3856
3856
3857 Returns 0 on success.
3857 Returns 0 on success.
3858 """
3858 """
3859 if not opts.get('applied') and not revrange:
3859 if not opts.get('applied') and not revrange:
3860 raise error.Abort(_(b'no revisions specified'))
3860 raise error.Abort(_(b'no revisions specified'))
3861 elif opts.get('applied'):
3861 elif opts.get('applied'):
3862 revrange = (b'qbase::qtip',) + revrange
3862 revrange = (b'qbase::qtip',) + revrange
3863
3863
3864 q = repo.mq
3864 q = repo.mq
3865 if not q.applied:
3865 if not q.applied:
3866 ui.status(_(b'no patches applied\n'))
3866 ui.status(_(b'no patches applied\n'))
3867 return 0
3867 return 0
3868
3868
3869 revs = logcmdutil.revrange(repo, revrange)
3869 revs = logcmdutil.revrange(repo, revrange)
3870 if repo[b'.'].rev() in revs and repo[None].files():
3870 if repo[b'.'].rev() in revs and repo[None].files():
3871 ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
3871 ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
3872 # queue.finish may change phases but leaves the responsibility to lock the
3872 # queue.finish may change phases but leaves the responsibility to lock the
3873 # repo to the caller to avoid deadlock with wlock. This command code is
3873 # repo to the caller to avoid deadlock with wlock. This command code is
3874 # responsible for this locking.
3874 # responsible for this locking.
3875 with repo.lock():
3875 with repo.lock():
3876 q.finish(repo, revs)
3876 q.finish(repo, revs)
3877 q.savedirty()
3877 q.savedirty()
3878 return 0
3878 return 0
3879
3879
3880
3880
3881 @command(
3881 @command(
3882 b"qqueue",
3882 b"qqueue",
3883 [
3883 [
3884 (b'l', b'list', False, _(b'list all available queues')),
3884 (b'l', b'list', False, _(b'list all available queues')),
3885 (b'', b'active', False, _(b'print name of active queue')),
3885 (b'', b'active', False, _(b'print name of active queue')),
3886 (b'c', b'create', False, _(b'create new queue')),
3886 (b'c', b'create', False, _(b'create new queue')),
3887 (b'', b'rename', False, _(b'rename active queue')),
3887 (b'', b'rename', False, _(b'rename active queue')),
3888 (b'', b'delete', False, _(b'delete reference to queue')),
3888 (b'', b'delete', False, _(b'delete reference to queue')),
3889 (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
3889 (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
3890 ],
3890 ],
3891 _(b'[OPTION] [QUEUE]'),
3891 _(b'[OPTION] [QUEUE]'),
3892 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3892 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3893 )
3893 )
3894 def qqueue(ui, repo, name=None, **opts):
3894 def qqueue(ui, repo, name=None, **opts):
3895 """manage multiple patch queues
3895 """manage multiple patch queues
3896
3896
3897 Supports switching between different patch queues, as well as creating
3897 Supports switching between different patch queues, as well as creating
3898 new patch queues and deleting existing ones.
3898 new patch queues and deleting existing ones.
3899
3899
3900 Omitting a queue name or specifying -l/--list will show you the registered
3900 Omitting a queue name or specifying -l/--list will show you the registered
3901 queues - by default the "normal" patches queue is registered. The currently
3901 queues - by default the "normal" patches queue is registered. The currently
3902 active queue will be marked with "(active)". Specifying --active will print
3902 active queue will be marked with "(active)". Specifying --active will print
3903 only the name of the active queue.
3903 only the name of the active queue.
3904
3904
3905 To create a new queue, use -c/--create. The queue is automatically made
3905 To create a new queue, use -c/--create. The queue is automatically made
3906 active, unless there are applied patches from the currently active
3906 active, unless there are applied patches from the currently active
3907 queue in the repository; in that case the queue is only created, and
3907 queue in the repository; in that case the queue is only created, and
3908 switching to it fails.
3908 switching to it fails.
3909
3909
3910 To delete an existing queue, use --delete. You cannot delete the currently
3910 To delete an existing queue, use --delete. You cannot delete the currently
3911 active queue.
3911 active queue.
3912
3912
3913 Returns 0 on success.
3913 Returns 0 on success.
3914 """
3914 """
3915 q = repo.mq
3915 q = repo.mq
3916 _defaultqueue = b'patches'
3916 _defaultqueue = b'patches'
3917 _allqueues = b'patches.queues'
3917 _allqueues = b'patches.queues'
3918 _activequeue = b'patches.queue'
3918 _activequeue = b'patches.queue'
3919
3919
3920 def _getcurrent():
3920 def _getcurrent():
3921 cur = os.path.basename(q.path)
3921 cur = os.path.basename(q.path)
3922 if cur.startswith(b'patches-'):
3922 if cur.startswith(b'patches-'):
3923 cur = cur[8:]
3923 cur = cur[8:]
3924 return cur
3924 return cur
3925
3925
3926 def _noqueues():
3926 def _noqueues():
3927 try:
3927 try:
3928 fh = repo.vfs(_allqueues, b'r')
3928 fh = repo.vfs(_allqueues, b'r')
3929 fh.close()
3929 fh.close()
3930 except IOError:
3930 except IOError:
3931 return True
3931 return True
3932
3932
3933 return False
3933 return False
3934
3934
3935 def _getqueues():
3935 def _getqueues():
3936 current = _getcurrent()
3936 current = _getcurrent()
3937
3937
3938 try:
3938 try:
3939 fh = repo.vfs(_allqueues, b'r')
3939 fh = repo.vfs(_allqueues, b'r')
3940 queues = [queue.strip() for queue in fh if queue.strip()]
3940 queues = [queue.strip() for queue in fh if queue.strip()]
3941 fh.close()
3941 fh.close()
3942 if current not in queues:
3942 if current not in queues:
3943 queues.append(current)
3943 queues.append(current)
3944 except IOError:
3944 except IOError:
3945 queues = [_defaultqueue]
3945 queues = [_defaultqueue]
3946
3946
3947 return sorted(queues)
3947 return sorted(queues)
3948
3948
3949 def _setactive(name):
3949 def _setactive(name):
3950 if q.applied:
3950 if q.applied:
3951 raise error.Abort(
3951 raise error.Abort(
3952 _(
3952 _(
3953 b'new queue created, but cannot make active '
3953 b'new queue created, but cannot make active '
3954 b'as patches are applied'
3954 b'as patches are applied'
3955 )
3955 )
3956 )
3956 )
3957 _setactivenocheck(name)
3957 _setactivenocheck(name)
3958
3958
3959 def _setactivenocheck(name):
3959 def _setactivenocheck(name):
3960 fh = repo.vfs(_activequeue, b'w')
3960 fh = repo.vfs(_activequeue, b'w')
3961 if name != b'patches':
3961 if name != b'patches':
3962 fh.write(name)
3962 fh.write(name)
3963 fh.close()
3963 fh.close()
3964
3964
3965 def _addqueue(name):
3965 def _addqueue(name):
3966 fh = repo.vfs(_allqueues, b'a')
3966 fh = repo.vfs(_allqueues, b'a')
3967 fh.write(b'%s\n' % (name,))
3967 fh.write(b'%s\n' % (name,))
3968 fh.close()
3968 fh.close()
3969
3969
3970 def _queuedir(name):
3970 def _queuedir(name):
3971 if name == b'patches':
3971 if name == b'patches':
3972 return repo.vfs.join(b'patches')
3972 return repo.vfs.join(b'patches')
3973 else:
3973 else:
3974 return repo.vfs.join(b'patches-' + name)
3974 return repo.vfs.join(b'patches-' + name)
3975
3975
3976 def _validname(name):
3976 def _validname(name):
3977 for n in name:
3977 for n in name:
3978 if n in b':\\/.':
3978 if n in b':\\/.':
3979 return False
3979 return False
3980 return True
3980 return True
3981
3981
3982 def _delete(name):
3982 def _delete(name):
3983 if name not in existing:
3983 if name not in existing:
3984 raise error.Abort(_(b'cannot delete queue that does not exist'))
3984 raise error.Abort(_(b'cannot delete queue that does not exist'))
3985
3985
3986 current = _getcurrent()
3986 current = _getcurrent()
3987
3987
3988 if name == current:
3988 if name == current:
3989 raise error.Abort(_(b'cannot delete currently active queue'))
3989 raise error.Abort(_(b'cannot delete currently active queue'))
3990
3990
3991 fh = repo.vfs(b'patches.queues.new', b'w')
3991 fh = repo.vfs(b'patches.queues.new', b'w')
3992 for queue in existing:
3992 for queue in existing:
3993 if queue == name:
3993 if queue == name:
3994 continue
3994 continue
3995 fh.write(b'%s\n' % (queue,))
3995 fh.write(b'%s\n' % (queue,))
3996 fh.close()
3996 fh.close()
3997 repo.vfs.rename(b'patches.queues.new', _allqueues)
3997 repo.vfs.rename(b'patches.queues.new', _allqueues)
3998
3998
3999 opts = pycompat.byteskwargs(opts)
3999 opts = pycompat.byteskwargs(opts)
4000 if not name or opts.get(b'list') or opts.get(b'active'):
4000 if not name or opts.get(b'list') or opts.get(b'active'):
4001 current = _getcurrent()
4001 current = _getcurrent()
4002 if opts.get(b'active'):
4002 if opts.get(b'active'):
4003 ui.write(b'%s\n' % (current,))
4003 ui.write(b'%s\n' % (current,))
4004 return
4004 return
4005 for queue in _getqueues():
4005 for queue in _getqueues():
4006 ui.write(b'%s' % (queue,))
4006 ui.write(b'%s' % (queue,))
4007 if queue == current and not ui.quiet:
4007 if queue == current and not ui.quiet:
4008 ui.write(_(b' (active)\n'))
4008 ui.write(_(b' (active)\n'))
4009 else:
4009 else:
4010 ui.write(b'\n')
4010 ui.write(b'\n')
4011 return
4011 return
4012
4012
4013 if not _validname(name):
4013 if not _validname(name):
4014 raise error.Abort(
4014 raise error.Abort(
4015 _(b'invalid queue name, may not contain the characters ":\\/."')
4015 _(b'invalid queue name, may not contain the characters ":\\/."')
4016 )
4016 )
4017
4017
4018 with repo.wlock():
4018 with repo.wlock():
4019 existing = _getqueues()
4019 existing = _getqueues()
4020
4020
4021 if opts.get(b'create'):
4021 if opts.get(b'create'):
4022 if name in existing:
4022 if name in existing:
4023 raise error.Abort(_(b'queue "%s" already exists') % name)
4023 raise error.Abort(_(b'queue "%s" already exists') % name)
4024 if _noqueues():
4024 if _noqueues():
4025 _addqueue(_defaultqueue)
4025 _addqueue(_defaultqueue)
4026 _addqueue(name)
4026 _addqueue(name)
4027 _setactive(name)
4027 _setactive(name)
4028 elif opts.get(b'rename'):
4028 elif opts.get(b'rename'):
4029 current = _getcurrent()
4029 current = _getcurrent()
4030 if name == current:
4030 if name == current:
4031 raise error.Abort(
4031 raise error.Abort(
4032 _(b'can\'t rename "%s" to its current name') % name
4032 _(b'can\'t rename "%s" to its current name') % name
4033 )
4033 )
4034 if name in existing:
4034 if name in existing:
4035 raise error.Abort(_(b'queue "%s" already exists') % name)
4035 raise error.Abort(_(b'queue "%s" already exists') % name)
4036
4036
4037 olddir = _queuedir(current)
4037 olddir = _queuedir(current)
4038 newdir = _queuedir(name)
4038 newdir = _queuedir(name)
4039
4039
4040 if os.path.exists(newdir):
4040 if os.path.exists(newdir):
4041 raise error.Abort(
4041 raise error.Abort(
4042 _(b'non-queue directory "%s" already exists') % newdir
4042 _(b'non-queue directory "%s" already exists') % newdir
4043 )
4043 )
4044
4044
4045 fh = repo.vfs(b'patches.queues.new', b'w')
4045 fh = repo.vfs(b'patches.queues.new', b'w')
4046 for queue in existing:
4046 for queue in existing:
4047 if queue == current:
4047 if queue == current:
4048 fh.write(b'%s\n' % (name,))
4048 fh.write(b'%s\n' % (name,))
4049 if os.path.exists(olddir):
4049 if os.path.exists(olddir):
4050 util.rename(olddir, newdir)
4050 util.rename(olddir, newdir)
4051 else:
4051 else:
4052 fh.write(b'%s\n' % (queue,))
4052 fh.write(b'%s\n' % (queue,))
4053 fh.close()
4053 fh.close()
4054 repo.vfs.rename(b'patches.queues.new', _allqueues)
4054 repo.vfs.rename(b'patches.queues.new', _allqueues)
4055 _setactivenocheck(name)
4055 _setactivenocheck(name)
4056 elif opts.get(b'delete'):
4056 elif opts.get(b'delete'):
4057 _delete(name)
4057 _delete(name)
4058 elif opts.get(b'purge'):
4058 elif opts.get(b'purge'):
4059 if name in existing:
4059 if name in existing:
4060 _delete(name)
4060 _delete(name)
4061 qdir = _queuedir(name)
4061 qdir = _queuedir(name)
4062 if os.path.exists(qdir):
4062 if os.path.exists(qdir):
4063 shutil.rmtree(qdir)
4063 shutil.rmtree(qdir)
4064 else:
4064 else:
4065 if name not in existing:
4065 if name not in existing:
4066 raise error.Abort(_(b'use --create to create a new queue'))
4066 raise error.Abort(_(b'use --create to create a new queue'))
4067 _setactive(name)
4067 _setactive(name)
4068
4068
4069
4069
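As the helper functions above show, multiple-queue state lives in two small text files under .hg: 'patches.queues' lists the registered queue names one per line, and 'patches.queue' names the active queue, with an empty or missing file meaning the default 'patches' queue. A hedged sketch of reading that on-disk convention with plain file I/O (the real code goes through repo.vfs):

    import os

    def read_queues(hgdir):
        # Registered queue names, one per line; a missing file means
        # only the default queue exists (cf. _noqueues/_getqueues above).
        path = os.path.join(hgdir, 'patches.queues')
        if not os.path.exists(path):
            return ['patches']
        with open(path) as fh:
            return sorted(line.strip() for line in fh if line.strip())

    def read_active(hgdir):
        # Active queue name; empty or missing means the default queue
        # (cf. _setactivenocheck, which writes nothing for 'patches').
        try:
            with open(os.path.join(hgdir, 'patches.queue')) as fh:
                return fh.read().strip() or 'patches'
        except IOError:
            return 'patches'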
4070 def mqphasedefaults(repo, roots):
4070 def mqphasedefaults(repo, roots):
4071 """callback used to set mq changeset as secret when no phase data exists"""
4071 """callback used to set mq changeset as secret when no phase data exists"""
4072 if repo.mq.applied:
4072 if repo.mq.applied:
4073 if repo.ui.configbool(b'mq', b'secret'):
4073 if repo.ui.configbool(b'mq', b'secret'):
4074 mqphase = phases.secret
4074 mqphase = phases.secret
4075 else:
4075 else:
4076 mqphase = phases.draft
4076 mqphase = phases.draft
4077 qbase = repo[repo.mq.applied[0].node]
4077 qbase = repo[repo.mq.applied[0].node]
4078 roots[mqphase].add(qbase.node())
4078 roots[mqphase].add(qbase.node())
4079 return roots
4079 return roots
4080
4080
4081
4081
4082 def reposetup(ui, repo):
4082 def reposetup(ui, repo):
4083 class mqrepo(repo.__class__):
4083 class mqrepo(repo.__class__):
4084 @localrepo.unfilteredpropertycache
4084 @localrepo.unfilteredpropertycache
4085 def mq(self):
4085 def mq(self):
4086 return queue(self.ui, self.baseui, self.path)
4086 return queue(self.ui, self.baseui, self.path)
4087
4087
4088 def invalidateall(self):
4088 def invalidateall(self):
4089 super(mqrepo, self).invalidateall()
4089 super(mqrepo, self).invalidateall()
4090 if localrepo.hasunfilteredcache(self, 'mq'):
4090 if localrepo.hasunfilteredcache(self, 'mq'):
4091 # recreate mq in case queue path was changed
4091 # recreate mq in case queue path was changed
4092 delattr(self.unfiltered(), 'mq')
4092 delattr(self.unfiltered(), 'mq')
4093
4093
4094 def abortifwdirpatched(self, errmsg, force=False):
4094 def abortifwdirpatched(self, errmsg, force=False):
4095 if self.mq.applied and self.mq.checkapplied and not force:
4095 if self.mq.applied and self.mq.checkapplied and not force:
4096 parents = self.dirstate.parents()
4096 parents = self.dirstate.parents()
4097 patches = [s.node for s in self.mq.applied]
4097 patches = [s.node for s in self.mq.applied]
4098 if any(p in patches for p in parents):
4098 if any(p in patches for p in parents):
4099 raise error.Abort(errmsg)
4099 raise error.Abort(errmsg)
4100
4100
4101 def commit(
4101 def commit(
4102 self,
4102 self,
4103 text=b"",
4103 text=b"",
4104 user=None,
4104 user=None,
4105 date=None,
4105 date=None,
4106 match=None,
4106 match=None,
4107 force=False,
4107 force=False,
4108 editor=False,
4108 editor=False,
4109 extra=None,
4109 extra=None,
4110 ):
4110 ):
4111 if extra is None:
4111 if extra is None:
4112 extra = {}
4112 extra = {}
4113 self.abortifwdirpatched(
4113 self.abortifwdirpatched(
4114 _(b'cannot commit over an applied mq patch'), force
4114 _(b'cannot commit over an applied mq patch'), force
4115 )
4115 )
4116
4116
4117 return super(mqrepo, self).commit(
4117 return super(mqrepo, self).commit(
4118 text, user, date, match, force, editor, extra
4118 text, user, date, match, force, editor, extra
4119 )
4119 )
4120
4120
4121 def checkpush(self, pushop):
4121 def checkpush(self, pushop):
4122 if self.mq.applied and self.mq.checkapplied and not pushop.force:
4122 if self.mq.applied and self.mq.checkapplied and not pushop.force:
4123 outapplied = [e.node for e in self.mq.applied]
4123 outapplied = [e.node for e in self.mq.applied]
4124 if pushop.revs:
4124 if pushop.revs:
4125 # Assume applied patches have no non-patch descendants and
4125 # Assume applied patches have no non-patch descendants and
4126 # are not on remote already. Filter out any changeset not
4126 # are not on remote already. Filter out any changeset not
4127 # being pushed.
4127 # being pushed.
4128 heads = set(pushop.revs)
4128 heads = set(pushop.revs)
4129 for node in reversed(outapplied):
4129 for node in reversed(outapplied):
4130 if node in heads:
4130 if node in heads:
4131 break
4131 break
4132 else:
4132 else:
4133 outapplied.pop()
4133 outapplied.pop()
4134 # looking for pushed and shared changeset
4134 # looking for pushed and shared changeset
4135 for node in outapplied:
4135 for node in outapplied:
4136 if self[node].phase() < phases.secret:
4136 if self[node].phase() < phases.secret:
4137 raise error.Abort(_(b'source has mq patches applied'))
4137 raise error.Abort(_(b'source has mq patches applied'))
4138 # no non-secret patches pushed
4138 # no non-secret patches pushed
4139 super(mqrepo, self).checkpush(pushop)
4139 super(mqrepo, self).checkpush(pushop)
4140
4140
4141 def _findtags(self):
4141 def _findtags(self):
4142 '''augment tags from base class with patch tags'''
4142 '''augment tags from base class with patch tags'''
4143 result = super(mqrepo, self)._findtags()
4143 result = super(mqrepo, self)._findtags()
4144
4144
4145 q = self.mq
4145 q = self.mq
4146 if not q.applied:
4146 if not q.applied:
4147 return result
4147 return result
4148
4148
4149 mqtags = [(patch.node, patch.name) for patch in q.applied]
4149 mqtags = [(patch.node, patch.name) for patch in q.applied]
4150
4150
4151 try:
4151 try:
4152 # for now ignore filtering business
4152 # for now ignore filtering business
4153 self.unfiltered().changelog.rev(mqtags[-1][0])
4153 self.unfiltered().changelog.rev(mqtags[-1][0])
4154 except error.LookupError:
4154 except error.LookupError:
4155 self.ui.warn(
4155 self.ui.warn(
4156 _(b'mq status file refers to unknown node %s\n')
4156 _(b'mq status file refers to unknown node %s\n')
4157 % short(mqtags[-1][0])
4157 % short(mqtags[-1][0])
4158 )
4158 )
4159 return result
4159 return result
4160
4160
4161 # do not add fake tags for filtered revisions
4161 # do not add fake tags for filtered revisions
4162 included = self.changelog.hasnode
4162 included = self.changelog.hasnode
4163 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
4163 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
4164 if not mqtags:
4164 if not mqtags:
4165 return result
4165 return result
4166
4166
4167 mqtags.append((mqtags[-1][0], b'qtip'))
4167 mqtags.append((mqtags[-1][0], b'qtip'))
4168 mqtags.append((mqtags[0][0], b'qbase'))
4168 mqtags.append((mqtags[0][0], b'qbase'))
4169 mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
4169 mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
4170 tags = result[0]
4170 tags = result[0]
4171 for patch in mqtags:
4171 for patch in mqtags:
4172 if patch[1] in tags:
4172 if patch[1] in tags:
4173 self.ui.warn(
4173 self.ui.warn(
4174 _(b'tag %s overrides mq patch of the same name\n')
4174 _(b'tag %s overrides mq patch of the same name\n')
4175 % patch[1]
4175 % patch[1]
4176 )
4176 )
4177 else:
4177 else:
4178 tags[patch[1]] = patch[0]
4178 tags[patch[1]] = patch[0]
4179
4179
4180 return result
4180 return result
4181
4181
4182 if repo.local():
4182 if repo.local():
4183 repo.__class__ = mqrepo
4183 repo.__class__ = mqrepo
4184
4184
4185 repo._phasedefaults.append(mqphasedefaults)
4185 repo._phasedefaults.append(mqphasedefaults)
4186
4186
4187
4187
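The checkpush method above walks the applied patches from the newest one down, dropping every node that is not among the pushed heads and stopping at the first one that is. The same trimming can be written as a short loop; a standalone sketch with plain lists standing in for changeset nodes:

    def trim_unpushed(applied, pushed_heads):
        # Drop applied nodes from the end until one is a pushed head,
        # mirroring the reversed() loop in checkpush above.
        applied = list(applied)
        heads = set(pushed_heads)
        while applied and applied[-1] not in heads:
            applied.pop()
        return applied

    # trim_unpushed(['p1', 'p2', 'p3'], ['p2']) == ['p1', 'p2']
    # trim_unpushed(['p1', 'p2'], [])           == []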
4188 def mqimport(orig, ui, repo, *args, **kwargs):
4188 def mqimport(orig, ui, repo, *args, **kwargs):
4189 if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
4189 if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
4190 'no_commit', False
4190 'no_commit', False
4191 ):
4191 ):
4192 repo.abortifwdirpatched(
4192 repo.abortifwdirpatched(
4193 _(b'cannot import over an applied patch'), kwargs.get('force')
4193 _(b'cannot import over an applied patch'), kwargs.get('force')
4194 )
4194 )
4195 return orig(ui, repo, *args, **kwargs)
4195 return orig(ui, repo, *args, **kwargs)
4196
4196
4197
4197
4198 def mqinit(orig, ui, *args, **kwargs):
4198 def mqinit(orig, ui, *args, **kwargs):
4199 mq = kwargs.pop('mq', None)
4199 mq = kwargs.pop('mq', None)
4200
4200
4201 if not mq:
4201 if not mq:
4202 return orig(ui, *args, **kwargs)
4202 return orig(ui, *args, **kwargs)
4203
4203
4204 if args:
4204 if args:
4205 repopath = args[0]
4205 repopath = args[0]
4206 if not hg.islocal(repopath):
4206 if not hg.islocal(repopath):
4207 raise error.Abort(
4207 raise error.Abort(
4208 _(b'only a local queue repository may be initialized')
4208 _(b'only a local queue repository may be initialized')
4209 )
4209 )
4210 else:
4210 else:
4211 repopath = cmdutil.findrepo(encoding.getcwd())
4211 repopath = cmdutil.findrepo(encoding.getcwd())
4212 if not repopath:
4212 if not repopath:
4213 raise error.Abort(
4213 raise error.Abort(
4214 _(b'there is no Mercurial repository here (.hg not found)')
4214 _(b'there is no Mercurial repository here (.hg not found)')
4215 )
4215 )
4216 repo = hg.repository(ui, repopath)
4216 repo = hg.repository(ui, repopath)
4217 return qinit(ui, repo, True)
4217 return qinit(ui, repo, True)
4218
4218
4219
4219
4220 def mqcommand(orig, ui, repo, *args, **kwargs):
4220 def mqcommand(orig, ui, repo, *args, **kwargs):
4221 """Add --mq option to operate on patch repository instead of main"""
4221 """Add --mq option to operate on patch repository instead of main"""
4222
4222
4223 # some commands do not like getting unknown options
4223 # some commands do not like getting unknown options
4224 mq = kwargs.pop('mq', None)
4224 mq = kwargs.pop('mq', None)
4225
4225
4226 if not mq:
4226 if not mq:
4227 return orig(ui, repo, *args, **kwargs)
4227 return orig(ui, repo, *args, **kwargs)
4228
4228
4229 q = repo.mq
4229 q = repo.mq
4230 r = q.qrepo()
4230 r = q.qrepo()
4231 if not r:
4231 if not r:
4232 raise error.Abort(_(b'no queue repository'))
4232 raise error.Abort(_(b'no queue repository'))
4233 return orig(r.ui, r, *args, **kwargs)
4233 return orig(r.ui, r, *args, **kwargs)
4234
4234
4235
4235
4236 def summaryhook(ui, repo):
4236 def summaryhook(ui, repo):
4237 q = repo.mq
4237 q = repo.mq
4238 m = []
4238 m = []
4239 a, u = len(q.applied), len(q.unapplied(repo))
4239 a, u = len(q.applied), len(q.unapplied(repo))
4240 if a:
4240 if a:
4241 m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
4241 m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
4242 if u:
4242 if u:
4243 m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
4243 m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
4244 if m:
4244 if m:
4245 # i18n: column positioning for "hg summary"
4245 # i18n: column positioning for "hg summary"
4246 ui.write(_(b"mq: %s\n") % b', '.join(m))
4246 ui.write(_(b"mq: %s\n") % b', '.join(m))
4247 else:
4247 else:
4248 # i18n: column positioning for "hg summary"
4248 # i18n: column positioning for "hg summary"
4249 ui.note(_(b"mq: (empty queue)\n"))
4249 ui.note(_(b"mq: (empty queue)\n"))
4250
4250
4251
4251
4252 revsetpredicate = registrar.revsetpredicate()
4252 revsetpredicate = registrar.revsetpredicate()
4253
4253
4254
4254
4255 @revsetpredicate(b'mq()')
4255 @revsetpredicate(b'mq()')
4256 def revsetmq(repo, subset, x):
4256 def revsetmq(repo, subset, x):
4257 """Changesets managed by MQ."""
4257 """Changesets managed by MQ."""
4258 revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
4258 revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
4259 applied = {repo[r.node].rev() for r in repo.mq.applied}
4259 applied = {repo[r.node].rev() for r in repo.mq.applied}
4260 return smartset.baseset([r for r in subset if r in applied])
4260 return smartset.baseset([r for r in subset if r in applied])
4261
4261
4262
4262
4263 # tell hggettext to extract docstrings from these functions:
4263 # tell hggettext to extract docstrings from these functions:
4264 i18nfunctions = [revsetmq]
4264 i18nfunctions = [revsetmq]
4265
4265
4266
4266
4267 def extsetup(ui):
4267 def extsetup(ui):
4268 # Ensure mq wrappers are called first, regardless of extension load order by
4268 # Ensure mq wrappers are called first, regardless of extension load order by
4269 # NOT wrapping in uisetup() and instead deferring to init stage two here.
4269 # NOT wrapping in uisetup() and instead deferring to init stage two here.
4270 mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
4270 mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
4271
4271
4272 extensions.wrapcommand(commands.table, b'import', mqimport)
4272 extensions.wrapcommand(commands.table, b'import', mqimport)
4273 cmdutil.summaryhooks.add(b'mq', summaryhook)
4273 cmdutil.summaryhooks.add(b'mq', summaryhook)
4274
4274
4275 entry = extensions.wrapcommand(commands.table, b'init', mqinit)
4275 entry = extensions.wrapcommand(commands.table, b'init', mqinit)
4276 entry[1].extend(mqopt)
4276 entry[1].extend(mqopt)
4277
4277
4278 def dotable(cmdtable):
4278 def dotable(cmdtable):
4279 for cmd, entry in cmdtable.items():
4279 for cmd, entry in cmdtable.items():
4280 cmd = cmdutil.parsealiases(cmd)[0]
4280 cmd = cmdutil.parsealiases(cmd)[0]
4281 func = entry[0]
4281 func = entry[0]
4282 if func.norepo:
4282 if func.norepo:
4283 continue
4283 continue
4284 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
4284 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
4285 entry[1].extend(mqopt)
4285 entry[1].extend(mqopt)
4286
4286
4287 dotable(commands.table)
4287 dotable(commands.table)
4288
4288
4289 thismodule = sys.modules["hgext.mq"]
4289 thismodule = sys.modules["hgext.mq"]
4290 for extname, extmodule in extensions.extensions():
4290 for extname, extmodule in extensions.extensions():
4291 if extmodule != thismodule:
4291 if extmodule != thismodule:
4292 dotable(getattr(extmodule, 'cmdtable', {}))
4292 dotable(getattr(extmodule, 'cmdtable', {}))
4293
4293
4294
4294
4295 colortable = {
4295 colortable = {
4296 b'qguard.negative': b'red',
4296 b'qguard.negative': b'red',
4297 b'qguard.positive': b'yellow',
4297 b'qguard.positive': b'yellow',
4298 b'qguard.unguarded': b'green',
4298 b'qguard.unguarded': b'green',
4299 b'qseries.applied': b'blue bold underline',
4299 b'qseries.applied': b'blue bold underline',
4300 b'qseries.guarded': b'black bold',
4300 b'qseries.guarded': b'black bold',
4301 b'qseries.missing': b'red bold',
4301 b'qseries.missing': b'red bold',
4302 b'qseries.unapplied': b'black bold',
4302 b'qseries.unapplied': b'black bold',
4303 }
4303 }
@@ -1,692 +1,694 b''
1 # narrowcommands.py - command modifications for narrowhg extension
1 # narrowcommands.py - command modifications for narrowhg extension
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import itertools
8 import itertools
9 import os
9 import os
10
10
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 from mercurial.node import (
12 from mercurial.node import (
13 hex,
13 hex,
14 short,
14 short,
15 )
15 )
16 from mercurial import (
16 from mercurial import (
17 bundle2,
17 bundle2,
18 cmdutil,
18 cmdutil,
19 commands,
19 commands,
20 discovery,
20 discovery,
21 encoding,
21 encoding,
22 error,
22 error,
23 exchange,
23 exchange,
24 extensions,
24 extensions,
25 hg,
25 hg,
26 narrowspec,
26 narrowspec,
27 pathutil,
27 pathutil,
28 pycompat,
28 pycompat,
29 registrar,
29 registrar,
30 repair,
30 repair,
31 repoview,
31 repoview,
32 requirements,
32 requirements,
33 sparse,
33 sparse,
34 util,
34 util,
35 wireprototypes,
35 wireprototypes,
36 )
36 )
37 from mercurial.utils import (
37 from mercurial.utils import (
38 urlutil,
38 urlutil,
39 )
39 )
40
40
41 table = {}
41 table = {}
42 command = registrar.command(table)
42 command = registrar.command(table)
43
43
44
44
45 def setup():
45 def setup():
46 """Wraps user-facing mercurial commands with narrow-aware versions."""
46 """Wraps user-facing mercurial commands with narrow-aware versions."""
47
47
48 entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd)
48 entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd)
49 entry[1].append(
49 entry[1].append(
50 (b'', b'narrow', None, _(b"create a narrow clone of select files"))
50 (b'', b'narrow', None, _(b"create a narrow clone of select files"))
51 )
51 )
52 entry[1].append(
52 entry[1].append(
53 (
53 (
54 b'',
54 b'',
55 b'depth',
55 b'depth',
56 b'',
56 b'',
57 _(b"limit the history fetched by distance from heads"),
57 _(b"limit the history fetched by distance from heads"),
58 )
58 )
59 )
59 )
60 entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file")))
60 entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file")))
61 # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
61 # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
62 if b'sparse' not in extensions.enabled():
62 if b'sparse' not in extensions.enabled():
63 entry[1].append(
63 entry[1].append(
64 (b'', b'include', [], _(b"specifically fetch this file/directory"))
64 (b'', b'include', [], _(b"specifically fetch this file/directory"))
65 )
65 )
66 entry[1].append(
66 entry[1].append(
67 (
67 (
68 b'',
68 b'',
69 b'exclude',
69 b'exclude',
70 [],
70 [],
71 _(b"do not fetch this file/directory, even if included"),
71 _(b"do not fetch this file/directory, even if included"),
72 )
72 )
73 )
73 )
74
74
75 entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd)
75 entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd)
76 entry[1].append(
76 entry[1].append(
77 (
77 (
78 b'',
78 b'',
79 b'depth',
79 b'depth',
80 b'',
80 b'',
81 _(b"limit the history fetched by distance from heads"),
81 _(b"limit the history fetched by distance from heads"),
82 )
82 )
83 )
83 )
84
84
85 extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd)
85 extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd)
86
86
87
87
88 def clonenarrowcmd(orig, ui, repo, *args, **opts):
88 def clonenarrowcmd(orig, ui, repo, *args, **opts):
89 """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
89 """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
90 opts = pycompat.byteskwargs(opts)
90 opts = pycompat.byteskwargs(opts)
91 wrappedextraprepare = util.nullcontextmanager()
91 wrappedextraprepare = util.nullcontextmanager()
92 narrowspecfile = opts[b'narrowspec']
92 narrowspecfile = opts[b'narrowspec']
93
93
94 if narrowspecfile:
94 if narrowspecfile:
95 filepath = os.path.join(encoding.getcwd(), narrowspecfile)
95 filepath = os.path.join(encoding.getcwd(), narrowspecfile)
96 ui.status(_(b"reading narrowspec from '%s'\n") % filepath)
96 ui.status(_(b"reading narrowspec from '%s'\n") % filepath)
97 try:
97 try:
98 fdata = util.readfile(filepath)
98 fdata = util.readfile(filepath)
99 except IOError as inst:
99 except IOError as inst:
100 raise error.Abort(
100 raise error.Abort(
101 _(b"cannot read narrowspecs from '%s': %s")
101 _(b"cannot read narrowspecs from '%s': %s")
102 % (filepath, encoding.strtolocal(inst.strerror))
102 % (filepath, encoding.strtolocal(inst.strerror))
103 )
103 )
104
104
105 includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow')
105 includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow')
106 if profiles:
106 if profiles:
107 raise error.ConfigError(
107 raise error.ConfigError(
108 _(
108 _(
109 b"cannot specify other files using '%include' in"
109 b"cannot specify other files using '%include' in"
110 b" narrowspec"
110 b" narrowspec"
111 )
111 )
112 )
112 )
113
113
114 narrowspec.validatepatterns(includes)
114 narrowspec.validatepatterns(includes)
115 narrowspec.validatepatterns(excludes)
115 narrowspec.validatepatterns(excludes)
116
116
117 # a narrowspec was passed, so assume the user wants a narrow clone
117 # a narrowspec was passed, so assume the user wants a narrow clone
118 opts[b'narrow'] = True
118 opts[b'narrow'] = True
119 opts[b'include'].extend(includes)
119 opts[b'include'].extend(includes)
120 opts[b'exclude'].extend(excludes)
120 opts[b'exclude'].extend(excludes)
121
121
122 if opts[b'narrow']:
122 if opts[b'narrow']:
123
123
124 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
124 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
125 orig(pullop, kwargs)
125 orig(pullop, kwargs)
126
126
127 if opts.get(b'depth'):
127 if opts.get(b'depth'):
128 kwargs[b'depth'] = opts[b'depth']
128 kwargs[b'depth'] = opts[b'depth']
129
129
130 wrappedextraprepare = extensions.wrappedfunction(
130 wrappedextraprepare = extensions.wrappedfunction(
131 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
131 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
132 )
132 )
133
133
134 with wrappedextraprepare:
134 with wrappedextraprepare:
135 return orig(ui, repo, *args, **pycompat.strkwargs(opts))
135 return orig(ui, repo, *args, **pycompat.strkwargs(opts))
136
136
137
137
138 def pullnarrowcmd(orig, ui, repo, *args, **opts):
138 def pullnarrowcmd(orig, ui, repo, *args, **opts):
139 """Wraps pull command to allow modifying narrow spec."""
139 """Wraps pull command to allow modifying narrow spec."""
140 wrappedextraprepare = util.nullcontextmanager()
140 wrappedextraprepare = util.nullcontextmanager()
141 if requirements.NARROW_REQUIREMENT in repo.requirements:
141 if requirements.NARROW_REQUIREMENT in repo.requirements:
142
142
143 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
143 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
144 orig(pullop, kwargs)
144 orig(pullop, kwargs)
145 if opts.get('depth'):
145 if opts.get('depth'):
146 kwargs[b'depth'] = opts['depth']
146 kwargs[b'depth'] = opts['depth']
147
147
148 wrappedextraprepare = extensions.wrappedfunction(
148 wrappedextraprepare = extensions.wrappedfunction(
149 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
149 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
150 )
150 )
151
151
152 with wrappedextraprepare:
152 with wrappedextraprepare:
153 return orig(ui, repo, *args, **opts)
153 return orig(ui, repo, *args, **opts)
154
154
155
155
156 def archivenarrowcmd(orig, ui, repo, *args, **opts):
156 def archivenarrowcmd(orig, ui, repo, *args, **opts):
157 """Wraps archive command to narrow the default includes."""
157 """Wraps archive command to narrow the default includes."""
158 if requirements.NARROW_REQUIREMENT in repo.requirements:
158 if requirements.NARROW_REQUIREMENT in repo.requirements:
159 repo_includes, repo_excludes = repo.narrowpats
159 repo_includes, repo_excludes = repo.narrowpats
160 includes = set(opts.get('include', []))
160 includes = set(opts.get('include', []))
161 excludes = set(opts.get('exclude', []))
161 excludes = set(opts.get('exclude', []))
162 includes, excludes, unused_invalid = narrowspec.restrictpatterns(
162 includes, excludes, unused_invalid = narrowspec.restrictpatterns(
163 includes, excludes, repo_includes, repo_excludes
163 includes, excludes, repo_includes, repo_excludes
164 )
164 )
165 if includes:
165 if includes:
166 opts['include'] = includes
166 opts['include'] = includes
167 if excludes:
167 if excludes:
168 opts['exclude'] = excludes
168 opts['exclude'] = excludes
169 return orig(ui, repo, *args, **opts)
169 return orig(ui, repo, *args, **opts)
170
170
171
171
172 def pullbundle2extraprepare(orig, pullop, kwargs):
172 def pullbundle2extraprepare(orig, pullop, kwargs):
173 repo = pullop.repo
173 repo = pullop.repo
174 if requirements.NARROW_REQUIREMENT not in repo.requirements:
174 if requirements.NARROW_REQUIREMENT not in repo.requirements:
175 return orig(pullop, kwargs)
175 return orig(pullop, kwargs)
176
176
177 if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
177 if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
178 raise error.Abort(_(b"server does not support narrow clones"))
178 raise error.Abort(_(b"server does not support narrow clones"))
179 orig(pullop, kwargs)
179 orig(pullop, kwargs)
180 kwargs[b'narrow'] = True
180 kwargs[b'narrow'] = True
181 include, exclude = repo.narrowpats
181 include, exclude = repo.narrowpats
182 kwargs[b'oldincludepats'] = include
182 kwargs[b'oldincludepats'] = include
183 kwargs[b'oldexcludepats'] = exclude
183 kwargs[b'oldexcludepats'] = exclude
184 if include:
184 if include:
185 kwargs[b'includepats'] = include
185 kwargs[b'includepats'] = include
186 if exclude:
186 if exclude:
187 kwargs[b'excludepats'] = exclude
187 kwargs[b'excludepats'] = exclude
188 # calculate known nodes only in ellipses cases because in non-ellipses cases
188 # calculate known nodes only in ellipses cases because in non-ellipses cases
189 # we have all the nodes
189 # we have all the nodes
190 if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities():
190 if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities():
191 kwargs[b'known'] = [
191 kwargs[b'known'] = [
192 hex(ctx.node())
192 hex(ctx.node())
193 for ctx in repo.set(b'::%ln', pullop.common)
193 for ctx in repo.set(b'::%ln', pullop.common)
194 if ctx.node() != repo.nullid
194 if ctx.node() != repo.nullid
195 ]
195 ]
196 if not kwargs[b'known']:
196 if not kwargs[b'known']:
197 # Mercurial serializes an empty list as '' and deserializes it as
197 # Mercurial serializes an empty list as '' and deserializes it as
198 # [''], so delete it instead to avoid handling the empty string on
198 # [''], so delete it instead to avoid handling the empty string on
199 # the server.
199 # the server.
200 del kwargs[b'known']
200 del kwargs[b'known']
201
201
202
202
203 extensions.wrapfunction(
203 extensions.wrapfunction(
204 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare
204 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare
205 )
205 )
206
206
207
207
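The del kwargs[b'known'] above works around the round-trip quirk named in the
comment: an empty list does not survive a join/split-style wire encoding.
Assuming a separator-joined encoding purely for illustration:

    items = []
    wire = b','.join(items)   # b''
    back = wire.split(b',')   # [b''], not [] -- the empty string reappears
    assert back != items      # hence the key is deleted rather than sent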
208 def _narrow(
208 def _narrow(
209 ui,
209 ui,
210 repo,
210 repo,
211 remote,
211 remote,
212 commoninc,
212 commoninc,
213 oldincludes,
213 oldincludes,
214 oldexcludes,
214 oldexcludes,
215 newincludes,
215 newincludes,
216 newexcludes,
216 newexcludes,
217 force,
217 force,
218 backup,
218 backup,
219 ):
219 ):
220 oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
220 oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
221 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
221 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
222
222
223 # This is essentially doing "hg outgoing" to find all local-only
223 # This is essentially doing "hg outgoing" to find all local-only
224 # commits. We will then check that the local-only commits don't
224 # commits. We will then check that the local-only commits don't
225 # have any changes to files that will be untracked.
225 # have any changes to files that will be untracked.
226 unfi = repo.unfiltered()
226 unfi = repo.unfiltered()
227 outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
227 outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
228 ui.status(_(b'looking for local changes to affected paths\n'))
228 ui.status(_(b'looking for local changes to affected paths\n'))
229 progress = ui.makeprogress(
229 progress = ui.makeprogress(
230 topic=_(b'changesets'),
230 topic=_(b'changesets'),
231 unit=_(b'changesets'),
231 unit=_(b'changesets'),
232 total=len(outgoing.missing) + len(outgoing.excluded),
232 total=len(outgoing.missing) + len(outgoing.excluded),
233 )
233 )
234 localnodes = []
234 localnodes = []
235 with progress:
235 with progress:
236 for n in itertools.chain(outgoing.missing, outgoing.excluded):
236 for n in itertools.chain(outgoing.missing, outgoing.excluded):
237 progress.increment()
237 progress.increment()
238 if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
238 if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
239 localnodes.append(n)
239 localnodes.append(n)
240 revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
240 revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
241 hiddenrevs = repoview.filterrevs(repo, b'visible')
241 hiddenrevs = repoview.filterrevs(repo, b'visible')
242 visibletostrip = list(
242 visibletostrip = list(
243 repo.changelog.node(r) for r in (revstostrip - hiddenrevs)
243 repo.changelog.node(r) for r in (revstostrip - hiddenrevs)
244 )
244 )
245 if visibletostrip:
245 if visibletostrip:
246 ui.status(
246 ui.status(
247 _(
247 _(
248 b'The following changeset(s) or their ancestors have '
248 b'The following changeset(s) or their ancestors have '
249 b'local changes not on the remote:\n'
249 b'local changes not on the remote:\n'
250 )
250 )
251 )
251 )
252 maxnodes = 10
252 maxnodes = 10
253 if ui.verbose or len(visibletostrip) <= maxnodes:
253 if ui.verbose or len(visibletostrip) <= maxnodes:
254 for n in visibletostrip:
254 for n in visibletostrip:
255 ui.status(b'%s\n' % short(n))
255 ui.status(b'%s\n' % short(n))
256 else:
256 else:
257 for n in visibletostrip[:maxnodes]:
257 for n in visibletostrip[:maxnodes]:
258 ui.status(b'%s\n' % short(n))
258 ui.status(b'%s\n' % short(n))
259 ui.status(
259 ui.status(
260 _(b'...and %d more, use --verbose to list all\n')
260 _(b'...and %d more, use --verbose to list all\n')
261 % (len(visibletostrip) - maxnodes)
261 % (len(visibletostrip) - maxnodes)
262 )
262 )
263 if not force:
263 if not force:
264 raise error.StateError(
264 raise error.StateError(
265 _(b'local changes found'),
265 _(b'local changes found'),
266 hint=_(b'use --force-delete-local-changes to ignore'),
266 hint=_(b'use --force-delete-local-changes to ignore'),
267 )
267 )
268
268
269 with ui.uninterruptible():
269 with ui.uninterruptible():
270 if revstostrip:
270 if revstostrip:
271 tostrip = [unfi.changelog.node(r) for r in revstostrip]
271 tostrip = [unfi.changelog.node(r) for r in revstostrip]
272 if repo[b'.'].node() in tostrip:
272 if repo[b'.'].node() in tostrip:
273 # the working copy parent is being stripped, so move to a different commit first
273 # the working copy parent is being stripped, so move to a different commit first
274 urev = max(
274 urev = max(
275 repo.revs(
275 repo.revs(
276 b'(::%n) - %ln + null',
276 b'(::%n) - %ln + null',
277 repo[b'.'].node(),
277 repo[b'.'].node(),
278 visibletostrip,
278 visibletostrip,
279 )
279 )
280 )
280 )
281 hg.clean(repo, urev)
281 hg.clean(repo, urev)
282 overrides = {(b'devel', b'strip-obsmarkers'): False}
282 overrides = {(b'devel', b'strip-obsmarkers'): False}
283 if backup:
283 if backup:
284 ui.status(_(b'moving unwanted changesets to backup\n'))
284 ui.status(_(b'moving unwanted changesets to backup\n'))
285 else:
285 else:
286 ui.status(_(b'deleting unwanted changesets\n'))
286 ui.status(_(b'deleting unwanted changesets\n'))
287 with ui.configoverride(overrides, b'narrow'):
287 with ui.configoverride(overrides, b'narrow'):
288 repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
288 repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
289
289
290 todelete = []
290 todelete = []
291 for t, f, size in repo.store.datafiles():
291 for t, f, size in repo.store.datafiles():
292 if f.startswith(b'data/'):
292 if f.startswith(b'data/'):
293 file = f[5:-2]
293 file = f[5:-2]
294 if not newmatch(file):
294 if not newmatch(file):
295 todelete.append(f)
295 todelete.append(f)
296 elif f.startswith(b'meta/'):
296 elif f.startswith(b'meta/'):
297 dir = f[5:-13]
297 dir = f[5:-13]
298 dirs = sorted(pathutil.dirs({dir})) + [dir]
298 dirs = sorted(pathutil.dirs({dir})) + [dir]
299 include = True
299 include = True
300 for d in dirs:
300 for d in dirs:
301 visit = newmatch.visitdir(d)
301 visit = newmatch.visitdir(d)
302 if not visit:
302 if not visit:
303 include = False
303 include = False
304 break
304 break
305 if visit == b'all':
305 if visit == b'all':
306 break
306 break
307 if not include:
307 if not include:
308 todelete.append(f)
308 todelete.append(f)
309
309
310 repo.destroying()
310 repo.destroying()
311
311
312 with repo.transaction(b'narrowing'):
312 with repo.transaction(b'narrowing'):
313 # Update narrowspec before removing revlogs, so repo won't be
313 # Update narrowspec before removing revlogs, so repo won't be
314 # corrupt in case of crash
314 # corrupt in case of crash
315 repo.setnarrowpats(newincludes, newexcludes)
315 repo.setnarrowpats(newincludes, newexcludes)
316
316
317 for f in todelete:
317 for f in todelete:
318 ui.status(_(b'deleting %s\n') % f)
318 ui.status(_(b'deleting %s\n') % f)
319 util.unlinkpath(repo.svfs.join(f))
319 util.unlinkpath(repo.svfs.join(f))
320 repo.store.markremoved(f)
320 repo.store.markremoved(f)
321
321
322 ui.status(_(b'deleting unwanted files from working copy\n'))
322 ui.status(_(b'deleting unwanted files from working copy\n'))
323 with repo.dirstate.parentchange(repo):
323 with repo.dirstate.changing_parents(repo):
324 narrowspec.updateworkingcopy(repo, assumeclean=True)
324 narrowspec.updateworkingcopy(repo, assumeclean=True)
325 narrowspec.copytoworkingcopy(repo)
325 narrowspec.copytoworkingcopy(repo)
326
326
327 repo.destroyed()
327 repo.destroyed()
328
328
329
329
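The todelete loop in _narrow() maps store paths back to tracked names: a
filelog b'data/<file>.i' (or .d) strips to its file, and a tree manifest
b'meta/<dir>/00manifest.i' strips to its directory; b'data/' and b'meta/' are
5 bytes, b'.i' is 2 and b'/00manifest.i' is 13, hence the slices. Restated
standalone:

    def filelog_name(f):
        assert f.startswith(b'data/') and f[-2:] in (b'.i', b'.d')
        return f[5:-2]    # drop the b'data/' prefix and the 2-byte suffix

    def treemanifest_dir(f):
        assert f.startswith(b'meta/') and f.endswith(b'/00manifest.i')
        return f[5:-13]   # drop b'meta/' and b'/00manifest.i'

    assert filelog_name(b'data/foo/bar.txt.i') == b'foo/bar.txt'
    assert treemanifest_dir(b'meta/foo/00manifest.i') == b'foo'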
330 def _widen(
330 def _widen(
331 ui,
331 ui,
332 repo,
332 repo,
333 remote,
333 remote,
334 commoninc,
334 commoninc,
335 oldincludes,
335 oldincludes,
336 oldexcludes,
336 oldexcludes,
337 newincludes,
337 newincludes,
338 newexcludes,
338 newexcludes,
339 ):
339 ):
340 # for now we assume that if a server has ellipses enabled, we will be
340 # for now we assume that if a server has ellipses enabled, we will be
341 # exchanging ellipses nodes. In the future we should add ellipses as a
341 # exchanging ellipses nodes. In the future we should add ellipses as a
342 # client-side requirement (maybe) to distinguish whether a client is shallow
342 # client-side requirement (maybe) to distinguish whether a client is shallow
343 # or not, and then tell the server whether we want ellipses or not.
343 # or not, and then tell the server whether we want ellipses or not.
344 # Theoretically a non-ellipses repo should be able to use the narrow
344 # Theoretically a non-ellipses repo should be able to use the narrow
345 # functionality of an ellipses-enabled server.
345 # functionality of an ellipses-enabled server.
346 remotecap = remote.capabilities()
346 remotecap = remote.capabilities()
347 ellipsesremote = any(
347 ellipsesremote = any(
348 cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP
348 cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP
349 )
349 )
350
350
351 # check whether we are talking to a server that only supports an old
351 # check whether we are talking to a server that only supports an old
352 # version of the ellipses capabilities
352 # version of the ellipses capabilities
353 isoldellipses = (
353 isoldellipses = (
354 ellipsesremote
354 ellipsesremote
355 and wireprototypes.ELLIPSESCAP1 in remotecap
355 and wireprototypes.ELLIPSESCAP1 in remotecap
356 and wireprototypes.ELLIPSESCAP not in remotecap
356 and wireprototypes.ELLIPSESCAP not in remotecap
357 )
357 )
358
358
359 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
359 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
360 orig(pullop, kwargs)
360 orig(pullop, kwargs)
361 # The old{in,ex}cludepats have already been set by orig()
361 # The old{in,ex}cludepats have already been set by orig()
362 kwargs[b'includepats'] = newincludes
362 kwargs[b'includepats'] = newincludes
363 kwargs[b'excludepats'] = newexcludes
363 kwargs[b'excludepats'] = newexcludes
364
364
365 wrappedextraprepare = extensions.wrappedfunction(
365 wrappedextraprepare = extensions.wrappedfunction(
366 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
366 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
367 )
367 )
368
368
369 # define a function that narrowbundle2 can call after creating the
369 # define a function that narrowbundle2 can call after creating the
370 # backup bundle, but before applying the bundle from the server
370 # backup bundle, but before applying the bundle from the server
371 def setnewnarrowpats():
371 def setnewnarrowpats():
372 repo.setnarrowpats(newincludes, newexcludes)
372 repo.setnarrowpats(newincludes, newexcludes)
373
373
374 repo.setnewnarrowpats = setnewnarrowpats
374 repo.setnewnarrowpats = setnewnarrowpats
375 # silence the devel-warning of applying an empty changegroup
375 # silence the devel-warning of applying an empty changegroup
376 overrides = {(b'devel', b'all-warnings'): False}
376 overrides = {(b'devel', b'all-warnings'): False}
377
377
378 common = commoninc[0]
378 common = commoninc[0]
379 with ui.uninterruptible():
379 with ui.uninterruptible():
380 if ellipsesremote:
380 if ellipsesremote:
381 ds = repo.dirstate
381 ds = repo.dirstate
382 p1, p2 = ds.p1(), ds.p2()
382 p1, p2 = ds.p1(), ds.p2()
383 with ds.parentchange(repo):
383 with ds.changing_parents(repo):
384 ds.setparents(repo.nullid, repo.nullid)
384 ds.setparents(repo.nullid, repo.nullid)
385 if isoldellipses:
385 if isoldellipses:
386 with wrappedextraprepare:
386 with wrappedextraprepare:
387 exchange.pull(repo, remote, heads=common)
387 exchange.pull(repo, remote, heads=common)
388 else:
388 else:
389 known = []
389 known = []
390 if ellipsesremote:
390 if ellipsesremote:
391 known = [
391 known = [
392 ctx.node()
392 ctx.node()
393 for ctx in repo.set(b'::%ln', common)
393 for ctx in repo.set(b'::%ln', common)
394 if ctx.node() != repo.nullid
394 if ctx.node() != repo.nullid
395 ]
395 ]
396 with remote.commandexecutor() as e:
396 with remote.commandexecutor() as e:
397 bundle = e.callcommand(
397 bundle = e.callcommand(
398 b'narrow_widen',
398 b'narrow_widen',
399 {
399 {
400 b'oldincludes': oldincludes,
400 b'oldincludes': oldincludes,
401 b'oldexcludes': oldexcludes,
401 b'oldexcludes': oldexcludes,
402 b'newincludes': newincludes,
402 b'newincludes': newincludes,
403 b'newexcludes': newexcludes,
403 b'newexcludes': newexcludes,
404 b'cgversion': b'03',
404 b'cgversion': b'03',
405 b'commonheads': common,
405 b'commonheads': common,
406 b'known': known,
406 b'known': known,
407 b'ellipses': ellipsesremote,
407 b'ellipses': ellipsesremote,
408 },
408 },
409 ).result()
409 ).result()
410
410
411 trmanager = exchange.transactionmanager(
411 trmanager = exchange.transactionmanager(
412 repo, b'widen', remote.url()
412 repo, b'widen', remote.url()
413 )
413 )
414 with trmanager, repo.ui.configoverride(overrides, b'widen'):
414 with trmanager, repo.ui.configoverride(overrides, b'widen'):
415 op = bundle2.bundleoperation(
415 op = bundle2.bundleoperation(
416 repo, trmanager.transaction, source=b'widen'
416 repo, trmanager.transaction, source=b'widen'
417 )
417 )
418 # TODO: we should catch error.Abort here
418 # TODO: we should catch error.Abort here
419 bundle2.processbundle(repo, bundle, op=op, remote=remote)
419 bundle2.processbundle(repo, bundle, op=op, remote=remote)
420
420
421 if ellipsesremote:
421 if ellipsesremote:
422 with ds.parentchange(repo):
422 with ds.changing_parents(repo):
423 ds.setparents(p1, p2)
423 ds.setparents(p1, p2)
424
424
425 with repo.transaction(b'widening'), repo.dirstate.parentchange(repo):
425 with repo.transaction(b'widening'), repo.dirstate.changing_parents(
426 repo
427 ):
426 repo.setnewnarrowpats()
428 repo.setnewnarrowpats()
427 narrowspec.updateworkingcopy(repo)
429 narrowspec.updateworkingcopy(repo)
428 narrowspec.copytoworkingcopy(repo)
430 narrowspec.copytoworkingcopy(repo)
429
431
430
432
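The ellipses branch of _widen() saves the dirstate parents, points them at
null for the duration of the pull, and restores them afterwards; each
mutation sits inside the renamed changing_parents() context. The same dance
as a reusable context manager (a sketch; assumes only the dirstate API
visible above):

    import contextlib

    @contextlib.contextmanager
    def parents_nulled(repo, nullid):
        ds = repo.dirstate
        p1, p2 = ds.p1(), ds.p2()          # remember the real parents
        with ds.changing_parents(repo):
            ds.setparents(nullid, nullid)  # detach while history is rewritten
        try:
            yield
        finally:
            with ds.changing_parents(repo):
                ds.setparents(p1, p2)      # put the original parents back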
431 # TODO(rdamazio): Make new matcher format and update description
433 # TODO(rdamazio): Make new matcher format and update description
432 @command(
434 @command(
433 b'tracked',
435 b'tracked',
434 [
436 [
435 (b'', b'addinclude', [], _(b'new paths to include')),
437 (b'', b'addinclude', [], _(b'new paths to include')),
436 (b'', b'removeinclude', [], _(b'old paths to no longer include')),
438 (b'', b'removeinclude', [], _(b'old paths to no longer include')),
437 (
439 (
438 b'',
440 b'',
439 b'auto-remove-includes',
441 b'auto-remove-includes',
440 False,
442 False,
441 _(b'automatically choose unused includes to remove'),
443 _(b'automatically choose unused includes to remove'),
442 ),
444 ),
443 (b'', b'addexclude', [], _(b'new paths to exclude')),
445 (b'', b'addexclude', [], _(b'new paths to exclude')),
444 (b'', b'import-rules', b'', _(b'import narrowspecs from a file')),
446 (b'', b'import-rules', b'', _(b'import narrowspecs from a file')),
445 (b'', b'removeexclude', [], _(b'old paths to no longer exclude')),
447 (b'', b'removeexclude', [], _(b'old paths to no longer exclude')),
446 (
448 (
447 b'',
449 b'',
448 b'clear',
450 b'clear',
449 False,
451 False,
450 _(b'whether to replace the existing narrowspec'),
452 _(b'whether to replace the existing narrowspec'),
451 ),
453 ),
452 (
454 (
453 b'',
455 b'',
454 b'force-delete-local-changes',
456 b'force-delete-local-changes',
455 False,
457 False,
456 _(b'forces deletion of local changes when narrowing'),
458 _(b'forces deletion of local changes when narrowing'),
457 ),
459 ),
458 (
460 (
459 b'',
461 b'',
460 b'backup',
462 b'backup',
461 True,
463 True,
462 _(b'back up local changes when narrowing'),
464 _(b'back up local changes when narrowing'),
463 ),
465 ),
464 (
466 (
465 b'',
467 b'',
466 b'update-working-copy',
468 b'update-working-copy',
467 False,
469 False,
468 _(b'update working copy when the store has changed'),
470 _(b'update working copy when the store has changed'),
469 ),
471 ),
470 ]
472 ]
471 + commands.remoteopts,
473 + commands.remoteopts,
472 _(b'[OPTIONS]... [REMOTE]'),
474 _(b'[OPTIONS]... [REMOTE]'),
473 inferrepo=True,
475 inferrepo=True,
474 helpcategory=command.CATEGORY_MAINTENANCE,
476 helpcategory=command.CATEGORY_MAINTENANCE,
475 )
477 )
476 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
478 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
477 """show or change the current narrowspec
479 """show or change the current narrowspec
478
480
479 With no argument, shows the current narrowspec entries, one per line. Each
481 With no argument, shows the current narrowspec entries, one per line. Each
480 line will be prefixed with 'I' or 'X' for included or excluded patterns,
482 line will be prefixed with 'I' or 'X' for included or excluded patterns,
481 respectively.
483 respectively.
482
484
483 The narrowspec is composed of expressions to match remote files and/or
485 The narrowspec is composed of expressions to match remote files and/or
484 directories that should be pulled into your client.
486 directories that should be pulled into your client.
485 The narrowspec has *include* and *exclude* expressions, with excludes always
487 The narrowspec has *include* and *exclude* expressions, with excludes always
486 trumping includes: that is, if a file matches an exclude expression, it will
488 trumping includes: that is, if a file matches an exclude expression, it will
487 be excluded even if it also matches an include expression.
489 be excluded even if it also matches an include expression.
488 Excluding files that were never included has no effect.
490 Excluding files that were never included has no effect.
489
491
490 Each included or excluded entry is in the format described by
492 Each included or excluded entry is in the format described by
491 'hg help patterns'.
493 'hg help patterns'.
492
494
493 The options allow you to add or remove included and excluded expressions.
495 The options allow you to add or remove included and excluded expressions.
494
496
495 If --clear is specified, then all previous includes and excludes are DROPPED
497 If --clear is specified, then all previous includes and excludes are DROPPED
496 and replaced by the new ones specified to --addinclude and --addexclude.
498 and replaced by the new ones specified to --addinclude and --addexclude.
497 If --clear is specified without any further options, the narrowspec will be
499 If --clear is specified without any further options, the narrowspec will be
498 empty and will not match any files.
500 empty and will not match any files.
499
501
500 If --auto-remove-includes is specified, then those includes that don't match
502 If --auto-remove-includes is specified, then those includes that don't match
501 any files modified by currently visible local commits (those not shared by
503 any files modified by currently visible local commits (those not shared by
502 the remote) will be added to the set of explicitly specified includes to
504 the remote) will be added to the set of explicitly specified includes to
503 remove.
505 remove.
504
506
505 --import-rules accepts a path to a file containing rules, allowing you to
507 --import-rules accepts a path to a file containing rules, allowing you to
506 add --addinclude, --addexclude rules in bulk. Like the other include and
508 add --addinclude, --addexclude rules in bulk. Like the other include and
507 exclude switches, the changes are applied immediately.
509 exclude switches, the changes are applied immediately.
508 """
510 """
509 opts = pycompat.byteskwargs(opts)
511 opts = pycompat.byteskwargs(opts)
510 if requirements.NARROW_REQUIREMENT not in repo.requirements:
512 if requirements.NARROW_REQUIREMENT not in repo.requirements:
511 raise error.InputError(
513 raise error.InputError(
512 _(
514 _(
513 b'the tracked command is only supported on '
515 b'the tracked command is only supported on '
514 b'repositories cloned with --narrow'
516 b'repositories cloned with --narrow'
515 )
517 )
516 )
518 )
517
519
518 # Before supporting it, decide whether "hg tracked --clear" should mean
520 # Before supporting it, decide whether "hg tracked --clear" should mean
519 # tracking no paths or all paths.
521 # tracking no paths or all paths.
520 if opts[b'clear']:
522 if opts[b'clear']:
521 raise error.InputError(_(b'the --clear option is not yet supported'))
523 raise error.InputError(_(b'the --clear option is not yet supported'))
522
524
523 # import rules from a file
525 # import rules from a file
524 newrules = opts.get(b'import_rules')
526 newrules = opts.get(b'import_rules')
525 if newrules:
527 if newrules:
526 try:
528 try:
527 filepath = os.path.join(encoding.getcwd(), newrules)
529 filepath = os.path.join(encoding.getcwd(), newrules)
528 fdata = util.readfile(filepath)
530 fdata = util.readfile(filepath)
529 except IOError as inst:
531 except IOError as inst:
530 raise error.StorageError(
532 raise error.StorageError(
531 _(b"cannot read narrowspecs from '%s': %s")
533 _(b"cannot read narrowspecs from '%s': %s")
532 % (filepath, encoding.strtolocal(inst.strerror))
534 % (filepath, encoding.strtolocal(inst.strerror))
533 )
535 )
534 includepats, excludepats, profiles = sparse.parseconfig(
536 includepats, excludepats, profiles = sparse.parseconfig(
535 ui, fdata, b'narrow'
537 ui, fdata, b'narrow'
536 )
538 )
537 if profiles:
539 if profiles:
538 raise error.InputError(
540 raise error.InputError(
539 _(
541 _(
540 b"including other spec files using '%include' "
542 b"including other spec files using '%include' "
541 b"is not supported in narrowspec"
543 b"is not supported in narrowspec"
542 )
544 )
543 )
545 )
544 opts[b'addinclude'].extend(includepats)
546 opts[b'addinclude'].extend(includepats)
545 opts[b'addexclude'].extend(excludepats)
547 opts[b'addexclude'].extend(excludepats)
546
548
547 addedincludes = narrowspec.parsepatterns(opts[b'addinclude'])
549 addedincludes = narrowspec.parsepatterns(opts[b'addinclude'])
548 removedincludes = narrowspec.parsepatterns(opts[b'removeinclude'])
550 removedincludes = narrowspec.parsepatterns(opts[b'removeinclude'])
549 addedexcludes = narrowspec.parsepatterns(opts[b'addexclude'])
551 addedexcludes = narrowspec.parsepatterns(opts[b'addexclude'])
550 removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude'])
552 removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude'])
551 autoremoveincludes = opts[b'auto_remove_includes']
553 autoremoveincludes = opts[b'auto_remove_includes']
552
554
553 update_working_copy = opts[b'update_working_copy']
555 update_working_copy = opts[b'update_working_copy']
554 only_show = not (
556 only_show = not (
555 addedincludes
557 addedincludes
556 or removedincludes
558 or removedincludes
557 or addedexcludes
559 or addedexcludes
558 or removedexcludes
560 or removedexcludes
559 or newrules
561 or newrules
560 or autoremoveincludes
562 or autoremoveincludes
561 or update_working_copy
563 or update_working_copy
562 )
564 )
563
565
564 oldincludes, oldexcludes = repo.narrowpats
566 oldincludes, oldexcludes = repo.narrowpats
565
567
566 # filter the user passed additions and deletions into actual additions and
568 # filter the user passed additions and deletions into actual additions and
567 # deletions of excludes and includes
569 # deletions of excludes and includes
568 addedincludes -= oldincludes
570 addedincludes -= oldincludes
569 removedincludes &= oldincludes
571 removedincludes &= oldincludes
570 addedexcludes -= oldexcludes
572 addedexcludes -= oldexcludes
571 removedexcludes &= oldexcludes
573 removedexcludes &= oldexcludes
572
574
573 widening = addedincludes or removedexcludes
575 widening = addedincludes or removedexcludes
574 narrowing = removedincludes or addedexcludes
576 narrowing = removedincludes or addedexcludes
575
577
576 # Only print the current narrowspec.
578 # Only print the current narrowspec.
577 if only_show:
579 if only_show:
578 ui.pager(b'tracked')
580 ui.pager(b'tracked')
579 fm = ui.formatter(b'narrow', opts)
581 fm = ui.formatter(b'narrow', opts)
580 for i in sorted(oldincludes):
582 for i in sorted(oldincludes):
581 fm.startitem()
583 fm.startitem()
582 fm.write(b'status', b'%s ', b'I', label=b'narrow.included')
584 fm.write(b'status', b'%s ', b'I', label=b'narrow.included')
583 fm.write(b'pat', b'%s\n', i, label=b'narrow.included')
585 fm.write(b'pat', b'%s\n', i, label=b'narrow.included')
584 for i in sorted(oldexcludes):
586 for i in sorted(oldexcludes):
585 fm.startitem()
587 fm.startitem()
586 fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded')
588 fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded')
587 fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded')
589 fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded')
588 fm.end()
590 fm.end()
589 return 0
591 return 0
590
592
591 if update_working_copy:
593 if update_working_copy:
592 with repo.wlock(), repo.lock(), repo.transaction(
594 with repo.wlock(), repo.lock(), repo.transaction(
593 b'narrow-wc'
595 b'narrow-wc'
594 ), repo.dirstate.parentchange(repo):
596 ), repo.dirstate.changing_parents(repo):
595 narrowspec.updateworkingcopy(repo)
597 narrowspec.updateworkingcopy(repo)
596 narrowspec.copytoworkingcopy(repo)
598 narrowspec.copytoworkingcopy(repo)
597 return 0
599 return 0
598
600
599 if not (widening or narrowing or autoremoveincludes):
601 if not (widening or narrowing or autoremoveincludes):
600 ui.status(_(b"nothing to widen or narrow\n"))
602 ui.status(_(b"nothing to widen or narrow\n"))
601 return 0
603 return 0
602
604
603 with repo.wlock(), repo.lock():
605 with repo.wlock(), repo.lock():
604 cmdutil.bailifchanged(repo)
606 cmdutil.bailifchanged(repo)
605
607
606 # Find the revisions we have in common with the remote. These will
608 # Find the revisions we have in common with the remote. These will
607 # be used for finding local-only changes for narrowing. They will
609 # be used for finding local-only changes for narrowing. They will
608 # also define the set of revisions to update for widening.
610 # also define the set of revisions to update for widening.
609 path = urlutil.get_unique_pull_path_obj(b'tracked', ui, remotepath)
611 path = urlutil.get_unique_pull_path_obj(b'tracked', ui, remotepath)
610 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
612 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
611 remote = hg.peer(repo, opts, path)
613 remote = hg.peer(repo, opts, path)
612
614
613 try:
615 try:
614 # check narrow support before doing anything if widening needs to be
616 # check narrow support before doing anything if widening needs to be
615 # performed. In the future we should also abort if the client uses
617 # performed. In the future we should also abort if the client uses
616 # ellipses and the server does not support them
618 # ellipses and the server does not support them
617 if (
619 if (
618 widening
620 widening
619 and wireprototypes.NARROWCAP not in remote.capabilities()
621 and wireprototypes.NARROWCAP not in remote.capabilities()
620 ):
622 ):
621 raise error.Abort(_(b"server does not support narrow clones"))
623 raise error.Abort(_(b"server does not support narrow clones"))
622
624
623 commoninc = discovery.findcommonincoming(repo, remote)
625 commoninc = discovery.findcommonincoming(repo, remote)
624
626
625 if autoremoveincludes:
627 if autoremoveincludes:
626 outgoing = discovery.findcommonoutgoing(
628 outgoing = discovery.findcommonoutgoing(
627 repo, remote, commoninc=commoninc
629 repo, remote, commoninc=commoninc
628 )
630 )
629 ui.status(_(b'looking for unused includes to remove\n'))
631 ui.status(_(b'looking for unused includes to remove\n'))
630 localfiles = set()
632 localfiles = set()
631 for n in itertools.chain(outgoing.missing, outgoing.excluded):
633 for n in itertools.chain(outgoing.missing, outgoing.excluded):
632 localfiles.update(repo[n].files())
634 localfiles.update(repo[n].files())
633 suggestedremovals = []
635 suggestedremovals = []
634 for include in sorted(oldincludes):
636 for include in sorted(oldincludes):
635 match = narrowspec.match(repo.root, [include], oldexcludes)
637 match = narrowspec.match(repo.root, [include], oldexcludes)
636 if not any(match(f) for f in localfiles):
638 if not any(match(f) for f in localfiles):
637 suggestedremovals.append(include)
639 suggestedremovals.append(include)
638 if suggestedremovals:
640 if suggestedremovals:
639 for s in suggestedremovals:
641 for s in suggestedremovals:
640 ui.status(b'%s\n' % s)
642 ui.status(b'%s\n' % s)
641 if (
643 if (
642 ui.promptchoice(
644 ui.promptchoice(
643 _(
645 _(
644 b'remove these unused includes (yn)?'
646 b'remove these unused includes (yn)?'
645 b'$$ &Yes $$ &No'
647 b'$$ &Yes $$ &No'
646 )
648 )
647 )
649 )
648 == 0
650 == 0
649 ):
651 ):
650 removedincludes.update(suggestedremovals)
652 removedincludes.update(suggestedremovals)
651 narrowing = True
653 narrowing = True
652 else:
654 else:
653 ui.status(_(b'found no unused includes\n'))
655 ui.status(_(b'found no unused includes\n'))
654
656
655 if narrowing:
657 if narrowing:
656 newincludes = oldincludes - removedincludes
658 newincludes = oldincludes - removedincludes
657 newexcludes = oldexcludes | addedexcludes
659 newexcludes = oldexcludes | addedexcludes
658 _narrow(
660 _narrow(
659 ui,
661 ui,
660 repo,
662 repo,
661 remote,
663 remote,
662 commoninc,
664 commoninc,
663 oldincludes,
665 oldincludes,
664 oldexcludes,
666 oldexcludes,
665 newincludes,
667 newincludes,
666 newexcludes,
668 newexcludes,
667 opts[b'force_delete_local_changes'],
669 opts[b'force_delete_local_changes'],
668 opts[b'backup'],
670 opts[b'backup'],
669 )
671 )
670 # _narrow() updated the narrowspec and _widen() below needs to
672 # _narrow() updated the narrowspec and _widen() below needs to
671 # use the updated values as its base (otherwise removed includes
673 # use the updated values as its base (otherwise removed includes
672 # and addedexcludes will be lost in the resulting narrowspec)
674 # and addedexcludes will be lost in the resulting narrowspec)
673 oldincludes = newincludes
675 oldincludes = newincludes
674 oldexcludes = newexcludes
676 oldexcludes = newexcludes
675
677
676 if widening:
678 if widening:
677 newincludes = oldincludes | addedincludes
679 newincludes = oldincludes | addedincludes
678 newexcludes = oldexcludes - removedexcludes
680 newexcludes = oldexcludes - removedexcludes
679 _widen(
681 _widen(
680 ui,
682 ui,
681 repo,
683 repo,
682 remote,
684 remote,
683 commoninc,
685 commoninc,
684 oldincludes,
686 oldincludes,
685 oldexcludes,
687 oldexcludes,
686 newincludes,
688 newincludes,
687 newexcludes,
689 newexcludes,
688 )
690 )
689 finally:
691 finally:
690 remote.close()
692 remote.close()
691
693
692 return 0
694 return 0
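trackedcmd() reduces the user's options to set arithmetic before picking a
direction; simplified here to includes only (excludes mirror it), on toy
data:

    oldincludes = {b'dir/a', b'dir/b'}
    addedincludes = {b'dir/b', b'dir/c'} - oldincludes    # {b'dir/c'}: truly new
    removedincludes = {b'dir/a', b'dir/x'} & oldincludes  # {b'dir/a'}: tracked
    widening = bool(addedincludes)     # True: _widen() will run
    narrowing = bool(removedincludes)  # True: _narrow() runs first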
@@ -1,204 +1,204 @@
1 # split.py - split a changeset into smaller ones
1 # split.py - split a changeset into smaller ones
2 #
2 #
3 # Copyright 2015 Laurent Charignon <lcharignon@fb.com>
3 # Copyright 2015 Laurent Charignon <lcharignon@fb.com>
4 # Copyright 2017 Facebook, Inc.
4 # Copyright 2017 Facebook, Inc.
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 """command to split a changeset into smaller ones (EXPERIMENTAL)"""
8 """command to split a changeset into smaller ones (EXPERIMENTAL)"""
9
9
10
10
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
13 from mercurial.node import (
13 from mercurial.node import (
14 nullrev,
14 nullrev,
15 short,
15 short,
16 )
16 )
17
17
18 from mercurial import (
18 from mercurial import (
19 bookmarks,
19 bookmarks,
20 cmdutil,
20 cmdutil,
21 commands,
21 commands,
22 error,
22 error,
23 hg,
23 hg,
24 logcmdutil,
24 logcmdutil,
25 pycompat,
25 pycompat,
26 registrar,
26 registrar,
27 revsetlang,
27 revsetlang,
28 rewriteutil,
28 rewriteutil,
29 scmutil,
29 scmutil,
30 util,
30 util,
31 )
31 )
32
32
33 # allow people to use split without explicitly enabling rebase extension
33 # allow people to use split without explicitly enabling rebase extension
34 from . import rebase
34 from . import rebase
35
35
36 cmdtable = {}
36 cmdtable = {}
37 command = registrar.command(cmdtable)
37 command = registrar.command(cmdtable)
38
38
39 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
39 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
40 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
40 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
41 # be specifying the version(s) of Mercurial they are tested with, or
41 # be specifying the version(s) of Mercurial they are tested with, or
42 # leave the attribute unspecified.
42 # leave the attribute unspecified.
43 testedwith = b'ships-with-hg-core'
43 testedwith = b'ships-with-hg-core'
44
44
45
45
46 @command(
46 @command(
47 b'split',
47 b'split',
48 [
48 [
49 (b'r', b'rev', b'', _(b"revision to split"), _(b'REV')),
49 (b'r', b'rev', b'', _(b"revision to split"), _(b'REV')),
50 (b'', b'rebase', True, _(b'rebase descendants after split')),
50 (b'', b'rebase', True, _(b'rebase descendants after split')),
51 ]
51 ]
52 + cmdutil.commitopts2,
52 + cmdutil.commitopts2,
53 _(b'hg split [--no-rebase] [[-r] REV]'),
53 _(b'hg split [--no-rebase] [[-r] REV]'),
54 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
54 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
55 helpbasic=True,
55 helpbasic=True,
56 )
56 )
57 def split(ui, repo, *revs, **opts):
57 def split(ui, repo, *revs, **opts):
58 """split a changeset into smaller ones
58 """split a changeset into smaller ones
59
59
60 Repeatedly prompt for changes and a commit message for each new changeset,
60 Repeatedly prompt for changes and a commit message for each new changeset,
61 until nothing is left of the original changeset.
61 until nothing is left of the original changeset.
62
62
63 If --rev was not given, split the working directory parent.
63 If --rev was not given, split the working directory parent.
64
64
65 By default, rebase connected non-obsoleted descendants onto the new
65 By default, rebase connected non-obsoleted descendants onto the new
66 changeset. Use --no-rebase to avoid the rebase.
66 changeset. Use --no-rebase to avoid the rebase.
67 """
67 """
68 opts = pycompat.byteskwargs(opts)
68 opts = pycompat.byteskwargs(opts)
69 revlist = []
69 revlist = []
70 if opts.get(b'rev'):
70 if opts.get(b'rev'):
71 revlist.append(opts.get(b'rev'))
71 revlist.append(opts.get(b'rev'))
72 revlist.extend(revs)
72 revlist.extend(revs)
73 with repo.wlock(), repo.lock():
73 with repo.wlock(), repo.lock():
74 tr = repo.transaction(b'split')
74 tr = repo.transaction(b'split')
75 # If the rebase somehow runs into conflicts, make sure
75 # If the rebase somehow runs into conflicts, make sure
76 # we close the transaction so the user can continue it.
76 # we close the transaction so the user can continue it.
77 with util.acceptintervention(tr):
77 with util.acceptintervention(tr):
78 revs = logcmdutil.revrange(repo, revlist or [b'.'])
78 revs = logcmdutil.revrange(repo, revlist or [b'.'])
79 if len(revs) > 1:
79 if len(revs) > 1:
80 raise error.InputError(_(b'cannot split multiple revisions'))
80 raise error.InputError(_(b'cannot split multiple revisions'))
81
81
82 rev = revs.first()
82 rev = revs.first()
83 # Handle nullrev specially here (instead of leaving for precheck()
83 # Handle nullrev specially here (instead of leaving for precheck()
84 # below) so we get a nicer message and error code.
84 # below) so we get a nicer message and error code.
85 if rev is None or rev == nullrev:
85 if rev is None or rev == nullrev:
86 ui.status(_(b'nothing to split\n'))
86 ui.status(_(b'nothing to split\n'))
87 return 1
87 return 1
88 ctx = repo[rev]
88 ctx = repo[rev]
89 if ctx.node() is None:
89 if ctx.node() is None:
90 raise error.InputError(_(b'cannot split working directory'))
90 raise error.InputError(_(b'cannot split working directory'))
91
91
92 if opts.get(b'rebase'):
92 if opts.get(b'rebase'):
93 # Skip obsoleted descendants and their descendants so the rebase
93 # Skip obsoleted descendants and their descendants so the rebase
94 # won't cause conflicts for sure.
94 # won't cause conflicts for sure.
95 descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
95 descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
96 torebase = list(
96 torebase = list(
97 repo.revs(
97 repo.revs(
98 b'%ld - (%ld & obsolete())::', descendants, descendants
98 b'%ld - (%ld & obsolete())::', descendants, descendants
99 )
99 )
100 )
100 )
101 else:
101 else:
102 torebase = []
102 torebase = []
103 rewriteutil.precheck(repo, [rev] + torebase, b'split')
103 rewriteutil.precheck(repo, [rev] + torebase, b'split')
104
104
105 if len(ctx.parents()) > 1:
105 if len(ctx.parents()) > 1:
106 raise error.InputError(_(b'cannot split a merge changeset'))
106 raise error.InputError(_(b'cannot split a merge changeset'))
107
107
108 cmdutil.bailifchanged(repo)
108 cmdutil.bailifchanged(repo)
109
109
110 # Deactivate bookmark temporarily so it won't get moved
110 # Deactivate bookmark temporarily so it won't get moved
111 # unintentionally
111 # unintentionally
112 bname = repo._activebookmark
112 bname = repo._activebookmark
113 if bname and repo._bookmarks[bname] != ctx.node():
113 if bname and repo._bookmarks[bname] != ctx.node():
114 bookmarks.deactivate(repo)
114 bookmarks.deactivate(repo)
115
115
116 wnode = repo[b'.'].node()
116 wnode = repo[b'.'].node()
117 top = None
117 top = None
118 try:
118 try:
119 top = dosplit(ui, repo, tr, ctx, opts)
119 top = dosplit(ui, repo, tr, ctx, opts)
120 finally:
120 finally:
121 # top is None: split failed, need update --clean recovery.
121 # top is None: split failed, need update --clean recovery.
122 # wnode == ctx.node(): wnode split, no need to update.
122 # wnode == ctx.node(): wnode split, no need to update.
123 if top is None or wnode != ctx.node():
123 if top is None or wnode != ctx.node():
124 hg.clean(repo, wnode, show_stats=False)
124 hg.clean(repo, wnode, show_stats=False)
125 if bname:
125 if bname:
126 bookmarks.activate(repo, bname)
126 bookmarks.activate(repo, bname)
127 if torebase and top:
127 if torebase and top:
128 dorebase(ui, repo, torebase, top)
128 dorebase(ui, repo, torebase, top)
129
129
130
130
131 def dosplit(ui, repo, tr, ctx, opts):
131 def dosplit(ui, repo, tr, ctx, opts):
132 committed = [] # [ctx]
132 committed = [] # [ctx]
133
133
134 # Set working parent to ctx.p1(), and keep working copy as ctx's content
134 # Set working parent to ctx.p1(), and keep working copy as ctx's content
135 if ctx.node() != repo.dirstate.p1():
135 if ctx.node() != repo.dirstate.p1():
136 hg.clean(repo, ctx.node(), show_stats=False)
136 hg.clean(repo, ctx.node(), show_stats=False)
137 with repo.dirstate.parentchange(repo):
137 with repo.dirstate.changing_parents(repo):
138 scmutil.movedirstate(repo, ctx.p1())
138 scmutil.movedirstate(repo, ctx.p1())
139
139
140 # Any modified, added, removed, deleted result means split is incomplete
140 # Any modified, added, removed, deleted result means split is incomplete
141 def incomplete(repo):
141 def incomplete(repo):
142 st = repo.status()
142 st = repo.status()
143 return any((st.modified, st.added, st.removed, st.deleted))
143 return any((st.modified, st.added, st.removed, st.deleted))
144
144
145 # Main split loop
145 # Main split loop
146 while incomplete(repo):
146 while incomplete(repo):
147 if committed:
147 if committed:
148 header = _(
148 header = _(
149 b'HG: Splitting %s. So far it has been split into:\n'
149 b'HG: Splitting %s. So far it has been split into:\n'
150 ) % short(ctx.node())
150 ) % short(ctx.node())
151 # We don't want color codes in the commit message template, so
151 # We don't want color codes in the commit message template, so
152 # disable the label() template function while we render it.
152 # disable the label() template function while we render it.
153 with ui.configoverride(
153 with ui.configoverride(
154 {(b'templatealias', b'label(l,x)'): b"x"}, b'split'
154 {(b'templatealias', b'label(l,x)'): b"x"}, b'split'
155 ):
155 ):
156 for c in committed:
156 for c in committed:
157 summary = cmdutil.format_changeset_summary(ui, c, b'split')
157 summary = cmdutil.format_changeset_summary(ui, c, b'split')
158 header += _(b'HG: - %s\n') % summary
158 header += _(b'HG: - %s\n') % summary
159 header += _(
159 header += _(
160 b'HG: Write commit message for the next split changeset.\n'
160 b'HG: Write commit message for the next split changeset.\n'
161 )
161 )
162 else:
162 else:
163 header = _(
163 header = _(
164 b'HG: Splitting %s. Write commit message for the '
164 b'HG: Splitting %s. Write commit message for the '
165 b'first split changeset.\n'
165 b'first split changeset.\n'
166 ) % short(ctx.node())
166 ) % short(ctx.node())
167 opts.update(
167 opts.update(
168 {
168 {
169 b'edit': True,
169 b'edit': True,
170 b'interactive': True,
170 b'interactive': True,
171 b'message': header + ctx.description(),
171 b'message': header + ctx.description(),
172 }
172 }
173 )
173 )
174 origctx = repo[b'.']
174 origctx = repo[b'.']
175 commands.commit(ui, repo, **pycompat.strkwargs(opts))
175 commands.commit(ui, repo, **pycompat.strkwargs(opts))
176 newctx = repo[b'.']
176 newctx = repo[b'.']
177 # Ensure the user didn't do a "no-op" split (such as deselecting
177 # Ensure the user didn't do a "no-op" split (such as deselecting
178 # everything).
178 # everything).
179 if origctx.node() != newctx.node():
179 if origctx.node() != newctx.node():
180 committed.append(newctx)
180 committed.append(newctx)
181
181
182 if not committed:
182 if not committed:
183 raise error.InputError(_(b'cannot split an empty revision'))
183 raise error.InputError(_(b'cannot split an empty revision'))
184
184
185 if len(committed) != 1 or committed[0].node() != ctx.node():
185 if len(committed) != 1 or committed[0].node() != ctx.node():
186 # Ensure we don't strip a node if we produce the same commit as already
186 # Ensure we don't strip a node if we produce the same commit as already
187 # exists
187 # exists
188 scmutil.cleanupnodes(
188 scmutil.cleanupnodes(
189 repo,
189 repo,
190 {ctx.node(): [c.node() for c in committed]},
190 {ctx.node(): [c.node() for c in committed]},
191 operation=b'split',
191 operation=b'split',
192 fixphase=True,
192 fixphase=True,
193 )
193 )
194
194
195 return committed[-1]
195 return committed[-1]
196
196
197
197
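The mapping handed to scmutil.cleanupnodes() differs in shape between the
commands in this diff: dosplit() records several successors for one node,
while uncommit below records one successor or none. With toy 20-byte node
ids:

    old, n1, n2 = b'\x01' * 20, b'\x02' * 20, b'\x03' * 20
    split_mapping = {old: [n1, n2]}  # split: one changeset became two pieces
    uncommit_keep = {old: (n1,)}     # uncommit: replaced by a filtered commit
    uncommit_prune = {old: ()}       # uncommit: commit removed entirely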
198 def dorebase(ui, repo, src, destctx):
198 def dorebase(ui, repo, src, destctx):
199 rebase.rebase(
199 rebase.rebase(
200 ui,
200 ui,
201 repo,
201 repo,
202 rev=[revsetlang.formatspec(b'%ld', src)],
202 rev=[revsetlang.formatspec(b'%ld', src)],
203 dest=revsetlang.formatspec(b'%d', destctx.rev()),
203 dest=revsetlang.formatspec(b'%d', destctx.rev()),
204 )
204 )
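split.py also picks up the mechanical rename this changeset is about:
repo.dirstate.parentchange(repo) becomes repo.dirstate.changing_parents(repo).
An out-of-tree extension that must run on both sides of the rename could
hedge with a helper like this (a sketch, not part of this change):

    def dirstate_changing_parents(repo):
        """Return the parent-change context manager under either name."""
        ds = repo.dirstate
        if hasattr(ds, 'changing_parents'):  # Mercurial with this rename
            return ds.changing_parents(repo)
        return ds.parentchange(repo)         # older releases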
@@ -1,324 +1,324 @@
1 # uncommit - undo the actions of a commit
1 # uncommit - undo the actions of a commit
2 #
2 #
3 # Copyright 2011 Peter Arrenbrecht <peter.arrenbrecht@gmail.com>
3 # Copyright 2011 Peter Arrenbrecht <peter.arrenbrecht@gmail.com>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 # Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 # Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 # Patrick Mezard <patrick@mezard.eu>
6 # Patrick Mezard <patrick@mezard.eu>
7 # Copyright 2016 Facebook, Inc.
7 # Copyright 2016 Facebook, Inc.
8 #
8 #
9 # This software may be used and distributed according to the terms of the
9 # This software may be used and distributed according to the terms of the
10 # GNU General Public License version 2 or any later version.
10 # GNU General Public License version 2 or any later version.
11
11
12 """uncommit part or all of a local changeset (EXPERIMENTAL)
12 """uncommit part or all of a local changeset (EXPERIMENTAL)
13
13
14 This command undoes the effect of a local commit, returning the affected
14 This command undoes the effect of a local commit, returning the affected
15 files to their uncommitted state. This means that files modified, added or
15 files to their uncommitted state. This means that files modified, added or
16 removed in the changeset keep their contents on disk, and so will still
16 removed in the changeset keep their contents on disk, and so will still
17 show up as modified, added or removed in the working directory.
17 show up as modified, added or removed in the working directory.
18 """
18 """
19
19
20
20
21 from mercurial.i18n import _
21 from mercurial.i18n import _
22
22
23 from mercurial import (
23 from mercurial import (
24 cmdutil,
24 cmdutil,
25 commands,
25 commands,
26 context,
26 context,
27 copies as copiesmod,
27 copies as copiesmod,
28 error,
28 error,
29 obsutil,
29 obsutil,
30 pathutil,
30 pathutil,
31 pycompat,
31 pycompat,
32 registrar,
32 registrar,
33 rewriteutil,
33 rewriteutil,
34 scmutil,
34 scmutil,
35 )
35 )
36
36
37 cmdtable = {}
37 cmdtable = {}
38 command = registrar.command(cmdtable)
38 command = registrar.command(cmdtable)
39
39
40 configtable = {}
40 configtable = {}
41 configitem = registrar.configitem(configtable)
41 configitem = registrar.configitem(configtable)
42
42
43 configitem(
43 configitem(
44 b'experimental',
44 b'experimental',
45 b'uncommitondirtywdir',
45 b'uncommitondirtywdir',
46 default=False,
46 default=False,
47 )
47 )
48 configitem(
48 configitem(
49 b'experimental',
49 b'experimental',
50 b'uncommit.keep',
50 b'uncommit.keep',
51 default=False,
51 default=False,
52 )
52 )
53
53
54 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
54 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
55 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
55 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
56 # be specifying the version(s) of Mercurial they are tested with, or
56 # be specifying the version(s) of Mercurial they are tested with, or
57 # leave the attribute unspecified.
57 # leave the attribute unspecified.
58 testedwith = b'ships-with-hg-core'
58 testedwith = b'ships-with-hg-core'
59
59
60
60
61 def _commitfiltered(
61 def _commitfiltered(
62 repo, ctx, match, keepcommit, message=None, user=None, date=None
62 repo, ctx, match, keepcommit, message=None, user=None, date=None
63 ):
63 ):
64 """Recommit ctx with changed files not in match. Return the new
64 """Recommit ctx with changed files not in match. Return the new
65 node identifier, or None if nothing changed.
65 node identifier, or None if nothing changed.
66 """
66 """
67 base = ctx.p1()
67 base = ctx.p1()
68 # ctx
68 # ctx
69 initialfiles = set(ctx.files())
69 initialfiles = set(ctx.files())
70 exclude = {f for f in initialfiles if match(f)}
70 exclude = {f for f in initialfiles if match(f)}
71
71
72 # No files matched commit, so nothing excluded
72 # No files matched commit, so nothing excluded
73 if not exclude:
73 if not exclude:
74 return None
74 return None
75
75
76 # return the p1 so that we don't create an obsmarker later
76 # return the p1 so that we don't create an obsmarker later
77 if not keepcommit:
77 if not keepcommit:
78 return ctx.p1().node()
78 return ctx.p1().node()
79
79
80 files = initialfiles - exclude
80 files = initialfiles - exclude
81 # Filter copies
81 # Filter copies
82 copied = copiesmod.pathcopies(base, ctx)
82 copied = copiesmod.pathcopies(base, ctx)
83 copied = {dst: src for dst, src in copied.items() if dst in files}
83 copied = {dst: src for dst, src in copied.items() if dst in files}
84
84
85 def filectxfn(repo, memctx, path, contentctx=ctx, redirect=()):
85 def filectxfn(repo, memctx, path, contentctx=ctx, redirect=()):
86 if path not in contentctx:
86 if path not in contentctx:
87 return None
87 return None
88 fctx = contentctx[path]
88 fctx = contentctx[path]
89 mctx = context.memfilectx(
89 mctx = context.memfilectx(
90 repo,
90 repo,
91 memctx,
91 memctx,
92 fctx.path(),
92 fctx.path(),
93 fctx.data(),
93 fctx.data(),
94 fctx.islink(),
94 fctx.islink(),
95 fctx.isexec(),
95 fctx.isexec(),
96 copysource=copied.get(path),
96 copysource=copied.get(path),
97 )
97 )
98 return mctx
98 return mctx
99
99
100 if not files:
100 if not files:
101 repo.ui.status(_(b"note: keeping empty commit\n"))
101 repo.ui.status(_(b"note: keeping empty commit\n"))
102
102
103 if message is None:
103 if message is None:
104 message = ctx.description()
104 message = ctx.description()
105 if not user:
105 if not user:
106 user = ctx.user()
106 user = ctx.user()
107 if not date:
107 if not date:
108 date = ctx.date()
108 date = ctx.date()
109
109
110 new = context.memctx(
110 new = context.memctx(
111 repo,
111 repo,
112 parents=[base.node(), repo.nullid],
112 parents=[base.node(), repo.nullid],
113 text=message,
113 text=message,
114 files=files,
114 files=files,
115 filectxfn=filectxfn,
115 filectxfn=filectxfn,
116 user=user,
116 user=user,
117 date=date,
117 date=date,
118 extra=ctx.extra(),
118 extra=ctx.extra(),
119 )
119 )
120 return repo.commitctx(new)
120 return repo.commitctx(new)
121
121
122
122
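The bookkeeping at the top of _commitfiltered(), on toy data: files matched
by the user's patterns are excluded from the recreated commit and thereby
fall back into the working copy.

    initialfiles = {b'a.txt', b'b.txt', b'c.txt'}
    match = lambda f: f == b'b.txt'  # stand-in for the real matcher object
    exclude = {f for f in initialfiles if match(f)}  # what gets uncommitted
    files = initialfiles - exclude                   # what the new commit keeps
    assert exclude == {b'b.txt'} and files == {b'a.txt', b'c.txt'}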
123 @command(
123 @command(
124 b'uncommit',
124 b'uncommit',
125 [
125 [
126 (b'', b'keep', None, _(b'allow an empty commit after uncommitting')),
126 (b'', b'keep', None, _(b'allow an empty commit after uncommitting')),
127 (
127 (
128 b'',
128 b'',
129 b'allow-dirty-working-copy',
129 b'allow-dirty-working-copy',
130 False,
130 False,
131 _(b'allow uncommit with outstanding changes'),
131 _(b'allow uncommit with outstanding changes'),
132 ),
132 ),
133 (b'n', b'note', b'', _(b'store a note on uncommit'), _(b'TEXT')),
133 (b'n', b'note', b'', _(b'store a note on uncommit'), _(b'TEXT')),
134 ]
134 ]
135 + commands.walkopts
135 + commands.walkopts
136 + commands.commitopts
136 + commands.commitopts
137 + commands.commitopts2
137 + commands.commitopts2
138 + commands.commitopts3,
138 + commands.commitopts3,
139 _(b'[OPTION]... [FILE]...'),
139 _(b'[OPTION]... [FILE]...'),
140 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
140 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
141 )
141 )
142 def uncommit(ui, repo, *pats, **opts):
142 def uncommit(ui, repo, *pats, **opts):
143 """uncommit part or all of a local changeset
143 """uncommit part or all of a local changeset
144
144
145 This command undoes the effect of a local commit, returning the affected
145 This command undoes the effect of a local commit, returning the affected
146 files to their uncommitted state. This means that files modified or
146 files to their uncommitted state. This means that files modified or
147 deleted in the changeset keep their contents on disk, and so will remain
147 deleted in the changeset keep their contents on disk, and so will remain
148 modified in the working directory.
148 modified in the working directory.
149
149
150 If no files are specified, the commit will be pruned, unless --keep is
150 If no files are specified, the commit will be pruned, unless --keep is
151 given.
151 given.
152 """
152 """
153 cmdutil.check_note_size(opts)
153 cmdutil.check_note_size(opts)
154 cmdutil.resolve_commit_options(ui, opts)
154 cmdutil.resolve_commit_options(ui, opts)
155 opts = pycompat.byteskwargs(opts)
155 opts = pycompat.byteskwargs(opts)
156
156
157 with repo.wlock(), repo.lock():
157 with repo.wlock(), repo.lock():
158
158
159 st = repo.status()
159 st = repo.status()
160 m, a, r, d = st.modified, st.added, st.removed, st.deleted
160 m, a, r, d = st.modified, st.added, st.removed, st.deleted
161 isdirtypath = any(set(m + a + r + d) & set(pats))
161 isdirtypath = any(set(m + a + r + d) & set(pats))
162 allowdirtywcopy = opts[
162 allowdirtywcopy = opts[
163 b'allow_dirty_working_copy'
163 b'allow_dirty_working_copy'
164 ] or repo.ui.configbool(b'experimental', b'uncommitondirtywdir')
164 ] or repo.ui.configbool(b'experimental', b'uncommitondirtywdir')
165 if not allowdirtywcopy and (not pats or isdirtypath):
165 if not allowdirtywcopy and (not pats or isdirtypath):
166 cmdutil.bailifchanged(
166 cmdutil.bailifchanged(
167 repo,
167 repo,
168 hint=_(b'requires --allow-dirty-working-copy to uncommit'),
168 hint=_(b'requires --allow-dirty-working-copy to uncommit'),
169 )
169 )
170 old = repo[b'.']
170 old = repo[b'.']
171 rewriteutil.precheck(repo, [old.rev()], b'uncommit')
171 rewriteutil.precheck(repo, [old.rev()], b'uncommit')
172 if len(old.parents()) > 1:
172 if len(old.parents()) > 1:
173 raise error.InputError(_(b"cannot uncommit merge changeset"))
173 raise error.InputError(_(b"cannot uncommit merge changeset"))
174
174
175 match = scmutil.match(old, pats, opts)
175 match = scmutil.match(old, pats, opts)
176
176
177 # Check all explicitly given files; abort if there's a problem.
177 # Check all explicitly given files; abort if there's a problem.
178 if match.files():
178 if match.files():
179 s = old.status(old.p1(), match, listclean=True)
179 s = old.status(old.p1(), match, listclean=True)
180 eligible = set(s.added) | set(s.modified) | set(s.removed)
180 eligible = set(s.added) | set(s.modified) | set(s.removed)
181
181
182 badfiles = set(match.files()) - eligible
182 badfiles = set(match.files()) - eligible
183
183
184 # Naming a parent directory of an eligible file is OK, even
184 # Naming a parent directory of an eligible file is OK, even
185 # if not everything tracked in that directory can be
185 # if not everything tracked in that directory can be
186 # uncommitted.
186 # uncommitted.
187 if badfiles:
187 if badfiles:
188 badfiles -= {f for f in pathutil.dirs(eligible)}
188 badfiles -= {f for f in pathutil.dirs(eligible)}
189
189
190 for f in sorted(badfiles):
190 for f in sorted(badfiles):
191 if f in s.clean:
191 if f in s.clean:
192 hint = _(
192 hint = _(
193 b"file was not changed in working directory parent"
193 b"file was not changed in working directory parent"
194 )
194 )
195 elif repo.wvfs.exists(f):
195 elif repo.wvfs.exists(f):
196 hint = _(b"file was untracked in working directory parent")
196 hint = _(b"file was untracked in working directory parent")
197 else:
197 else:
198 hint = _(b"file does not exist")
198 hint = _(b"file does not exist")
199
199
200 raise error.InputError(
200 raise error.InputError(
201 _(b'cannot uncommit "%s"') % scmutil.getuipathfn(repo)(f),
201 _(b'cannot uncommit "%s"') % scmutil.getuipathfn(repo)(f),
202 hint=hint,
202 hint=hint,
203 )
203 )
204
204
205 with repo.transaction(b'uncommit'):
205 with repo.transaction(b'uncommit'):
206 if not (opts[b'message'] or opts[b'logfile']):
206 if not (opts[b'message'] or opts[b'logfile']):
207 opts[b'message'] = old.description()
207 opts[b'message'] = old.description()
208 message = cmdutil.logmessage(ui, opts)
208 message = cmdutil.logmessage(ui, opts)
209
209
210 keepcommit = pats
210 keepcommit = pats
211 if not keepcommit:
211 if not keepcommit:
212 if opts.get(b'keep') is not None:
212 if opts.get(b'keep') is not None:
213 keepcommit = opts.get(b'keep')
213 keepcommit = opts.get(b'keep')
214 else:
214 else:
215 keepcommit = ui.configbool(
215 keepcommit = ui.configbool(
216 b'experimental', b'uncommit.keep'
216 b'experimental', b'uncommit.keep'
217 )
217 )
218 newid = _commitfiltered(
218 newid = _commitfiltered(
219 repo,
219 repo,
220 old,
220 old,
221 match,
221 match,
222 keepcommit,
222 keepcommit,
223 message=message,
223 message=message,
224 user=opts.get(b'user'),
224 user=opts.get(b'user'),
225 date=opts.get(b'date'),
225 date=opts.get(b'date'),
226 )
226 )
227 if newid is None:
227 if newid is None:
228 ui.status(_(b"nothing to uncommit\n"))
228 ui.status(_(b"nothing to uncommit\n"))
229 return 1
229 return 1
230
230
231 mapping = {}
231 mapping = {}
232 if newid != old.p1().node():
232 if newid != old.p1().node():
233 # Move local changes on filtered changeset
233 # Move local changes on filtered changeset
234 mapping[old.node()] = (newid,)
234 mapping[old.node()] = (newid,)
235 else:
235 else:
236 # Fully removed the old commit
236 # Fully removed the old commit
237 mapping[old.node()] = ()
237 mapping[old.node()] = ()
238
238
239 with repo.dirstate.parentchange(repo):
239 with repo.dirstate.changing_parents(repo):
240                 scmutil.movedirstate(repo, repo[newid], match)
241
242         scmutil.cleanupnodes(repo, mapping, b'uncommit', fixphase=True)
243
244
245 def predecessormarkers(ctx):
246     """yields the obsolete markers marking the given changeset as a successor"""
247     for data in ctx.repo().obsstore.predecessors.get(ctx.node(), ()):
248         yield obsutil.marker(ctx.repo(), data)
249
250
251 @command(
252     b'unamend',
253     [],
254     helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
255     helpbasic=True,
256 )
257 def unamend(ui, repo, **opts):
258 """undo the most recent amend operation on a current changeset
258 """undo the most recent amend operation on a current changeset
259
259
260 This command will roll back to the previous version of a changeset,
260 This command will roll back to the previous version of a changeset,
261 leaving working directory in state in which it was before running
261 leaving working directory in state in which it was before running
262 `hg amend` (e.g. files modified as part of an amend will be
262 `hg amend` (e.g. files modified as part of an amend will be
263 marked as modified `hg status`)
263 marked as modified `hg status`)
264 """
264 """
265
266     unfi = repo.unfiltered()
267     with repo.wlock(), repo.lock(), repo.transaction(b'unamend'):
268
269         # identify the commit from which to unamend
270         curctx = repo[b'.']
271
272         rewriteutil.precheck(repo, [curctx.rev()], b'unamend')
273         if len(curctx.parents()) > 1:
274             raise error.InputError(_(b"cannot unamend merge changeset"))
275
276         expected_keys = (b'amend_source', b'unamend_source')
277         if not any(key in curctx.extra() for key in expected_keys):
278             raise error.InputError(
279                 _(
280                     b"working copy parent was not created by 'hg amend' or "
281                     b"'hg unamend'"
282                 )
283             )
284
285         # identify the commit to which to unamend
286         markers = list(predecessormarkers(curctx))
287         if len(markers) != 1:
288             e = _(b"changeset must have one predecessor, found %i predecessors")
289             raise error.InputError(e % len(markers))
290
291         prednode = markers[0].prednode()
292         predctx = unfi[prednode]
293
294         # add an extra so that we get a new hash
295         # note: allowing unamend to undo an unamend is an intentional feature
296         extras = predctx.extra()
297         extras[b'unamend_source'] = curctx.hex()
298
299         def filectxfn(repo, ctx_, path):
300             try:
301                 return predctx.filectx(path)
302             except KeyError:
303                 return None
304
305         # Make a new commit same as predctx
306         newctx = context.memctx(
307             repo,
308             parents=(predctx.p1(), predctx.p2()),
309             text=predctx.description(),
310             files=predctx.files(),
311             filectxfn=filectxfn,
312             user=predctx.user(),
313             date=predctx.date(),
314             extra=extras,
315         )
316         newprednode = repo.commitctx(newctx)
317         newpredctx = repo[newprednode]
318         dirstate = repo.dirstate
319
320         with dirstate.parentchange(repo):
320         with dirstate.changing_parents(repo):
321             scmutil.movedirstate(repo, newpredctx)
322
323         mapping = {curctx.node(): (newprednode,)}
324         scmutil.cleanupnodes(repo, mapping, b'unamend', fixphase=True)
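
Taken together, the hunks above are the whole of this change for the uncommit and unamend commands: each site that moves the working-copy parent after a history rewrite now opens the dirstate context manager under its new name, ``changing_parents``, instead of ``parentchange``. A minimal sketch of the shared pattern, assuming a localrepo instance ``repo`` and an already-created rewritten node ``newnode``; the helper name and the ``b'rewrite'`` transaction label are illustrative, not part of this commit::

    from mercurial import scmutil

    def move_to_rewritten(repo, newnode):
        # Hypothetical helper, shown only to isolate the renamed API;
        # locks and a transaction scope the rewrite, as the two
        # commands above do.
        with repo.wlock(), repo.lock(), repo.transaction(b'rewrite'):
            # changing_parents (formerly parentchange) brackets a
            # dirstate update in which only the working-copy parent
            # moves; file contents on disk are untouched.
            with repo.dirstate.changing_parents(repo):
                scmutil.movedirstate(repo, repo[newnode])

Code written against this release or later should use the new name.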
@@ -1,242 +1,242 @@
1 # win32text.py - LF <-> CRLF/CR translation utilities for Windows/Mac users
2 #
3 # Copyright 2005, 2007-2009 Olivia Mackall <olivia@selenic.com> and others
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 '''perform automatic newline conversion (DEPRECATED)
9
10 Deprecation: The win32text extension requires each user to configure
11 the extension again for every clone, since the configuration
12 is not copied when cloning.
13
14 We have therefore made the ``eol`` extension available as an alternative.
15 The ``eol`` extension uses a version-controlled file for its configuration,
16 so each clone will use the right settings from the start.
17
18 To perform automatic newline conversion, use::
19
20   [extensions]
21   win32text =
22   [encode]
23   ** = cleverencode:
24   # or ** = macencode:
25
26   [decode]
27   ** = cleverdecode:
28   # or ** = macdecode:
29
30 If not doing conversion, to make sure you do not commit CRLF/CR by accident::
31
32   [hooks]
33   pretxncommit.crlf = python:hgext.win32text.forbidcrlf
34   # or pretxncommit.cr = python:hgext.win32text.forbidcr
35
36 To do the same check on a server to prevent CRLF/CR from being
37 pushed or pulled::
38
39   [hooks]
40   pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
41   # or pretxnchangegroup.cr = python:hgext.win32text.forbidcr
42 '''
43
44
45 import re
46 from mercurial.i18n import _
47 from mercurial.node import short
48 from mercurial import (
49     cmdutil,
50     extensions,
51     registrar,
52 )
53 from mercurial.utils import stringutil
54
55 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
56 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
57 # be specifying the version(s) of Mercurial they are tested with, or
58 # leave the attribute unspecified.
59 testedwith = b'ships-with-hg-core'
60
61 configtable = {}
62 configitem = registrar.configitem(configtable)
63
64 configitem(
65     b'win32text',
66     b'warn',
67     default=True,
68 )
69
70 # regexp for single LF without CR preceding.
71 re_single_lf = re.compile(b'(^|[^\r])\n', re.MULTILINE)
72
73 newlinestr = {b'\r\n': b'CRLF', b'\r': b'CR'}
74 filterstr = {b'\r\n': b'clever', b'\r': b'mac'}
75
76
77 def checknewline(s, newline, ui=None, repo=None, filename=None):
78     # warn if already has 'newline' in repository.
79     # it might cause unexpected eol conversion.
80     # see issue 302:
81     # https://bz.mercurial-scm.org/302
82     if newline in s and ui and filename and repo:
83         ui.warn(
84             _(
85                 b'WARNING: %s already has %s line endings\n'
86                 b'and does not need EOL conversion by the win32text plugin.\n'
87                 b'Before your next commit, please reconsider your '
88                 b'encode/decode settings in \nMercurial.ini or %s.\n'
89             )
90             % (filename, newlinestr[newline], repo.vfs.join(b'hgrc'))
91         )
92
93
94 def dumbdecode(s, cmd, **kwargs):
95     checknewline(s, b'\r\n', **kwargs)
96     # replace single LF to CRLF
97     return re_single_lf.sub(b'\\1\r\n', s)
98
99
100 def dumbencode(s, cmd):
101     return s.replace(b'\r\n', b'\n')
102
103
104 def macdumbdecode(s, cmd, **kwargs):
105     checknewline(s, b'\r', **kwargs)
106     return s.replace(b'\n', b'\r')
107
108
109 def macdumbencode(s, cmd):
110     return s.replace(b'\r', b'\n')
111
112
113 def cleverdecode(s, cmd, **kwargs):
114     if not stringutil.binary(s):
115         return dumbdecode(s, cmd, **kwargs)
116     return s
117
118
119 def cleverencode(s, cmd):
120     if not stringutil.binary(s):
121         return dumbencode(s, cmd)
122     return s
123
124
125 def macdecode(s, cmd, **kwargs):
126     if not stringutil.binary(s):
127         return macdumbdecode(s, cmd, **kwargs)
128     return s
129
130
131 def macencode(s, cmd):
132     if not stringutil.binary(s):
133         return macdumbencode(s, cmd)
134     return s
135
136
137 _filters = {
138     b'dumbdecode:': dumbdecode,
139     b'dumbencode:': dumbencode,
140     b'cleverdecode:': cleverdecode,
141     b'cleverencode:': cleverencode,
142     b'macdumbdecode:': macdumbdecode,
143     b'macdumbencode:': macdumbencode,
144     b'macdecode:': macdecode,
145     b'macencode:': macencode,
146 }
147
148
149 def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
150     halt = False
151     seen = set()
152     # we try to walk changesets in reverse order from newest to
153     # oldest, so that if we see a file multiple times, we take the
154     # newest version as canonical. this prevents us from blocking a
155     # changegroup that contains an unacceptable commit followed later
156     # by a commit that fixes the problem.
157     tip = repo[b'tip']
158     for rev in range(repo.changelog.tiprev(), repo[node].rev() - 1, -1):
159         c = repo[rev]
160         for f in c.files():
161             if f in seen or f not in tip or f not in c:
162                 continue
163             seen.add(f)
164             data = c[f].data()
165             if not stringutil.binary(data) and newline in data:
166                 if not halt:
167                     ui.warn(
168                         _(
169                             b'attempt to commit or push text file(s) '
170                             b'using %s line endings\n'
171                         )
172                         % newlinestr[newline]
173                     )
174                 ui.warn(_(b'in %s: %s\n') % (short(c.node()), f))
175                 halt = True
176     if halt and hooktype == b'pretxnchangegroup':
177         crlf = newlinestr[newline].lower()
178         filter = filterstr[newline]
179         ui.warn(
180             _(
181                 b'\nTo prevent this mistake in your local repository,\n'
182                 b'add to Mercurial.ini or .hg/hgrc:\n'
183                 b'\n'
184                 b'[hooks]\n'
185                 b'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
186                 b'\n'
187                 b'and also consider adding:\n'
188                 b'\n'
189                 b'[extensions]\n'
190                 b'win32text =\n'
191                 b'[encode]\n'
192                 b'** = %sencode:\n'
193                 b'[decode]\n'
194                 b'** = %sdecode:\n'
195             )
196             % (crlf, crlf, filter, filter)
197         )
198     return halt
199
200
201 def forbidcrlf(ui, repo, hooktype, node, **kwargs):
202     return forbidnewline(ui, repo, hooktype, node, b'\r\n', **kwargs)
203
204
205 def forbidcr(ui, repo, hooktype, node, **kwargs):
206     return forbidnewline(ui, repo, hooktype, node, b'\r', **kwargs)
207
208
209 def reposetup(ui, repo):
210     if not repo.local():
211         return
212     for name, fn in _filters.items():
213         repo.adddatafilter(name, fn)
214
215
216 def wrap_revert(orig, repo, ctx, names, uipathfn, actions, *args, **kwargs):
217     # reset the dirstate cache for the files we touch
218     ds = repo.dirstate
219     with ds.parentchange(repo):
219     with ds.changing_parents(repo):
220         for filename in actions[b'revert'][0]:
221             entry = ds.get_entry(filename)
222             if entry is not None:
223                 if entry.p1_tracked:
224                     ds.update_file(
225                         filename,
226                         entry.tracked,
227                         p1_tracked=True,
228                         p2_info=entry.p2_info,
229                     )
230     return orig(repo, ctx, names, uipathfn, actions, *args, **kwargs)
231
232
233 def extsetup(ui):
234     # deprecated config: win32text.warn
235     if ui.configbool(b'win32text', b'warn'):
236         ui.warn(
237             _(
238                 b"win32text is deprecated: "
239                 b"https://mercurial-scm.org/wiki/Win32TextExtension\n"
240             )
241         )
242     extensions.wrapfunction(cmdutil, '_performrevert', wrap_revert)
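
Because every filter in this file reduces to ``re_single_lf`` plus two byte-level ``replace`` calls, the round trip is easy to check in isolation. A standalone sketch, reusing only the regex defined above; the sample bytes are made up::

    import re

    # Same pattern as win32text.re_single_lf: an LF not preceded by CR.
    re_single_lf = re.compile(b'(^|[^\r])\n', re.MULTILINE)

    repo_data = b'one\ntwo\r\nthree\n'

    # decode (repository -> working copy), as dumbdecode() does:
    # bare LFs become CRLF, existing CRLFs are left alone because the
    # regex requires the preceding byte to be something other than CR.
    working = re_single_lf.sub(b'\\1\r\n', repo_data)
    assert working == b'one\r\ntwo\r\nthree\r\n'

    # encode (working copy -> repository), as dumbencode() does:
    # every CRLF collapses back to a lone LF.
    assert working.replace(b'\r\n', b'\n') == b'one\ntwo\nthree\n'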