##// END OF EJS Templates
py3: replace `pycompat.xrange` by `range`
Manuel Jacob -
r50179:d44e3c45 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,1165 +1,1165 b''
1 # absorb.py
1 # absorb.py
2 #
2 #
3 # Copyright 2016 Facebook, Inc.
3 # Copyright 2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """apply working directory changes to changesets (EXPERIMENTAL)
8 """apply working directory changes to changesets (EXPERIMENTAL)
9
9
10 The absorb extension provides a command to use annotate information to
10 The absorb extension provides a command to use annotate information to
11 amend modified chunks into the corresponding non-public changesets.
11 amend modified chunks into the corresponding non-public changesets.
12
12
13 ::
13 ::
14
14
15 [absorb]
15 [absorb]
16 # only check 50 recent non-public changesets at most
16 # only check 50 recent non-public changesets at most
17 max-stack-size = 50
17 max-stack-size = 50
18 # whether to add noise to new commits to avoid obsolescence cycle
18 # whether to add noise to new commits to avoid obsolescence cycle
19 add-noise = 1
19 add-noise = 1
20 # make `amend --correlated` a shortcut to the main command
20 # make `amend --correlated` a shortcut to the main command
21 amend-flag = correlated
21 amend-flag = correlated
22
22
23 [color]
23 [color]
24 absorb.description = yellow
24 absorb.description = yellow
25 absorb.node = blue bold
25 absorb.node = blue bold
26 absorb.path = bold
26 absorb.path = bold
27 """
27 """
28
28
29 # TODO:
29 # TODO:
30 # * Rename config items to [commands] namespace
30 # * Rename config items to [commands] namespace
31 # * Converge getdraftstack() with other code in core
31 # * Converge getdraftstack() with other code in core
32 # * move many attributes on fixupstate to be private
32 # * move many attributes on fixupstate to be private
33
33
34
34
35 import collections
35 import collections
36
36
37 from mercurial.i18n import _
37 from mercurial.i18n import _
38 from mercurial.node import (
38 from mercurial.node import (
39 hex,
39 hex,
40 short,
40 short,
41 )
41 )
42 from mercurial import (
42 from mercurial import (
43 cmdutil,
43 cmdutil,
44 commands,
44 commands,
45 context,
45 context,
46 crecord,
46 crecord,
47 error,
47 error,
48 linelog,
48 linelog,
49 mdiff,
49 mdiff,
50 obsolete,
50 obsolete,
51 patch,
51 patch,
52 phases,
52 phases,
53 pycompat,
53 pycompat,
54 registrar,
54 registrar,
55 rewriteutil,
55 rewriteutil,
56 scmutil,
56 scmutil,
57 util,
57 util,
58 )
58 )
59 from mercurial.utils import stringutil
59 from mercurial.utils import stringutil
60
60
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
63 # be specifying the version(s) of Mercurial they are tested with, or
63 # be specifying the version(s) of Mercurial they are tested with, or
64 # leave the attribute unspecified.
64 # leave the attribute unspecified.
# Declare compatibility: this extension ships with Mercurial core, so it is
# always tested against the running version.
testedwith = b'ships-with-hg-core'

# Command registration table, populated by the @command decorator below.
cmdtable = {}
command = registrar.command(cmdtable)

# Config item registration: declares the [absorb] settings and their defaults
# so `hg config` validation knows about them.
configtable = {}
configitem = registrar.configitem(configtable)

configitem(b'absorb', b'add-noise', default=True)
configitem(b'absorb', b'amend-flag', default=None)
configitem(b'absorb', b'max-stack-size', default=50)

# Default color labels used when printing the absorb plan (see module
# docstring for how to override them in the [color] section).
colortable = {
    b'absorb.description': b'yellow',
    b'absorb.node': b'blue bold',
    b'absorb.path': b'bold',
}

# Short alias; defaultdict is used for line-lifetime bookkeeping further down.
defaultdict = collections.defaultdict
84
84
85
85
class nullui:
    """blank ui object doing nothing

    Used as a stand-in when no real ui is supplied, so callers can probe
    flags and look up handlers without producing any output.
    """

    debugflag = False
    verbose = False
    quiet = True

    def __getitem__(self, name):
        """return a no-op callable for any requested item

        Fix: the original signature was ``__getitem__(name)`` (missing
        ``self``), so any ``nullui()[name]`` lookup raised TypeError
        instead of returning the no-op function.
        """

        def nullfunc(*args, **kwds):
            # swallow everything, return nothing
            return

        return nullfunc
98
98
99
99
class emptyfilecontext:
    """a stand-in filecontext whose content is the empty byte string"""

    def __init__(self, repo):
        # remember the repo so node() can report its null revision id
        self._repo = repo

    def node(self):
        """return the null node id of the associated repository"""
        return self._repo.nullid

    def data(self):
        """return the (empty) file content"""
        return b''
111
111
112
112
def uniq(lst):
    """list -> list. remove duplicated items without changing the order"""
    # dict preserves insertion order, so this keeps first occurrences only
    return list(dict.fromkeys(lst))
122
122
123
123
def getdraftstack(headctx, limit=None):
    """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.

    changesets are sorted in topo order, oldest first.
    return at most limit items, if limit is a positive number.

    merges are considered as non-draft as well. i.e. every commit
    returned has and only has 1 parent.
    """
    stack = []
    ctx = headctx
    # walk first parents downwards until we hit a public changeset
    while ctx.phase() != phases.public:
        # honour the optional size cap
        if limit and len(stack) >= limit:
            break
        parents = ctx.parents()
        # stop at merges (and roots): only single-parent commits qualify
        if len(parents) != 1:
            break
        stack.append(ctx)
        ctx = parents[0]
    # collected newest-first; callers expect oldest-first
    return stack[::-1]
145
145
146
146
def getfilestack(stack, path, seenfctxs=None):
    """([ctx], str, set) -> [fctx], {ctx: fctx}

    stack is a list of contexts, from old to new. usually they are what
    "getdraftstack" returns.

    follows renames, but not copies.

    seenfctxs is a set of filecontexts that will be considered "immutable".
    they are usually what this function returned in earlier calls, useful
    to avoid issues that a file was "moved" to multiple places and was then
    modified differently, like: "a" was copied to "b", "a" was also copied to
    "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a"
    and we enforce only one of them to be able to affect "a"'s content.

    return an empty list and an empty dict, if the specified path does not
    exist in stack[-1] (the top of the stack).

    otherwise, return a list of de-duplicated filecontexts, and the map to
    convert ctx in the stack to fctx, for possible mutable fctxs. the first item
    of the list would be outside the stack and should be considered immutable.
    the remaining items are within the stack.

    for example, given the following changelog and corresponding filelog
    revisions:

      changelog: 3----4----5----6----7
      filelog:   x    0----1----1----2 (x: no such file yet)

    - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
    - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
      dummy empty filecontext.
    - if stack = [2], returns ([], {})
    - if stack = [7], returns ([1, 2], {7: 2})
    - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
      removed, since 1 is immutable.
    """
    if seenfctxs is None:
        seenfctxs = set()
    assert stack

    # fast path: file absent at the top of the stack means nothing to absorb
    if path not in stack[-1]:
        return [], {}

    fctxs = []
    fctxmap = {}

    # walk from newest to oldest; "pctx" tracks the changeset that will
    # contribute the extra immutable fctx appended after the loop (or None
    # if no extra immutable fctx should be added)
    pctx = stack[0].p1()  # the public (immutable) ctx we stop at
    for ctx in reversed(stack):
        if path not in ctx:  # the file is added in the next commit
            pctx = ctx
            break
        fctx = ctx[path]
        fctxs.append(fctx)
        if fctx in seenfctxs:  # treat fctx as the immutable one
            pctx = None  # do not add another immutable fctx
            break
        fctxmap[ctx] = fctx  # only for mutable fctxs
        copy = fctx.copysource()
        if copy:
            path = copy  # follow rename
            if path in ctx:  # but do not follow copy
                pctx = ctx.p1()
                break

    if pctx is not None:  # need an extra immutable fctx
        if path in pctx:
            fctxs.append(pctx[path])
        else:
            # file did not exist before the stack: use a dummy empty file
            fctxs.append(emptyfilecontext(pctx.repo()))

    # restore oldest-first order expected by callers
    fctxs.reverse()
    # note: we rely on a property of hg: filerev is not reused for linear
    # history. i.e. it's impossible to have:
    #   changelog: 4----5----6 (linear, no merges)
    #   filelog:   1----2----1
    #                         ^ reuse filerev (impossible)
    # because parents are part of the hash. if that's not true, we need to
    # remove uniq and find a different way to identify fctxs.
    return uniq(fctxs), fctxmap
227
227
228
228
class overlaystore(patch.filestore):
    """read-only, hybrid store based on a dict and ctx.
    memworkingcopy: {path: content}, overrides file contents.
    """

    def __init__(self, basectx, memworkingcopy):
        self.basectx = basectx
        self.memworkingcopy = memworkingcopy

    def getfile(self, path):
        """comply with mercurial.patch.filestore.getfile"""
        if path not in self.basectx:
            return None, None, None
        fctx = self.basectx[path]
        # the in-memory override wins over the content recorded in basectx
        try:
            content = self.memworkingcopy[path]
        except KeyError:
            content = fctx.data()
        mode = (fctx.islink(), fctx.isexec())
        return content, mode, fctx.copysource()
250
250
251
251
def overlaycontext(memworkingcopy, ctx, parents=None, extra=None, desc=None):
    """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
    memworkingcopy overrides file contents.
    """
    repo = ctx.repo()
    # parents must contain 2 items: (node1, node2)
    if parents is None:
        parents = repo.changelog.parents(ctx.node())
    if extra is None:
        extra = ctx.extra()
    if desc is None:
        desc = ctx.description()
    # every file touched by ctx plus every file overridden in memory
    files = set(ctx.files()).union(memworkingcopy)
    return context.memctx(
        repo=repo,
        parents=parents,
        text=desc,
        files=files,
        filectxfn=overlaystore(ctx, memworkingcopy),
        user=ctx.user(),
        date=ctx.date(),
        branch=None,
        extra=extra,
    )
278
278
279
279
280 class filefixupstate:
280 class filefixupstate:
281 """state needed to apply fixups to a single file
281 """state needed to apply fixups to a single file
282
282
283 internally, it keeps file contents of several revisions and a linelog.
283 internally, it keeps file contents of several revisions and a linelog.
284
284
285 the linelog uses odd revision numbers for original contents (fctxs passed
285 the linelog uses odd revision numbers for original contents (fctxs passed
286 to __init__), and even revision numbers for fixups, like:
286 to __init__), and even revision numbers for fixups, like:
287
287
288 linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
288 linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
289 linelog rev 2: fixups made to self.fctxs[0]
289 linelog rev 2: fixups made to self.fctxs[0]
290 linelog rev 3: self.fctxs[1] (a child of fctxs[0])
290 linelog rev 3: self.fctxs[1] (a child of fctxs[0])
291 linelog rev 4: fixups made to self.fctxs[1]
291 linelog rev 4: fixups made to self.fctxs[1]
292 ...
292 ...
293
293
294 a typical use is like:
294 a typical use is like:
295
295
296 1. call diffwith, to calculate self.fixups
296 1. call diffwith, to calculate self.fixups
297 2. (optionally), present self.fixups to the user, or change it
297 2. (optionally), present self.fixups to the user, or change it
298 3. call apply, to apply changes
298 3. call apply, to apply changes
299 4. read results from "finalcontents", or call getfinalcontent
299 4. read results from "finalcontents", or call getfinalcontent
300 """
300 """
301
301
302 def __init__(self, fctxs, path, ui=None, opts=None):
302 def __init__(self, fctxs, path, ui=None, opts=None):
303 """([fctx], ui or None) -> None
303 """([fctx], ui or None) -> None
304
304
305 fctxs should be linear, and sorted by topo order - oldest first.
305 fctxs should be linear, and sorted by topo order - oldest first.
306 fctxs[0] will be considered as "immutable" and will not be changed.
306 fctxs[0] will be considered as "immutable" and will not be changed.
307 """
307 """
308 self.fctxs = fctxs
308 self.fctxs = fctxs
309 self.path = path
309 self.path = path
310 self.ui = ui or nullui()
310 self.ui = ui or nullui()
311 self.opts = opts or {}
311 self.opts = opts or {}
312
312
313 # following fields are built from fctxs. they exist for perf reason
313 # following fields are built from fctxs. they exist for perf reason
314 self.contents = [f.data() for f in fctxs]
314 self.contents = [f.data() for f in fctxs]
315 self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
315 self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
316 self.linelog = self._buildlinelog()
316 self.linelog = self._buildlinelog()
317 if self.ui.debugflag:
317 if self.ui.debugflag:
318 assert self._checkoutlinelog() == self.contents
318 assert self._checkoutlinelog() == self.contents
319
319
320 # following fields will be filled later
320 # following fields will be filled later
321 self.chunkstats = [0, 0] # [adopted, total : int]
321 self.chunkstats = [0, 0] # [adopted, total : int]
322 self.targetlines = [] # [str]
322 self.targetlines = [] # [str]
323 self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
323 self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
324 self.finalcontents = [] # [str]
324 self.finalcontents = [] # [str]
325 self.ctxaffected = set()
325 self.ctxaffected = set()
326
326
327 def diffwith(self, targetfctx, fm=None):
327 def diffwith(self, targetfctx, fm=None):
328 """calculate fixups needed by examining the differences between
328 """calculate fixups needed by examining the differences between
329 self.fctxs[-1] and targetfctx, chunk by chunk.
329 self.fctxs[-1] and targetfctx, chunk by chunk.
330
330
331 targetfctx is the target state we move towards. we may or may not be
331 targetfctx is the target state we move towards. we may or may not be
332 able to get there because not all modified chunks can be amended into
332 able to get there because not all modified chunks can be amended into
333 a non-public fctx unambiguously.
333 a non-public fctx unambiguously.
334
334
335 call this only once, before apply().
335 call this only once, before apply().
336
336
337 update self.fixups, self.chunkstats, and self.targetlines.
337 update self.fixups, self.chunkstats, and self.targetlines.
338 """
338 """
339 a = self.contents[-1]
339 a = self.contents[-1]
340 alines = self.contentlines[-1]
340 alines = self.contentlines[-1]
341 b = targetfctx.data()
341 b = targetfctx.data()
342 blines = mdiff.splitnewlines(b)
342 blines = mdiff.splitnewlines(b)
343 self.targetlines = blines
343 self.targetlines = blines
344
344
345 self.linelog.annotate(self.linelog.maxrev)
345 self.linelog.annotate(self.linelog.maxrev)
346 annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
346 annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
347 assert len(annotated) == len(alines)
347 assert len(annotated) == len(alines)
348 # add a dummy end line to make insertion at the end easier
348 # add a dummy end line to make insertion at the end easier
349 if annotated:
349 if annotated:
350 dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
350 dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
351 annotated.append(dummyendline)
351 annotated.append(dummyendline)
352
352
353 # analyse diff blocks
353 # analyse diff blocks
354 for chunk in self._alldiffchunks(a, b, alines, blines):
354 for chunk in self._alldiffchunks(a, b, alines, blines):
355 newfixups = self._analysediffchunk(chunk, annotated)
355 newfixups = self._analysediffchunk(chunk, annotated)
356 self.chunkstats[0] += bool(newfixups) # 1 or 0
356 self.chunkstats[0] += bool(newfixups) # 1 or 0
357 self.chunkstats[1] += 1
357 self.chunkstats[1] += 1
358 self.fixups += newfixups
358 self.fixups += newfixups
359 if fm is not None:
359 if fm is not None:
360 self._showchanges(fm, alines, blines, chunk, newfixups)
360 self._showchanges(fm, alines, blines, chunk, newfixups)
361
361
362 def apply(self):
362 def apply(self):
363 """apply self.fixups. update self.linelog, self.finalcontents.
363 """apply self.fixups. update self.linelog, self.finalcontents.
364
364
365 call this only once, before getfinalcontent(), after diffwith().
365 call this only once, before getfinalcontent(), after diffwith().
366 """
366 """
367 # the following is unnecessary, as it's done by "diffwith":
367 # the following is unnecessary, as it's done by "diffwith":
368 # self.linelog.annotate(self.linelog.maxrev)
368 # self.linelog.annotate(self.linelog.maxrev)
369 for rev, a1, a2, b1, b2 in reversed(self.fixups):
369 for rev, a1, a2, b1, b2 in reversed(self.fixups):
370 blines = self.targetlines[b1:b2]
370 blines = self.targetlines[b1:b2]
371 if self.ui.debugflag:
371 if self.ui.debugflag:
372 idx = (max(rev - 1, 0)) // 2
372 idx = (max(rev - 1, 0)) // 2
373 self.ui.write(
373 self.ui.write(
374 _(b'%s: chunk %d:%d -> %d lines\n')
374 _(b'%s: chunk %d:%d -> %d lines\n')
375 % (short(self.fctxs[idx].node()), a1, a2, len(blines))
375 % (short(self.fctxs[idx].node()), a1, a2, len(blines))
376 )
376 )
377 self.linelog.replacelines(rev, a1, a2, b1, b2)
377 self.linelog.replacelines(rev, a1, a2, b1, b2)
378 if self.opts.get(b'edit_lines', False):
378 if self.opts.get(b'edit_lines', False):
379 self.finalcontents = self._checkoutlinelogwithedits()
379 self.finalcontents = self._checkoutlinelogwithedits()
380 else:
380 else:
381 self.finalcontents = self._checkoutlinelog()
381 self.finalcontents = self._checkoutlinelog()
382
382
383 def getfinalcontent(self, fctx):
383 def getfinalcontent(self, fctx):
384 """(fctx) -> str. get modified file content for a given filecontext"""
384 """(fctx) -> str. get modified file content for a given filecontext"""
385 idx = self.fctxs.index(fctx)
385 idx = self.fctxs.index(fctx)
386 return self.finalcontents[idx]
386 return self.finalcontents[idx]
387
387
    def _analysediffchunk(self, chunk, annotated):
        """analyse a different chunk and return new fixups found

        return [] if no lines from the chunk can be safely applied.

        the chunk (or lines) cannot be safely applied, if, for example:
          - the modified (deleted) lines belong to a public changeset
            (self.fctxs[0])
          - the chunk is a pure insertion and the adjacent lines (at most 2
            lines) belong to different non-public changesets, or do not belong
            to any non-public changesets.
          - the chunk is modifying lines from different changesets.
            in this case, if the number of lines deleted equals to the number
            of lines added, assume it's a simple 1:1 map (could be wrong).
            otherwise, give up.
          - the chunk is modifying lines from a single non-public changeset,
            but other revisions touch the area as well. i.e. the lines are
            not continuous as seen from the linelog.
        """
        a1, a2, b1, b2 = chunk
        # find involved indexes from annotate result
        involved = annotated[a1:a2]
        if not involved and annotated:  # a1 == a2 and a is not empty
            # pure insertion, check nearby lines. ignore lines belong
            # to the public (first) changeset (i.e. annotated[i][0] == 1)
            nearbylinenums = {a2, max(0, a1 - 1)}
            involved = [
                annotated[i] for i in nearbylinenums if annotated[i][0] != 1
            ]
        # linelog revisions touching the affected lines, de-duplicated
        involvedrevs = list({r for r, l in involved})
        newfixups = []
        if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
            # chunk belongs to a single revision
            rev = involvedrevs[0]
            # rev 1 is the immutable public content; only rev > 1 is mutable
            if rev > 1:
                # odd linelog revs hold original contents; rev + 1 (even)
                # is where the fixups for that content are recorded
                fixuprev = rev + 1
                newfixups.append((fixuprev, a1, a2, b1, b2))
        elif a2 - a1 == b2 - b1 or b1 == b2:
            # 1:1 line mapping, or chunk was deleted
            # fall back to handling each deleted line independently
            for i in range(a1, a2):
                rev, linenum = annotated[i]
                if rev > 1:
                    if b1 == b2:  # deletion, simply remove that single line
                        nb1 = nb2 = 0
                    else:  # 1:1 line mapping, change the corresponding rev
                        nb1 = b1 + i - a1
                        nb2 = nb1 + 1
                    fixuprev = rev + 1
                    newfixups.append((fixuprev, i, i + 1, nb1, nb2))
        # merge adjacent per-line fixups back into larger ranges
        return self._optimizefixups(newfixups)
438
438
439 @staticmethod
439 @staticmethod
440 def _alldiffchunks(a, b, alines, blines):
440 def _alldiffchunks(a, b, alines, blines):
441 """like mdiff.allblocks, but only care about differences"""
441 """like mdiff.allblocks, but only care about differences"""
442 blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
442 blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
443 for chunk, btype in blocks:
443 for chunk, btype in blocks:
444 if btype != b'!':
444 if btype != b'!':
445 continue
445 continue
446 yield chunk
446 yield chunk
447
447
448 def _buildlinelog(self):
448 def _buildlinelog(self):
449 """calculate the initial linelog based on self.content{,line}s.
449 """calculate the initial linelog based on self.content{,line}s.
450 this is similar to running a partial "annotate".
450 this is similar to running a partial "annotate".
451 """
451 """
452 llog = linelog.linelog()
452 llog = linelog.linelog()
453 a, alines = b'', []
453 a, alines = b'', []
454 for i in pycompat.xrange(len(self.contents)):
454 for i in range(len(self.contents)):
455 b, blines = self.contents[i], self.contentlines[i]
455 b, blines = self.contents[i], self.contentlines[i]
456 llrev = i * 2 + 1
456 llrev = i * 2 + 1
457 chunks = self._alldiffchunks(a, b, alines, blines)
457 chunks = self._alldiffchunks(a, b, alines, blines)
458 for a1, a2, b1, b2 in reversed(list(chunks)):
458 for a1, a2, b1, b2 in reversed(list(chunks)):
459 llog.replacelines(llrev, a1, a2, b1, b2)
459 llog.replacelines(llrev, a1, a2, b1, b2)
460 a, alines = b, blines
460 a, alines = b, blines
461 return llog
461 return llog
462
462
463 def _checkoutlinelog(self):
463 def _checkoutlinelog(self):
464 """() -> [str]. check out file contents from linelog"""
464 """() -> [str]. check out file contents from linelog"""
465 contents = []
465 contents = []
466 for i in pycompat.xrange(len(self.contents)):
466 for i in range(len(self.contents)):
467 rev = (i + 1) * 2
467 rev = (i + 1) * 2
468 self.linelog.annotate(rev)
468 self.linelog.annotate(rev)
469 content = b''.join(map(self._getline, self.linelog.annotateresult))
469 content = b''.join(map(self._getline, self.linelog.annotateresult))
470 contents.append(content)
470 contents.append(content)
471 return contents
471 return contents
472
472
473 def _checkoutlinelogwithedits(self):
473 def _checkoutlinelogwithedits(self):
474 """() -> [str]. prompt all lines for edit"""
474 """() -> [str]. prompt all lines for edit"""
475 alllines = self.linelog.getalllines()
475 alllines = self.linelog.getalllines()
476 # header
476 # header
477 editortext = (
477 editortext = (
478 _(
478 _(
479 b'HG: editing %s\nHG: "y" means the line to the right '
479 b'HG: editing %s\nHG: "y" means the line to the right '
480 b'exists in the changeset to the top\nHG:\n'
480 b'exists in the changeset to the top\nHG:\n'
481 )
481 )
482 % self.fctxs[-1].path()
482 % self.fctxs[-1].path()
483 )
483 )
484 # [(idx, fctx)]. hide the dummy emptyfilecontext
484 # [(idx, fctx)]. hide the dummy emptyfilecontext
485 visiblefctxs = [
485 visiblefctxs = [
486 (i, f)
486 (i, f)
487 for i, f in enumerate(self.fctxs)
487 for i, f in enumerate(self.fctxs)
488 if not isinstance(f, emptyfilecontext)
488 if not isinstance(f, emptyfilecontext)
489 ]
489 ]
490 for i, (j, f) in enumerate(visiblefctxs):
490 for i, (j, f) in enumerate(visiblefctxs):
491 editortext += _(b'HG: %s/%s %s %s\n') % (
491 editortext += _(b'HG: %s/%s %s %s\n') % (
492 b'|' * i,
492 b'|' * i,
493 b'-' * (len(visiblefctxs) - i + 1),
493 b'-' * (len(visiblefctxs) - i + 1),
494 short(f.node()),
494 short(f.node()),
495 f.description().split(b'\n', 1)[0],
495 f.description().split(b'\n', 1)[0],
496 )
496 )
497 editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs))
497 editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs))
498 # figure out the lifetime of a line, this is relatively inefficient,
498 # figure out the lifetime of a line, this is relatively inefficient,
499 # but probably fine
499 # but probably fine
500 lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}}
500 lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}}
501 for i, f in visiblefctxs:
501 for i, f in visiblefctxs:
502 self.linelog.annotate((i + 1) * 2)
502 self.linelog.annotate((i + 1) * 2)
503 for l in self.linelog.annotateresult:
503 for l in self.linelog.annotateresult:
504 lineset[l].add(i)
504 lineset[l].add(i)
505 # append lines
505 # append lines
506 for l in alllines:
506 for l in alllines:
507 editortext += b' %s : %s' % (
507 editortext += b' %s : %s' % (
508 b''.join(
508 b''.join(
509 [
509 [
510 (b'y' if i in lineset[l] else b' ')
510 (b'y' if i in lineset[l] else b' ')
511 for i, _f in visiblefctxs
511 for i, _f in visiblefctxs
512 ]
512 ]
513 ),
513 ),
514 self._getline(l),
514 self._getline(l),
515 )
515 )
516 # run editor
516 # run editor
517 editedtext = self.ui.edit(editortext, b'', action=b'absorb')
517 editedtext = self.ui.edit(editortext, b'', action=b'absorb')
518 if not editedtext:
518 if not editedtext:
519 raise error.InputError(_(b'empty editor text'))
519 raise error.InputError(_(b'empty editor text'))
520 # parse edited result
520 # parse edited result
521 contents = [b''] * len(self.fctxs)
521 contents = [b''] * len(self.fctxs)
522 leftpadpos = 4
522 leftpadpos = 4
523 colonpos = leftpadpos + len(visiblefctxs) + 1
523 colonpos = leftpadpos + len(visiblefctxs) + 1
524 for l in mdiff.splitnewlines(editedtext):
524 for l in mdiff.splitnewlines(editedtext):
525 if l.startswith(b'HG:'):
525 if l.startswith(b'HG:'):
526 continue
526 continue
527 if l[colonpos - 1 : colonpos + 2] != b' : ':
527 if l[colonpos - 1 : colonpos + 2] != b' : ':
528 raise error.InputError(_(b'malformed line: %s') % l)
528 raise error.InputError(_(b'malformed line: %s') % l)
529 linecontent = l[colonpos + 2 :]
529 linecontent = l[colonpos + 2 :]
530 for i, ch in enumerate(
530 for i, ch in enumerate(
531 pycompat.bytestr(l[leftpadpos : colonpos - 1])
531 pycompat.bytestr(l[leftpadpos : colonpos - 1])
532 ):
532 ):
533 if ch == b'y':
533 if ch == b'y':
534 contents[visiblefctxs[i][0]] += linecontent
534 contents[visiblefctxs[i][0]] += linecontent
535 # chunkstats is hard to calculate if anything changes, therefore
535 # chunkstats is hard to calculate if anything changes, therefore
536 # set them to just a simple value (1, 1).
536 # set them to just a simple value (1, 1).
537 if editedtext != editortext:
537 if editedtext != editortext:
538 self.chunkstats = [1, 1]
538 self.chunkstats = [1, 1]
539 return contents
539 return contents
540
540
541 def _getline(self, lineinfo):
541 def _getline(self, lineinfo):
542 """((rev, linenum)) -> str. convert rev+line number to line content"""
542 """((rev, linenum)) -> str. convert rev+line number to line content"""
543 rev, linenum = lineinfo
543 rev, linenum = lineinfo
544 if rev & 1: # odd: original line taken from fctxs
544 if rev & 1: # odd: original line taken from fctxs
545 return self.contentlines[rev // 2][linenum]
545 return self.contentlines[rev // 2][linenum]
546 else: # even: fixup line from targetfctx
546 else: # even: fixup line from targetfctx
547 return self.targetlines[linenum]
547 return self.targetlines[linenum]
548
548
549 def _iscontinuous(self, a1, a2, closedinterval=False):
549 def _iscontinuous(self, a1, a2, closedinterval=False):
550 """(a1, a2 : int) -> bool
550 """(a1, a2 : int) -> bool
551
551
552 check if these lines are continuous. i.e. no other insertions or
552 check if these lines are continuous. i.e. no other insertions or
553 deletions (from other revisions) among these lines.
553 deletions (from other revisions) among these lines.
554
554
555 closedinterval decides whether a2 should be included or not. i.e. is
555 closedinterval decides whether a2 should be included or not. i.e. is
556 it [a1, a2), or [a1, a2] ?
556 it [a1, a2), or [a1, a2] ?
557 """
557 """
558 if a1 >= a2:
558 if a1 >= a2:
559 return True
559 return True
560 llog = self.linelog
560 llog = self.linelog
561 offset1 = llog.getoffset(a1)
561 offset1 = llog.getoffset(a1)
562 offset2 = llog.getoffset(a2) + int(closedinterval)
562 offset2 = llog.getoffset(a2) + int(closedinterval)
563 linesinbetween = llog.getalllines(offset1, offset2)
563 linesinbetween = llog.getalllines(offset1, offset2)
564 return len(linesinbetween) == a2 - a1 + int(closedinterval)
564 return len(linesinbetween) == a2 - a1 + int(closedinterval)
565
565
566 def _optimizefixups(self, fixups):
566 def _optimizefixups(self, fixups):
567 """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
567 """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
568 merge adjacent fixups to make them less fragmented.
568 merge adjacent fixups to make them less fragmented.
569 """
569 """
570 result = []
570 result = []
571 pcurrentchunk = [[-1, -1, -1, -1, -1]]
571 pcurrentchunk = [[-1, -1, -1, -1, -1]]
572
572
573 def pushchunk():
573 def pushchunk():
574 if pcurrentchunk[0][0] != -1:
574 if pcurrentchunk[0][0] != -1:
575 result.append(tuple(pcurrentchunk[0]))
575 result.append(tuple(pcurrentchunk[0]))
576
576
577 for i, chunk in enumerate(fixups):
577 for i, chunk in enumerate(fixups):
578 rev, a1, a2, b1, b2 = chunk
578 rev, a1, a2, b1, b2 = chunk
579 lastrev = pcurrentchunk[0][0]
579 lastrev = pcurrentchunk[0][0]
580 lasta2 = pcurrentchunk[0][2]
580 lasta2 = pcurrentchunk[0][2]
581 lastb2 = pcurrentchunk[0][4]
581 lastb2 = pcurrentchunk[0][4]
582 if (
582 if (
583 a1 == lasta2
583 a1 == lasta2
584 and b1 == lastb2
584 and b1 == lastb2
585 and rev == lastrev
585 and rev == lastrev
586 and self._iscontinuous(max(a1 - 1, 0), a1)
586 and self._iscontinuous(max(a1 - 1, 0), a1)
587 ):
587 ):
588 # merge into currentchunk
588 # merge into currentchunk
589 pcurrentchunk[0][2] = a2
589 pcurrentchunk[0][2] = a2
590 pcurrentchunk[0][4] = b2
590 pcurrentchunk[0][4] = b2
591 else:
591 else:
592 pushchunk()
592 pushchunk()
593 pcurrentchunk[0] = list(chunk)
593 pcurrentchunk[0] = list(chunk)
594 pushchunk()
594 pushchunk()
595 return result
595 return result
596
596
597 def _showchanges(self, fm, alines, blines, chunk, fixups):
597 def _showchanges(self, fm, alines, blines, chunk, fixups):
598 def trim(line):
598 def trim(line):
599 if line.endswith(b'\n'):
599 if line.endswith(b'\n'):
600 line = line[:-1]
600 line = line[:-1]
601 return line
601 return line
602
602
603 # this is not optimized for perf but _showchanges only gets executed
603 # this is not optimized for perf but _showchanges only gets executed
604 # with an extra command-line flag.
604 # with an extra command-line flag.
605 a1, a2, b1, b2 = chunk
605 a1, a2, b1, b2 = chunk
606 aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
606 aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
607 for idx, fa1, fa2, fb1, fb2 in fixups:
607 for idx, fa1, fa2, fb1, fb2 in fixups:
608 for i in pycompat.xrange(fa1, fa2):
608 for i in range(fa1, fa2):
609 aidxs[i - a1] = (max(idx, 1) - 1) // 2
609 aidxs[i - a1] = (max(idx, 1) - 1) // 2
610 for i in pycompat.xrange(fb1, fb2):
610 for i in range(fb1, fb2):
611 bidxs[i - b1] = (max(idx, 1) - 1) // 2
611 bidxs[i - b1] = (max(idx, 1) - 1) // 2
612
612
613 fm.startitem()
613 fm.startitem()
614 fm.write(
614 fm.write(
615 b'hunk',
615 b'hunk',
616 b' %s\n',
616 b' %s\n',
617 b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1),
617 b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1),
618 label=b'diff.hunk',
618 label=b'diff.hunk',
619 )
619 )
620 fm.data(path=self.path, linetype=b'hunk')
620 fm.data(path=self.path, linetype=b'hunk')
621
621
622 def writeline(idx, diffchar, line, linetype, linelabel):
622 def writeline(idx, diffchar, line, linetype, linelabel):
623 fm.startitem()
623 fm.startitem()
624 node = b''
624 node = b''
625 if idx:
625 if idx:
626 ctx = self.fctxs[idx]
626 ctx = self.fctxs[idx]
627 fm.context(fctx=ctx)
627 fm.context(fctx=ctx)
628 node = ctx.hex()
628 node = ctx.hex()
629 self.ctxaffected.add(ctx.changectx())
629 self.ctxaffected.add(ctx.changectx())
630 fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node')
630 fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node')
631 fm.write(
631 fm.write(
632 b'diffchar ' + linetype,
632 b'diffchar ' + linetype,
633 b'%s%s\n',
633 b'%s%s\n',
634 diffchar,
634 diffchar,
635 line,
635 line,
636 label=linelabel,
636 label=linelabel,
637 )
637 )
638 fm.data(path=self.path, linetype=linetype)
638 fm.data(path=self.path, linetype=linetype)
639
639
640 for i in pycompat.xrange(a1, a2):
640 for i in range(a1, a2):
641 writeline(
641 writeline(
642 aidxs[i - a1],
642 aidxs[i - a1],
643 b'-',
643 b'-',
644 trim(alines[i]),
644 trim(alines[i]),
645 b'deleted',
645 b'deleted',
646 b'diff.deleted',
646 b'diff.deleted',
647 )
647 )
648 for i in pycompat.xrange(b1, b2):
648 for i in range(b1, b2):
649 writeline(
649 writeline(
650 bidxs[i - b1],
650 bidxs[i - b1],
651 b'+',
651 b'+',
652 trim(blines[i]),
652 trim(blines[i]),
653 b'inserted',
653 b'inserted',
654 b'diff.inserted',
654 b'diff.inserted',
655 )
655 )
656
656
657
657
class fixupstate:
    """state needed to run absorb

    internally, it keeps paths and filefixupstates.

    a typical use is like filefixupstates:

    1. call diffwith, to calculate fixups
    2. (optionally), present fixups to the user, or edit fixups
    3. call apply, to apply changes to memory
    4. call commit, to commit changes to hg database
    """

    def __init__(self, stack, ui=None, opts=None):
        """([ctx], ui or None) -> None

        stack: should be linear, and sorted by topo order - oldest first.
        all commits in stack are considered mutable.
        """
        assert stack
        self.ui = ui or nullui()
        self.opts = opts or {}
        self.stack = stack
        self.repo = stack[-1].repo().unfiltered()

        # following fields will be filled later
        self.paths = []  # [str]
        self.status = None  # ctx.status output
        self.fctxmap = {}  # {path: {ctx: fctx}}
        self.fixupmap = {}  # {path: filefixupstate}
        self.replacemap = {}  # {oldnode: newnode or None}
        self.finalnode = None  # head after all fixups
        self.ctxaffected = set()  # ctx that will be absorbed into

    def diffwith(self, targetctx, match=None, fm=None):
        """diff and prepare fixups. update self.fixupmap, self.paths"""
        # only care about modified files
        self.status = self.stack[-1].status(targetctx, match)
        self.paths = []
        # but if --edit-lines is used, the user may want to edit files
        # even if they are not modified
        editopt = self.opts.get(b'edit_lines')
        if not self.status.modified and editopt and match:
            interestingpaths = match.files()
        else:
            interestingpaths = self.status.modified
        # prepare the filefixupstate
        seenfctxs = set()
        # sorting is necessary to eliminate ambiguity for the "double move"
        # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
        for path in sorted(interestingpaths):
            self.ui.debug(b'calculating fixups for %s\n' % path)
            targetfctx = targetctx[path]
            fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
            # ignore symbolic links or binary, or unchanged files
            if any(
                f.islink() or stringutil.binary(f.data())
                for f in [targetfctx] + fctxs
                if not isinstance(f, emptyfilecontext)
            ):
                continue
            if targetfctx.data() == fctxs[-1].data() and not editopt:
                continue
            seenfctxs.update(fctxs[1:])
            self.fctxmap[path] = ctx2fctx
            fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
            if fm is not None:
                fm.startitem()
                fm.plain(b'showing changes for ')
                fm.write(b'path', b'%s\n', path, label=b'absorb.path')
                fm.data(linetype=b'path')
            fstate.diffwith(targetfctx, fm)
            self.fixupmap[path] = fstate
            self.paths.append(path)
            self.ctxaffected.update(fstate.ctxaffected)

    def apply(self):
        """apply fixups to individual filefixupstates"""
        for path, state in self.fixupmap.items():
            if self.ui.debugflag:
                self.ui.write(_(b'applying fixups to %s\n') % path)
            state.apply()

    @property
    def chunkstats(self):
        """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
        return {path: state.chunkstats for path, state in self.fixupmap.items()}

    def commit(self):
        """commit changes. update self.finalnode, self.replacemap"""
        with self.repo.transaction(b'absorb') as tr:
            self._commitstack()
            self._movebookmarks(tr)
            if self.repo[b'.'].node() in self.replacemap:
                self._moveworkingdirectoryparent()
            self._cleanupoldcommits()
        return self.finalnode

    def printchunkstats(self):
        """print things like '1 of 2 chunk(s) applied'"""
        ui = self.ui
        chunkstats = self.chunkstats
        if ui.verbose:
            # chunkstats for each file
            for path, stat in chunkstats.items():
                if stat[0]:
                    ui.write(
                        _(b'%s: %d of %d chunk(s) applied\n')
                        % (path, stat[0], stat[1])
                    )
        elif not ui.quiet:
            # a summary for all files
            stats = chunkstats.values()
            applied, total = (sum(s[i] for s in stats) for i in (0, 1))
            ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total))

    def _commitstack(self):
        """make new commits. update self.finalnode, self.replacemap.
        it is splitted from "commit" to avoid too much indentation.
        """
        # last node (20-char) committed by us
        lastcommitted = None
        # p1 which overrides the parent of the next commit, "None" means use
        # the original parent unchanged
        nextp1 = None
        for ctx in self.stack:
            memworkingcopy = self._getnewfilecontents(ctx)
            if not memworkingcopy and not lastcommitted:
                # nothing changed, nothing commited
                nextp1 = ctx
                continue
            willbecomenoop = ctx.files() and self._willbecomenoop(
                memworkingcopy, ctx, nextp1
            )
            if self.skip_empty_successor and willbecomenoop:
                # changeset is no longer necessary
                self.replacemap[ctx.node()] = None
                msg = _(b'became empty and was dropped')
            else:
                # changeset needs re-commit
                nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
                lastcommitted = self.repo[nodestr]
                nextp1 = lastcommitted
                self.replacemap[ctx.node()] = lastcommitted.node()
                if memworkingcopy:
                    if willbecomenoop:
                        msg = _(b'%d file(s) changed, became empty as %s')
                    else:
                        msg = _(b'%d file(s) changed, became %s')
                    msg = msg % (
                        len(memworkingcopy),
                        self._ctx2str(lastcommitted),
                    )
                else:
                    msg = _(b'became %s') % self._ctx2str(lastcommitted)
            if self.ui.verbose and msg:
                self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg))
        self.finalnode = lastcommitted and lastcommitted.node()

    def _ctx2str(self, ctx):
        """(ctx) -> str. short or full hex depending on --debug"""
        if self.ui.debugflag:
            return b'%d:%s' % (ctx.rev(), ctx.hex())
        else:
            return b'%d:%s' % (ctx.rev(), short(ctx.node()))

    def _getnewfilecontents(self, ctx):
        """(ctx) -> {path: str}

        fetch file contents from filefixupstates.
        return the working copy overrides - files different from ctx.
        """
        result = {}
        for path in self.paths:
            ctx2fctx = self.fctxmap[path]  # {ctx: fctx}
            if ctx not in ctx2fctx:
                continue
            fctx = ctx2fctx[ctx]
            content = fctx.data()
            newcontent = self.fixupmap[path].getfinalcontent(fctx)
            if content != newcontent:
                result[fctx.path()] = newcontent
        return result

    def _movebookmarks(self, tr):
        """move or delete bookmarks pointing at replaced changesets"""
        repo = self.repo
        needupdate = [
            (name, self.replacemap[hsh])
            for name, hsh in repo._bookmarks.items()
            if hsh in self.replacemap
        ]
        changes = []
        for name, hsh in needupdate:
            if hsh:
                changes.append((name, hsh))
                if self.ui.verbose:
                    self.ui.write(
                        _(b'moving bookmark %s to %s\n') % (name, hex(hsh))
                    )
            else:
                changes.append((name, None))
                if self.ui.verbose:
                    self.ui.write(_(b'deleting bookmark %s\n') % name)
        repo._bookmarks.applychanges(repo, tr, changes)

    def _moveworkingdirectoryparent(self):
        if not self.finalnode:
            # Find the latest not-{obsoleted,stripped} parent.
            revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys())
            ctx = self.repo[revs.first()]
            self.finalnode = ctx.node()
        else:
            ctx = self.repo[self.finalnode]

        dirstate = self.repo.dirstate
        # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
        # be slow. in absorb's case, no need to invalidate fsmonitorstate.
        noop = lambda: 0
        restore = noop
        if util.safehasattr(dirstate, '_fsmonitorstate'):
            bak = dirstate._fsmonitorstate.invalidate

            def restore():
                dirstate._fsmonitorstate.invalidate = bak

            dirstate._fsmonitorstate.invalidate = noop
        try:
            with dirstate.parentchange():
                dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
        finally:
            restore()

    @staticmethod
    def _willbecomenoop(memworkingcopy, ctx, pctx=None):
        """({path: content}, ctx, ctx) -> bool. test if a commit will be noop

        if it will become an empty commit (does not change anything, after the
        memworkingcopy overrides), return True. otherwise return False.
        """
        if not pctx:
            parents = ctx.parents()
            if len(parents) != 1:
                return False
            pctx = parents[0]
        if ctx.branch() != pctx.branch():
            return False
        if ctx.extra().get(b'close'):
            return False
        # ctx changes more files (not a subset of memworkingcopy)
        if not set(ctx.files()).issubset(set(memworkingcopy)):
            return False
        for path, content in memworkingcopy.items():
            if path not in pctx or path not in ctx:
                return False
            fctx = ctx[path]
            pfctx = pctx[path]
            if pfctx.flags() != fctx.flags():
                return False
            if pfctx.data() != content:
                return False
        return True

    def _commitsingle(self, memworkingcopy, ctx, p1=None):
        """(ctx, {path: content}, node) -> node. make a single commit

        the commit is a clone from ctx, with a (optionally) different p1, and
        different file contents replaced by memworkingcopy.
        """
        parents = p1 and (p1, self.repo.nullid)
        extra = ctx.extra()
        if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
            extra[b'absorb_source'] = ctx.hex()

        desc = rewriteutil.update_hash_refs(
            ctx.repo(),
            ctx.description(),
            {
                oldnode: [newnode]
                for oldnode, newnode in self.replacemap.items()
            },
        )
        mctx = overlaycontext(
            memworkingcopy, ctx, parents, extra=extra, desc=desc
        )
        return mctx.commit()

    @util.propertycache
    def _useobsolete(self):
        """() -> bool"""
        return obsolete.isenabled(self.repo, obsolete.createmarkersopt)

    def _cleanupoldcommits(self):
        """obsolete or strip the replaced changesets, fixing phases"""
        replacements = {
            k: ([v] if v is not None else [])
            for k, v in self.replacemap.items()
        }
        if replacements:
            scmutil.cleanupnodes(
                self.repo, replacements, operation=b'absorb', fixphase=True
            )

    @util.propertycache
    def skip_empty_successor(self):
        # config-driven: whether empty successors are dropped entirely
        return rewriteutil.skip_empty_successor(self.ui, b'absorb')
961
961
962
962
def _parsechunk(hunk):
    """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))

    Returns (None, None) for unsupported hunk types. a1/a2 are 0-based
    line boundaries on the "a" side; blines are the replacement lines.
    """
    if type(hunk) not in (crecord.uihunk, patch.recordhunk):
        return None, None
    path = hunk.header.filename()
    a1 = hunk.fromline + len(hunk.before) - 1
    # remove before and after context
    hunk.before = hunk.after = []
    buf = util.stringio()
    hunk.write(buf)
    patchlines = mdiff.splitnewlines(buf.getvalue())
    # hunk.prettystr() will update hunk.removed
    a2 = a1 + hunk.removed
    blines = [l[1:] for l in patchlines[1:] if not l.startswith(b'-')]
    return path, (a1, a2, blines)
977 return path, (a1, a2, blines)
978
978
979
979
def overlaydiffcontext(ctx, chunks):
    """(ctx, [crecord.uihunk]) -> memctx

    return a memctx with some [1] patches (chunks) applied to ctx.
    [1]: modifications are handled. renames, mode changes, etc. are ignored.
    """
    # sadly the applying-patch logic is hardly reusable, and messy:
    # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it
    #    needs a file stream of a patch and will re-parse it, while we have
    #    structured hunk objects at hand.
    # 2. a lot of different implementations about "chunk" (patch.hunk,
    #    patch.recordhunk, crecord.uihunk)
    # as we only care about applying changes to modified files, no mode
    # change, no binary diff, and no renames, it's probably okay to
    # re-invent the logic using much simpler code here.
    memworkingcopy = {}  # {path: content}
    patchmap = defaultdict(lambda: [])  # {path: [(a1, a2, [bline])]}
    for path, info in map(_parsechunk, chunks):
        if not path or not info:
            continue
        patchmap[path].append(info)
    for path, patches in patchmap.items():
        if path not in ctx or not patches:
            continue
        # apply bottom-up so earlier line numbers stay valid
        patches.sort(reverse=True)
        lines = mdiff.splitnewlines(ctx[path].data())
        for a1, a2, blines in patches:
            lines[a1:a2] = blines
        memworkingcopy[path] = b''.join(lines)
    return overlaycontext(memworkingcopy, ctx)
1009 return overlaycontext(memworkingcopy, ctx)
1010
1010
1011
1011
1012 def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
1012 def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
1013 """pick fixup chunks from targetctx, apply them to stack.
1013 """pick fixup chunks from targetctx, apply them to stack.
1014
1014
1015 if targetctx is None, the working copy context will be used.
1015 if targetctx is None, the working copy context will be used.
1016 if stack is None, the current draft stack will be used.
1016 if stack is None, the current draft stack will be used.
1017 return fixupstate.
1017 return fixupstate.
1018 """
1018 """
1019 if stack is None:
1019 if stack is None:
1020 limit = ui.configint(b'absorb', b'max-stack-size')
1020 limit = ui.configint(b'absorb', b'max-stack-size')
1021 headctx = repo[b'.']
1021 headctx = repo[b'.']
1022 if len(headctx.parents()) > 1:
1022 if len(headctx.parents()) > 1:
1023 raise error.InputError(_(b'cannot absorb into a merge'))
1023 raise error.InputError(_(b'cannot absorb into a merge'))
1024 stack = getdraftstack(headctx, limit)
1024 stack = getdraftstack(headctx, limit)
1025 if limit and len(stack) >= limit:
1025 if limit and len(stack) >= limit:
1026 ui.warn(
1026 ui.warn(
1027 _(
1027 _(
1028 b'absorb: only the recent %d changesets will '
1028 b'absorb: only the recent %d changesets will '
1029 b'be analysed\n'
1029 b'be analysed\n'
1030 )
1030 )
1031 % limit
1031 % limit
1032 )
1032 )
1033 if not stack:
1033 if not stack:
1034 raise error.InputError(_(b'no mutable changeset to change'))
1034 raise error.InputError(_(b'no mutable changeset to change'))
1035 if targetctx is None: # default to working copy
1035 if targetctx is None: # default to working copy
1036 targetctx = repo[None]
1036 targetctx = repo[None]
1037 if pats is None:
1037 if pats is None:
1038 pats = ()
1038 pats = ()
1039 if opts is None:
1039 if opts is None:
1040 opts = {}
1040 opts = {}
1041 state = fixupstate(stack, ui=ui, opts=opts)
1041 state = fixupstate(stack, ui=ui, opts=opts)
1042 matcher = scmutil.match(targetctx, pats, opts)
1042 matcher = scmutil.match(targetctx, pats, opts)
1043 if opts.get(b'interactive'):
1043 if opts.get(b'interactive'):
1044 diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
1044 diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
1045 origchunks = patch.parsepatch(diff)
1045 origchunks = patch.parsepatch(diff)
1046 chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0]
1046 chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0]
1047 targetctx = overlaydiffcontext(stack[-1], chunks)
1047 targetctx = overlaydiffcontext(stack[-1], chunks)
1048 if opts.get(b'edit_lines'):
1048 if opts.get(b'edit_lines'):
1049 # If we're going to open the editor, don't ask the user to confirm
1049 # If we're going to open the editor, don't ask the user to confirm
1050 # first
1050 # first
1051 opts[b'apply_changes'] = True
1051 opts[b'apply_changes'] = True
1052 fm = None
1052 fm = None
1053 if opts.get(b'print_changes') or not opts.get(b'apply_changes'):
1053 if opts.get(b'print_changes') or not opts.get(b'apply_changes'):
1054 fm = ui.formatter(b'absorb', opts)
1054 fm = ui.formatter(b'absorb', opts)
1055 state.diffwith(targetctx, matcher, fm)
1055 state.diffwith(targetctx, matcher, fm)
1056 if fm is not None:
1056 if fm is not None:
1057 fm.startitem()
1057 fm.startitem()
1058 fm.write(
1058 fm.write(
1059 b"count", b"\n%d changesets affected\n", len(state.ctxaffected)
1059 b"count", b"\n%d changesets affected\n", len(state.ctxaffected)
1060 )
1060 )
1061 fm.data(linetype=b'summary')
1061 fm.data(linetype=b'summary')
1062 for ctx in reversed(stack):
1062 for ctx in reversed(stack):
1063 if ctx not in state.ctxaffected:
1063 if ctx not in state.ctxaffected:
1064 continue
1064 continue
1065 fm.startitem()
1065 fm.startitem()
1066 fm.context(ctx=ctx)
1066 fm.context(ctx=ctx)
1067 fm.data(linetype=b'changeset')
1067 fm.data(linetype=b'changeset')
1068 fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node')
1068 fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node')
1069 descfirstline = stringutil.firstline(ctx.description())
1069 descfirstline = stringutil.firstline(ctx.description())
1070 fm.write(
1070 fm.write(
1071 b'descfirstline',
1071 b'descfirstline',
1072 b'%s\n',
1072 b'%s\n',
1073 descfirstline,
1073 descfirstline,
1074 label=b'absorb.description',
1074 label=b'absorb.description',
1075 )
1075 )
1076 fm.end()
1076 fm.end()
1077 if not opts.get(b'dry_run'):
1077 if not opts.get(b'dry_run'):
1078 if (
1078 if (
1079 not opts.get(b'apply_changes')
1079 not opts.get(b'apply_changes')
1080 and state.ctxaffected
1080 and state.ctxaffected
1081 and ui.promptchoice(
1081 and ui.promptchoice(
1082 b"apply changes (y/N)? $$ &Yes $$ &No", default=1
1082 b"apply changes (y/N)? $$ &Yes $$ &No", default=1
1083 )
1083 )
1084 ):
1084 ):
1085 raise error.CanceledError(_(b'absorb cancelled\n'))
1085 raise error.CanceledError(_(b'absorb cancelled\n'))
1086
1086
1087 state.apply()
1087 state.apply()
1088 if state.commit():
1088 if state.commit():
1089 state.printchunkstats()
1089 state.printchunkstats()
1090 elif not ui.quiet:
1090 elif not ui.quiet:
1091 ui.write(_(b'nothing applied\n'))
1091 ui.write(_(b'nothing applied\n'))
1092 return state
1092 return state
1093
1093
1094
1094
1095 @command(
1095 @command(
1096 b'absorb',
1096 b'absorb',
1097 [
1097 [
1098 (
1098 (
1099 b'a',
1099 b'a',
1100 b'apply-changes',
1100 b'apply-changes',
1101 None,
1101 None,
1102 _(b'apply changes without prompting for confirmation'),
1102 _(b'apply changes without prompting for confirmation'),
1103 ),
1103 ),
1104 (
1104 (
1105 b'p',
1105 b'p',
1106 b'print-changes',
1106 b'print-changes',
1107 None,
1107 None,
1108 _(b'always print which changesets are modified by which changes'),
1108 _(b'always print which changesets are modified by which changes'),
1109 ),
1109 ),
1110 (
1110 (
1111 b'i',
1111 b'i',
1112 b'interactive',
1112 b'interactive',
1113 None,
1113 None,
1114 _(b'interactively select which chunks to apply'),
1114 _(b'interactively select which chunks to apply'),
1115 ),
1115 ),
1116 (
1116 (
1117 b'e',
1117 b'e',
1118 b'edit-lines',
1118 b'edit-lines',
1119 None,
1119 None,
1120 _(
1120 _(
1121 b'edit what lines belong to which changesets before commit '
1121 b'edit what lines belong to which changesets before commit '
1122 b'(EXPERIMENTAL)'
1122 b'(EXPERIMENTAL)'
1123 ),
1123 ),
1124 ),
1124 ),
1125 ]
1125 ]
1126 + commands.dryrunopts
1126 + commands.dryrunopts
1127 + commands.templateopts
1127 + commands.templateopts
1128 + commands.walkopts,
1128 + commands.walkopts,
1129 _(b'hg absorb [OPTION] [FILE]...'),
1129 _(b'hg absorb [OPTION] [FILE]...'),
1130 helpcategory=command.CATEGORY_COMMITTING,
1130 helpcategory=command.CATEGORY_COMMITTING,
1131 helpbasic=True,
1131 helpbasic=True,
1132 )
1132 )
1133 def absorbcmd(ui, repo, *pats, **opts):
1133 def absorbcmd(ui, repo, *pats, **opts):
1134 """incorporate corrections into the stack of draft changesets
1134 """incorporate corrections into the stack of draft changesets
1135
1135
1136 absorb analyzes each change in your working directory and attempts to
1136 absorb analyzes each change in your working directory and attempts to
1137 amend the changed lines into the changesets in your stack that first
1137 amend the changed lines into the changesets in your stack that first
1138 introduced those lines.
1138 introduced those lines.
1139
1139
1140 If absorb cannot find an unambiguous changeset to amend for a change,
1140 If absorb cannot find an unambiguous changeset to amend for a change,
1141 that change will be left in the working directory, untouched. They can be
1141 that change will be left in the working directory, untouched. They can be
1142 observed by :hg:`status` or :hg:`diff` afterwards. In other words,
1142 observed by :hg:`status` or :hg:`diff` afterwards. In other words,
1143 absorb does not write to the working directory.
1143 absorb does not write to the working directory.
1144
1144
1145 Changesets outside the revset `::. and not public() and not merge()` will
1145 Changesets outside the revset `::. and not public() and not merge()` will
1146 not be changed.
1146 not be changed.
1147
1147
1148 Changesets that become empty after applying the changes will be deleted.
1148 Changesets that become empty after applying the changes will be deleted.
1149
1149
1150 By default, absorb will show what it plans to do and prompt for
1150 By default, absorb will show what it plans to do and prompt for
1151 confirmation. If you are confident that the changes will be absorbed
1151 confirmation. If you are confident that the changes will be absorbed
1152 to the correct place, run :hg:`absorb -a` to apply the changes
1152 to the correct place, run :hg:`absorb -a` to apply the changes
1153 immediately.
1153 immediately.
1154
1154
1155 Returns 0 on success, 1 if all chunks were ignored and nothing amended.
1155 Returns 0 on success, 1 if all chunks were ignored and nothing amended.
1156 """
1156 """
1157 opts = pycompat.byteskwargs(opts)
1157 opts = pycompat.byteskwargs(opts)
1158
1158
1159 with repo.wlock(), repo.lock():
1159 with repo.wlock(), repo.lock():
1160 if not opts[b'dry_run']:
1160 if not opts[b'dry_run']:
1161 cmdutil.checkunfinished(repo)
1161 cmdutil.checkunfinished(repo)
1162
1162
1163 state = absorb(ui, repo, pats=pats, opts=opts)
1163 state = absorb(ui, repo, pats=pats, opts=opts)
1164 if sum(s[0] for s in state.chunkstats.values()) == 0:
1164 if sum(s[0] for s in state.chunkstats.values()) == 0:
1165 return 1
1165 return 1
@@ -1,489 +1,488 b''
1 # acl.py - changeset access control for mercurial
1 # acl.py - changeset access control for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''hooks for controlling repository access
8 '''hooks for controlling repository access
9
9
10 This hook makes it possible to allow or deny write access to given
10 This hook makes it possible to allow or deny write access to given
11 branches and paths of a repository when receiving incoming changesets
11 branches and paths of a repository when receiving incoming changesets
12 via pretxnchangegroup and pretxncommit.
12 via pretxnchangegroup and pretxncommit.
13
13
14 The authorization is matched based on the local user name on the
14 The authorization is matched based on the local user name on the
15 system where the hook runs, and not the committer of the original
15 system where the hook runs, and not the committer of the original
16 changeset (since the latter is merely informative).
16 changeset (since the latter is merely informative).
17
17
18 The acl hook is best used along with a restricted shell like hgsh,
18 The acl hook is best used along with a restricted shell like hgsh,
19 preventing authenticating users from doing anything other than pushing
19 preventing authenticating users from doing anything other than pushing
20 or pulling. The hook is not safe to use if users have interactive
20 or pulling. The hook is not safe to use if users have interactive
21 shell access, as they can then disable the hook. Nor is it safe if
21 shell access, as they can then disable the hook. Nor is it safe if
22 remote users share an account, because then there is no way to
22 remote users share an account, because then there is no way to
23 distinguish them.
23 distinguish them.
24
24
25 The order in which access checks are performed is:
25 The order in which access checks are performed is:
26
26
27 1) Deny list for branches (section ``acl.deny.branches``)
27 1) Deny list for branches (section ``acl.deny.branches``)
28 2) Allow list for branches (section ``acl.allow.branches``)
28 2) Allow list for branches (section ``acl.allow.branches``)
29 3) Deny list for paths (section ``acl.deny``)
29 3) Deny list for paths (section ``acl.deny``)
30 4) Allow list for paths (section ``acl.allow``)
30 4) Allow list for paths (section ``acl.allow``)
31
31
32 The allow and deny sections take key-value pairs.
32 The allow and deny sections take key-value pairs.
33
33
34 Branch-based Access Control
34 Branch-based Access Control
35 ---------------------------
35 ---------------------------
36
36
37 Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
37 Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
38 have branch-based access control. Keys in these sections can be
38 have branch-based access control. Keys in these sections can be
39 either:
39 either:
40
40
41 - a branch name, or
41 - a branch name, or
42 - an asterisk, to match any branch;
42 - an asterisk, to match any branch;
43
43
44 The corresponding values can be either:
44 The corresponding values can be either:
45
45
46 - a comma-separated list containing users and groups, or
46 - a comma-separated list containing users and groups, or
47 - an asterisk, to match anyone;
47 - an asterisk, to match anyone;
48
48
49 You can add the "!" prefix to a user or group name to invert the sense
49 You can add the "!" prefix to a user or group name to invert the sense
50 of the match.
50 of the match.
51
51
52 Path-based Access Control
52 Path-based Access Control
53 -------------------------
53 -------------------------
54
54
55 Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
55 Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
56 access control. Keys in these sections accept a subtree pattern (with
56 access control. Keys in these sections accept a subtree pattern (with
57 a glob syntax by default). The corresponding values follow the same
57 a glob syntax by default). The corresponding values follow the same
58 syntax as the other sections above.
58 syntax as the other sections above.
59
59
60 Bookmark-based Access Control
60 Bookmark-based Access Control
61 -----------------------------
61 -----------------------------
62 Use the ``acl.deny.bookmarks`` and ``acl.allow.bookmarks`` sections to
62 Use the ``acl.deny.bookmarks`` and ``acl.allow.bookmarks`` sections to
63 have bookmark-based access control. Keys in these sections can be
63 have bookmark-based access control. Keys in these sections can be
64 either:
64 either:
65
65
66 - a bookmark name, or
66 - a bookmark name, or
67 - an asterisk, to match any bookmark;
67 - an asterisk, to match any bookmark;
68
68
69 The corresponding values can be either:
69 The corresponding values can be either:
70
70
71 - a comma-separated list containing users and groups, or
71 - a comma-separated list containing users and groups, or
72 - an asterisk, to match anyone;
72 - an asterisk, to match anyone;
73
73
74 You can add the "!" prefix to a user or group name to invert the sense
74 You can add the "!" prefix to a user or group name to invert the sense
75 of the match.
75 of the match.
76
76
77 Note: for interactions between clients and servers using Mercurial 3.6+
77 Note: for interactions between clients and servers using Mercurial 3.6+
78 a rejection will generally reject the entire push, for interactions
78 a rejection will generally reject the entire push, for interactions
79 involving older clients, the commit transactions will already be accepted,
79 involving older clients, the commit transactions will already be accepted,
80 and only the bookmark movement will be rejected.
80 and only the bookmark movement will be rejected.
81
81
82 Groups
82 Groups
83 ------
83 ------
84
84
85 Group names must be prefixed with an ``@`` symbol. Specifying a group
85 Group names must be prefixed with an ``@`` symbol. Specifying a group
86 name has the same effect as specifying all the users in that group.
86 name has the same effect as specifying all the users in that group.
87
87
88 You can define group members in the ``acl.groups`` section.
88 You can define group members in the ``acl.groups`` section.
89 If a group name is not defined there, and Mercurial is running under
89 If a group name is not defined there, and Mercurial is running under
90 a Unix-like system, the list of users will be taken from the OS.
90 a Unix-like system, the list of users will be taken from the OS.
91 Otherwise, an exception will be raised.
91 Otherwise, an exception will be raised.
92
92
93 Example Configuration
93 Example Configuration
94 ---------------------
94 ---------------------
95
95
96 ::
96 ::
97
97
98 [hooks]
98 [hooks]
99
99
100 # Use this if you want to check access restrictions at commit time
100 # Use this if you want to check access restrictions at commit time
101 pretxncommit.acl = python:hgext.acl.hook
101 pretxncommit.acl = python:hgext.acl.hook
102
102
103 # Use this if you want to check access restrictions for pull, push,
103 # Use this if you want to check access restrictions for pull, push,
104 # bundle and serve.
104 # bundle and serve.
105 pretxnchangegroup.acl = python:hgext.acl.hook
105 pretxnchangegroup.acl = python:hgext.acl.hook
106
106
107 [acl]
107 [acl]
108 # Allow or deny access for incoming changes only if their source is
108 # Allow or deny access for incoming changes only if their source is
109 # listed here, let them pass otherwise. Source is "serve" for all
109 # listed here, let them pass otherwise. Source is "serve" for all
110 # remote access (http or ssh), "push", "pull" or "bundle" when the
110 # remote access (http or ssh), "push", "pull" or "bundle" when the
111 # related commands are run locally.
111 # related commands are run locally.
112 # Default: serve
112 # Default: serve
113 sources = serve
113 sources = serve
114
114
115 [acl.deny.branches]
115 [acl.deny.branches]
116
116
117 # Everyone is denied to the frozen branch:
117 # Everyone is denied to the frozen branch:
118 frozen-branch = *
118 frozen-branch = *
119
119
120 # A bad user is denied on all branches:
120 # A bad user is denied on all branches:
121 * = bad-user
121 * = bad-user
122
122
123 [acl.allow.branches]
123 [acl.allow.branches]
124
124
125 # A few users are allowed on branch-a:
125 # A few users are allowed on branch-a:
126 branch-a = user-1, user-2, user-3
126 branch-a = user-1, user-2, user-3
127
127
128 # Only one user is allowed on branch-b:
128 # Only one user is allowed on branch-b:
129 branch-b = user-1
129 branch-b = user-1
130
130
131 # The super user is allowed on any branch:
131 # The super user is allowed on any branch:
132 * = super-user
132 * = super-user
133
133
134 # Everyone is allowed on branch-for-tests:
134 # Everyone is allowed on branch-for-tests:
135 branch-for-tests = *
135 branch-for-tests = *
136
136
137 [acl.deny]
137 [acl.deny]
138 # This list is checked first. If a match is found, acl.allow is not
138 # This list is checked first. If a match is found, acl.allow is not
139 # checked. All users are granted access if acl.deny is not present.
139 # checked. All users are granted access if acl.deny is not present.
140 # Format for both lists: glob pattern = user, ..., @group, ...
140 # Format for both lists: glob pattern = user, ..., @group, ...
141
141
142 # To match everyone, use an asterisk for the user:
142 # To match everyone, use an asterisk for the user:
143 # my/glob/pattern = *
143 # my/glob/pattern = *
144
144
145 # user6 will not have write access to any file:
145 # user6 will not have write access to any file:
146 ** = user6
146 ** = user6
147
147
148 # Group "hg-denied" will not have write access to any file:
148 # Group "hg-denied" will not have write access to any file:
149 ** = @hg-denied
149 ** = @hg-denied
150
150
151 # Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
151 # Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
152 # everyone being able to change all other files. See below.
152 # everyone being able to change all other files. See below.
153 src/main/resources/DONT-TOUCH-THIS.txt = *
153 src/main/resources/DONT-TOUCH-THIS.txt = *
154
154
155 [acl.allow]
155 [acl.allow]
156 # if acl.allow is not present, all users are allowed by default
156 # if acl.allow is not present, all users are allowed by default
157 # empty acl.allow = no users allowed
157 # empty acl.allow = no users allowed
158
158
159 # User "doc_writer" has write access to any file under the "docs"
159 # User "doc_writer" has write access to any file under the "docs"
160 # folder:
160 # folder:
161 docs/** = doc_writer
161 docs/** = doc_writer
162
162
163 # User "jack" and group "designers" have write access to any file
163 # User "jack" and group "designers" have write access to any file
164 # under the "images" folder:
164 # under the "images" folder:
165 images/** = jack, @designers
165 images/** = jack, @designers
166
166
167 # Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
167 # Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
168 # will have write access to any file under the "resources" folder
168 # will have write access to any file under the "resources" folder
169 # (except for 1 file. See acl.deny):
169 # (except for 1 file. See acl.deny):
170 src/main/resources/** = *
170 src/main/resources/** = *
171
171
172 .hgtags = release_engineer
172 .hgtags = release_engineer
173
173
174 Examples using the "!" prefix
174 Examples using the "!" prefix
175 .............................
175 .............................
176
176
177 Suppose there's a branch that only a given user (or group) should be able to
177 Suppose there's a branch that only a given user (or group) should be able to
178 push to, and you don't want to restrict access to any other branch that may
178 push to, and you don't want to restrict access to any other branch that may
179 be created.
179 be created.
180
180
181 The "!" prefix allows you to prevent anyone except a given user or group to
181 The "!" prefix allows you to prevent anyone except a given user or group to
182 push changesets in a given branch or path.
182 push changesets in a given branch or path.
183
183
184 In the examples below, we will:
184 In the examples below, we will:
185 1) Deny access to branch "ring" to anyone but user "gollum"
185 1) Deny access to branch "ring" to anyone but user "gollum"
186 2) Deny access to branch "lake" to anyone but members of the group "hobbit"
186 2) Deny access to branch "lake" to anyone but members of the group "hobbit"
187 3) Deny access to a file to anyone but user "gollum"
187 3) Deny access to a file to anyone but user "gollum"
188
188
189 ::
189 ::
190
190
191 [acl.allow.branches]
191 [acl.allow.branches]
192 # Empty
192 # Empty
193
193
194 [acl.deny.branches]
194 [acl.deny.branches]
195
195
196 # 1) only 'gollum' can commit to branch 'ring';
196 # 1) only 'gollum' can commit to branch 'ring';
197 # 'gollum' and anyone else can still commit to any other branch.
197 # 'gollum' and anyone else can still commit to any other branch.
198 ring = !gollum
198 ring = !gollum
199
199
200 # 2) only members of the group 'hobbit' can commit to branch 'lake';
200 # 2) only members of the group 'hobbit' can commit to branch 'lake';
201 # 'hobbit' members and anyone else can still commit to any other branch.
201 # 'hobbit' members and anyone else can still commit to any other branch.
202 lake = !@hobbit
202 lake = !@hobbit
203
203
204 # You can also deny access based on file paths:
204 # You can also deny access based on file paths:
205
205
206 [acl.allow]
206 [acl.allow]
207 # Empty
207 # Empty
208
208
209 [acl.deny]
209 [acl.deny]
210 # 3) only 'gollum' can change the file below;
210 # 3) only 'gollum' can change the file below;
211 # 'gollum' and anyone else can still change any other file.
211 # 'gollum' and anyone else can still change any other file.
212 /misty/mountains/cave/ring = !gollum
212 /misty/mountains/cave/ring = !gollum
213
213
214 '''
214 '''
215
215
216
216
217 from mercurial.i18n import _
217 from mercurial.i18n import _
218 from mercurial import (
218 from mercurial import (
219 error,
219 error,
220 extensions,
220 extensions,
221 match,
221 match,
222 pycompat,
223 registrar,
222 registrar,
224 util,
223 util,
225 )
224 )
226 from mercurial.utils import procutil
225 from mercurial.utils import procutil
227
226
228 urlreq = util.urlreq
227 urlreq = util.urlreq
229
228
230 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
229 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
231 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
230 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
232 # be specifying the version(s) of Mercurial they are tested with, or
231 # be specifying the version(s) of Mercurial they are tested with, or
233 # leave the attribute unspecified.
232 # leave the attribute unspecified.
234 testedwith = b'ships-with-hg-core'
233 testedwith = b'ships-with-hg-core'
235
234
236 configtable = {}
235 configtable = {}
237 configitem = registrar.configitem(configtable)
236 configitem = registrar.configitem(configtable)
238
237
239 # deprecated config: acl.config
238 # deprecated config: acl.config
240 configitem(
239 configitem(
241 b'acl',
240 b'acl',
242 b'config',
241 b'config',
243 default=None,
242 default=None,
244 )
243 )
245 configitem(
244 configitem(
246 b'acl.groups',
245 b'acl.groups',
247 b'.*',
246 b'.*',
248 default=None,
247 default=None,
249 generic=True,
248 generic=True,
250 )
249 )
251 configitem(
250 configitem(
252 b'acl.deny.branches',
251 b'acl.deny.branches',
253 b'.*',
252 b'.*',
254 default=None,
253 default=None,
255 generic=True,
254 generic=True,
256 )
255 )
257 configitem(
256 configitem(
258 b'acl.allow.branches',
257 b'acl.allow.branches',
259 b'.*',
258 b'.*',
260 default=None,
259 default=None,
261 generic=True,
260 generic=True,
262 )
261 )
263 configitem(
262 configitem(
264 b'acl.deny',
263 b'acl.deny',
265 b'.*',
264 b'.*',
266 default=None,
265 default=None,
267 generic=True,
266 generic=True,
268 )
267 )
269 configitem(
268 configitem(
270 b'acl.allow',
269 b'acl.allow',
271 b'.*',
270 b'.*',
272 default=None,
271 default=None,
273 generic=True,
272 generic=True,
274 )
273 )
275 configitem(
274 configitem(
276 b'acl',
275 b'acl',
277 b'sources',
276 b'sources',
278 default=lambda: [b'serve'],
277 default=lambda: [b'serve'],
279 )
278 )
280
279
281
280
282 def _getusers(ui, group):
281 def _getusers(ui, group):
283
282
284 # First, try to use group definition from section [acl.groups]
283 # First, try to use group definition from section [acl.groups]
285 hgrcusers = ui.configlist(b'acl.groups', group)
284 hgrcusers = ui.configlist(b'acl.groups', group)
286 if hgrcusers:
285 if hgrcusers:
287 return hgrcusers
286 return hgrcusers
288
287
289 ui.debug(b'acl: "%s" not defined in [acl.groups]\n' % group)
288 ui.debug(b'acl: "%s" not defined in [acl.groups]\n' % group)
290 # If no users found in group definition, get users from OS-level group
289 # If no users found in group definition, get users from OS-level group
291 try:
290 try:
292 return util.groupmembers(group)
291 return util.groupmembers(group)
293 except KeyError:
292 except KeyError:
294 raise error.Abort(_(b"group '%s' is undefined") % group)
293 raise error.Abort(_(b"group '%s' is undefined") % group)
295
294
296
295
297 def _usermatch(ui, user, usersorgroups):
296 def _usermatch(ui, user, usersorgroups):
298
297
299 if usersorgroups == b'*':
298 if usersorgroups == b'*':
300 return True
299 return True
301
300
302 for ug in usersorgroups.replace(b',', b' ').split():
301 for ug in usersorgroups.replace(b',', b' ').split():
303
302
304 if ug.startswith(b'!'):
303 if ug.startswith(b'!'):
305 # Test for excluded user or group. Format:
304 # Test for excluded user or group. Format:
306 # if ug is a user name: !username
305 # if ug is a user name: !username
307 # if ug is a group name: !@groupname
306 # if ug is a group name: !@groupname
308 ug = ug[1:]
307 ug = ug[1:]
309 if (
308 if (
310 not ug.startswith(b'@')
309 not ug.startswith(b'@')
311 and user != ug
310 and user != ug
312 or ug.startswith(b'@')
311 or ug.startswith(b'@')
313 and user not in _getusers(ui, ug[1:])
312 and user not in _getusers(ui, ug[1:])
314 ):
313 ):
315 return True
314 return True
316
315
317 # Test for user or group. Format:
316 # Test for user or group. Format:
318 # if ug is a user name: username
317 # if ug is a user name: username
319 # if ug is a group name: @groupname
318 # if ug is a group name: @groupname
320 elif (
319 elif (
321 user == ug or ug.startswith(b'@') and user in _getusers(ui, ug[1:])
320 user == ug or ug.startswith(b'@') and user in _getusers(ui, ug[1:])
322 ):
321 ):
323 return True
322 return True
324
323
325 return False
324 return False
326
325
327
326
328 def buildmatch(ui, repo, user, key):
327 def buildmatch(ui, repo, user, key):
329 '''return tuple of (match function, list enabled).'''
328 '''return tuple of (match function, list enabled).'''
330 if not ui.has_section(key):
329 if not ui.has_section(key):
331 ui.debug(b'acl: %s not enabled\n' % key)
330 ui.debug(b'acl: %s not enabled\n' % key)
332 return None
331 return None
333
332
334 pats = [
333 pats = [
335 pat for pat, users in ui.configitems(key) if _usermatch(ui, user, users)
334 pat for pat, users in ui.configitems(key) if _usermatch(ui, user, users)
336 ]
335 ]
337 ui.debug(
336 ui.debug(
338 b'acl: %s enabled, %d entries for user %s\n' % (key, len(pats), user)
337 b'acl: %s enabled, %d entries for user %s\n' % (key, len(pats), user)
339 )
338 )
340
339
341 # Branch-based ACL
340 # Branch-based ACL
342 if not repo:
341 if not repo:
343 if pats:
342 if pats:
344 # If there's an asterisk (meaning "any branch"), always return True;
343 # If there's an asterisk (meaning "any branch"), always return True;
345 # Otherwise, test if b is in pats
344 # Otherwise, test if b is in pats
346 if b'*' in pats:
345 if b'*' in pats:
347 return util.always
346 return util.always
348 return lambda b: b in pats
347 return lambda b: b in pats
349 return util.never
348 return util.never
350
349
351 # Path-based ACL
350 # Path-based ACL
352 if pats:
351 if pats:
353 return match.match(repo.root, b'', pats)
352 return match.match(repo.root, b'', pats)
354 return util.never
353 return util.never
355
354
356
355
357 def ensureenabled(ui):
356 def ensureenabled(ui):
358 """make sure the extension is enabled when used as hook
357 """make sure the extension is enabled when used as hook
359
358
360 When acl is used through hooks, the extension is never formally loaded and
359 When acl is used through hooks, the extension is never formally loaded and
361 enabled. This has some side effect, for example the config declaration is
360 enabled. This has some side effect, for example the config declaration is
362 never loaded. This function ensure the extension is enabled when running
361 never loaded. This function ensure the extension is enabled when running
363 hooks.
362 hooks.
364 """
363 """
365 if b'acl' in ui._knownconfig:
364 if b'acl' in ui._knownconfig:
366 return
365 return
367 ui.setconfig(b'extensions', b'acl', b'', source=b'internal')
366 ui.setconfig(b'extensions', b'acl', b'', source=b'internal')
368 extensions.loadall(ui, [b'acl'])
367 extensions.loadall(ui, [b'acl'])
369
368
370
369
371 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
370 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
372
371
373 ensureenabled(ui)
372 ensureenabled(ui)
374
373
375 if hooktype not in [b'pretxnchangegroup', b'pretxncommit', b'prepushkey']:
374 if hooktype not in [b'pretxnchangegroup', b'pretxncommit', b'prepushkey']:
376 raise error.Abort(
375 raise error.Abort(
377 _(
376 _(
378 b'config error - hook type "%s" cannot stop '
377 b'config error - hook type "%s" cannot stop '
379 b'incoming changesets, commits, nor bookmarks'
378 b'incoming changesets, commits, nor bookmarks'
380 )
379 )
381 % hooktype
380 % hooktype
382 )
381 )
383 if hooktype == b'pretxnchangegroup' and source not in ui.configlist(
382 if hooktype == b'pretxnchangegroup' and source not in ui.configlist(
384 b'acl', b'sources'
383 b'acl', b'sources'
385 ):
384 ):
386 ui.debug(b'acl: changes have source "%s" - skipping\n' % source)
385 ui.debug(b'acl: changes have source "%s" - skipping\n' % source)
387 return
386 return
388
387
389 user = None
388 user = None
390 if source == b'serve' and 'url' in kwargs:
389 if source == b'serve' and 'url' in kwargs:
391 url = kwargs['url'].split(b':')
390 url = kwargs['url'].split(b':')
392 if url[0] == b'remote' and url[1].startswith(b'http'):
391 if url[0] == b'remote' and url[1].startswith(b'http'):
393 user = urlreq.unquote(url[3])
392 user = urlreq.unquote(url[3])
394
393
395 if user is None:
394 if user is None:
396 user = procutil.getuser()
395 user = procutil.getuser()
397
396
398 ui.debug(b'acl: checking access for user "%s"\n' % user)
397 ui.debug(b'acl: checking access for user "%s"\n' % user)
399
398
400 if hooktype == b'prepushkey':
399 if hooktype == b'prepushkey':
401 _pkhook(ui, repo, hooktype, node, source, user, **kwargs)
400 _pkhook(ui, repo, hooktype, node, source, user, **kwargs)
402 else:
401 else:
403 _txnhook(ui, repo, hooktype, node, source, user, **kwargs)
402 _txnhook(ui, repo, hooktype, node, source, user, **kwargs)
404
403
405
404
406 def _pkhook(ui, repo, hooktype, node, source, user, **kwargs):
405 def _pkhook(ui, repo, hooktype, node, source, user, **kwargs):
407 if kwargs['namespace'] == b'bookmarks':
406 if kwargs['namespace'] == b'bookmarks':
408 bookmark = kwargs['key']
407 bookmark = kwargs['key']
409 ctx = kwargs['new']
408 ctx = kwargs['new']
410 allowbookmarks = buildmatch(ui, None, user, b'acl.allow.bookmarks')
409 allowbookmarks = buildmatch(ui, None, user, b'acl.allow.bookmarks')
411 denybookmarks = buildmatch(ui, None, user, b'acl.deny.bookmarks')
410 denybookmarks = buildmatch(ui, None, user, b'acl.deny.bookmarks')
412
411
413 if denybookmarks and denybookmarks(bookmark):
412 if denybookmarks and denybookmarks(bookmark):
414 raise error.Abort(
413 raise error.Abort(
415 _(
414 _(
416 b'acl: user "%s" denied on bookmark "%s"'
415 b'acl: user "%s" denied on bookmark "%s"'
417 b' (changeset "%s")'
416 b' (changeset "%s")'
418 )
417 )
419 % (user, bookmark, ctx)
418 % (user, bookmark, ctx)
420 )
419 )
421 if allowbookmarks and not allowbookmarks(bookmark):
420 if allowbookmarks and not allowbookmarks(bookmark):
422 raise error.Abort(
421 raise error.Abort(
423 _(
422 _(
424 b'acl: user "%s" not allowed on bookmark "%s"'
423 b'acl: user "%s" not allowed on bookmark "%s"'
425 b' (changeset "%s")'
424 b' (changeset "%s")'
426 )
425 )
427 % (user, bookmark, ctx)
426 % (user, bookmark, ctx)
428 )
427 )
429 ui.debug(
428 ui.debug(
430 b'acl: bookmark access granted: "%s" on bookmark "%s"\n'
429 b'acl: bookmark access granted: "%s" on bookmark "%s"\n'
431 % (ctx, bookmark)
430 % (ctx, bookmark)
432 )
431 )
433
432
434
433
435 def _txnhook(ui, repo, hooktype, node, source, user, **kwargs):
434 def _txnhook(ui, repo, hooktype, node, source, user, **kwargs):
436 # deprecated config: acl.config
435 # deprecated config: acl.config
437 cfg = ui.config(b'acl', b'config')
436 cfg = ui.config(b'acl', b'config')
438 if cfg:
437 if cfg:
439 ui.readconfig(
438 ui.readconfig(
440 cfg,
439 cfg,
441 sections=[
440 sections=[
442 b'acl.groups',
441 b'acl.groups',
443 b'acl.allow.branches',
442 b'acl.allow.branches',
444 b'acl.deny.branches',
443 b'acl.deny.branches',
445 b'acl.allow',
444 b'acl.allow',
446 b'acl.deny',
445 b'acl.deny',
447 ],
446 ],
448 )
447 )
449
448
450 allowbranches = buildmatch(ui, None, user, b'acl.allow.branches')
449 allowbranches = buildmatch(ui, None, user, b'acl.allow.branches')
451 denybranches = buildmatch(ui, None, user, b'acl.deny.branches')
450 denybranches = buildmatch(ui, None, user, b'acl.deny.branches')
452 allow = buildmatch(ui, repo, user, b'acl.allow')
451 allow = buildmatch(ui, repo, user, b'acl.allow')
453 deny = buildmatch(ui, repo, user, b'acl.deny')
452 deny = buildmatch(ui, repo, user, b'acl.deny')
454
453
455 for rev in pycompat.xrange(repo[node].rev(), len(repo)):
454 for rev in range(repo[node].rev(), len(repo)):
456 ctx = repo[rev]
455 ctx = repo[rev]
457 branch = ctx.branch()
456 branch = ctx.branch()
458 if denybranches and denybranches(branch):
457 if denybranches and denybranches(branch):
459 raise error.Abort(
458 raise error.Abort(
460 _(b'acl: user "%s" denied on branch "%s" (changeset "%s")')
459 _(b'acl: user "%s" denied on branch "%s" (changeset "%s")')
461 % (user, branch, ctx)
460 % (user, branch, ctx)
462 )
461 )
463 if allowbranches and not allowbranches(branch):
462 if allowbranches and not allowbranches(branch):
464 raise error.Abort(
463 raise error.Abort(
465 _(
464 _(
466 b'acl: user "%s" not allowed on branch "%s"'
465 b'acl: user "%s" not allowed on branch "%s"'
467 b' (changeset "%s")'
466 b' (changeset "%s")'
468 )
467 )
469 % (user, branch, ctx)
468 % (user, branch, ctx)
470 )
469 )
471 ui.debug(
470 ui.debug(
472 b'acl: branch access granted: "%s" on branch "%s"\n' % (ctx, branch)
471 b'acl: branch access granted: "%s" on branch "%s"\n' % (ctx, branch)
473 )
472 )
474
473
475 for f in ctx.files():
474 for f in ctx.files():
476 if deny and deny(f):
475 if deny and deny(f):
477 raise error.Abort(
476 raise error.Abort(
478 _(b'acl: user "%s" denied on "%s" (changeset "%s")')
477 _(b'acl: user "%s" denied on "%s" (changeset "%s")')
479 % (user, f, ctx)
478 % (user, f, ctx)
480 )
479 )
481 if allow and not allow(f):
480 if allow and not allow(f):
482 raise error.Abort(
481 raise error.Abort(
483 _(
482 _(
484 b'acl: user "%s" not allowed on "%s"'
483 b'acl: user "%s" not allowed on "%s"'
485 b' (changeset "%s")'
484 b' (changeset "%s")'
486 )
485 )
487 % (user, f, ctx)
486 % (user, f, ctx)
488 )
487 )
489 ui.debug(b'acl: path access granted: "%s"\n' % ctx)
488 ui.debug(b'acl: path access granted: "%s"\n' % ctx)
@@ -1,108 +1,107 b''
1 # -*- coding: UTF-8 -*-
1 # -*- coding: UTF-8 -*-
2 # beautifygraph.py - improve graph output by using Unicode characters
2 # beautifygraph.py - improve graph output by using Unicode characters
3 #
3 #
4 # Copyright 2018 John Stiles <johnstiles@gmail.com>
4 # Copyright 2018 John Stiles <johnstiles@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''beautify log -G output by using Unicode characters (EXPERIMENTAL)
9 '''beautify log -G output by using Unicode characters (EXPERIMENTAL)
10
10
11 A terminal with UTF-8 support and monospace narrow text are required.
11 A terminal with UTF-8 support and monospace narrow text are required.
12 '''
12 '''
13
13
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial import (
16 from mercurial import (
17 encoding,
17 encoding,
18 extensions,
18 extensions,
19 graphmod,
19 graphmod,
20 pycompat,
21 templatekw,
20 templatekw,
22 )
21 )
23
22
24 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
23 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
25 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
24 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
26 # be specifying the version(s) of Mercurial they are tested with, or
25 # be specifying the version(s) of Mercurial they are tested with, or
27 # leave the attribute unspecified.
26 # leave the attribute unspecified.
28 testedwith = b'ships-with-hg-core'
27 testedwith = b'ships-with-hg-core'
29
28
30
29
31 def prettyedge(before, edge, after):
30 def prettyedge(before, edge, after):
32 if edge == b'~':
31 if edge == b'~':
33 return b'\xE2\x95\xA7' # U+2567 ╧
32 return b'\xE2\x95\xA7' # U+2567 ╧
34 if edge == b'/':
33 if edge == b'/':
35 return b'\xE2\x95\xB1' # U+2571 ╱
34 return b'\xE2\x95\xB1' # U+2571 ╱
36 if edge == b'-':
35 if edge == b'-':
37 return b'\xE2\x94\x80' # U+2500 ─
36 return b'\xE2\x94\x80' # U+2500 ─
38 if edge == b'|':
37 if edge == b'|':
39 return b'\xE2\x94\x82' # U+2502 │
38 return b'\xE2\x94\x82' # U+2502 │
40 if edge == b':':
39 if edge == b':':
41 return b'\xE2\x94\x86' # U+2506 ┆
40 return b'\xE2\x94\x86' # U+2506 ┆
42 if edge == b'\\':
41 if edge == b'\\':
43 return b'\xE2\x95\xB2' # U+2572 ╲
42 return b'\xE2\x95\xB2' # U+2572 ╲
44 if edge == b'+':
43 if edge == b'+':
45 if before == b' ' and not after == b' ':
44 if before == b' ' and not after == b' ':
46 return b'\xE2\x94\x9C' # U+251C ├
45 return b'\xE2\x94\x9C' # U+251C ├
47 if after == b' ' and not before == b' ':
46 if after == b' ' and not before == b' ':
48 return b'\xE2\x94\xA4' # U+2524 ┤
47 return b'\xE2\x94\xA4' # U+2524 ┤
49 return b'\xE2\x94\xBC' # U+253C ┼
48 return b'\xE2\x94\xBC' # U+253C ┼
50 return edge
49 return edge
51
50
52
51
53 def convertedges(line):
52 def convertedges(line):
54 line = b' %s ' % line
53 line = b' %s ' % line
55 pretty = []
54 pretty = []
56 for idx in pycompat.xrange(len(line) - 2):
55 for idx in range(len(line) - 2):
57 pretty.append(
56 pretty.append(
58 prettyedge(
57 prettyedge(
59 line[idx : idx + 1],
58 line[idx : idx + 1],
60 line[idx + 1 : idx + 2],
59 line[idx + 1 : idx + 2],
61 line[idx + 2 : idx + 3],
60 line[idx + 2 : idx + 3],
62 )
61 )
63 )
62 )
64 return b''.join(pretty)
63 return b''.join(pretty)
65
64
66
65
67 def getprettygraphnode(orig, *args, **kwargs):
66 def getprettygraphnode(orig, *args, **kwargs):
68 node = orig(*args, **kwargs)
67 node = orig(*args, **kwargs)
69 if node == b'o':
68 if node == b'o':
70 return b'\xE2\x97\x8B' # U+25CB ○
69 return b'\xE2\x97\x8B' # U+25CB ○
71 if node == b'@':
70 if node == b'@':
72 return b'\xE2\x97\x89' # U+25C9 ◉
71 return b'\xE2\x97\x89' # U+25C9 ◉
73 if node == b'%':
72 if node == b'%':
74 return b'\xE2\x97\x8D' # U+25CE ◎
73 return b'\xE2\x97\x8D' # U+25CE ◎
75 if node == b'*':
74 if node == b'*':
76 return b'\xE2\x88\x97' # U+2217 ∗
75 return b'\xE2\x88\x97' # U+2217 ∗
77 if node == b'x':
76 if node == b'x':
78 return b'\xE2\x97\x8C' # U+25CC ◌
77 return b'\xE2\x97\x8C' # U+25CC ◌
79 if node == b'_':
78 if node == b'_':
80 return b'\xE2\x95\xA4' # U+2564 ╤
79 return b'\xE2\x95\xA4' # U+2564 ╤
81 return node
80 return node
82
81
83
82
84 def outputprettygraph(orig, ui, graph, *args, **kwargs):
83 def outputprettygraph(orig, ui, graph, *args, **kwargs):
85 (edges, text) = zip(*graph)
84 (edges, text) = zip(*graph)
86 graph = zip([convertedges(e) for e in edges], text)
85 graph = zip([convertedges(e) for e in edges], text)
87 return orig(ui, graph, *args, **kwargs)
86 return orig(ui, graph, *args, **kwargs)
88
87
89
88
90 def extsetup(ui):
89 def extsetup(ui):
91 if ui.plain(b'graph'):
90 if ui.plain(b'graph'):
92 return
91 return
93
92
94 if encoding.encoding != b'UTF-8':
93 if encoding.encoding != b'UTF-8':
95 ui.warn(_(b'beautifygraph: unsupported encoding, UTF-8 required\n'))
94 ui.warn(_(b'beautifygraph: unsupported encoding, UTF-8 required\n'))
96 return
95 return
97
96
98 if 'A' in encoding._wide:
97 if 'A' in encoding._wide:
99 ui.warn(
98 ui.warn(
100 _(
99 _(
101 b'beautifygraph: unsupported terminal settings, '
100 b'beautifygraph: unsupported terminal settings, '
102 b'monospace narrow text required\n'
101 b'monospace narrow text required\n'
103 )
102 )
104 )
103 )
105 return
104 return
106
105
107 extensions.wrapfunction(graphmod, b'outputgraph', outputprettygraph)
106 extensions.wrapfunction(graphmod, b'outputgraph', outputprettygraph)
108 extensions.wrapfunction(templatekw, b'getgraphnode', getprettygraphnode)
107 extensions.wrapfunction(templatekw, b'getgraphnode', getprettygraphnode)
@@ -1,1068 +1,1068 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import functools
8 import functools
9 import os
9 import os
10 import pickle
10 import pickle
11 import re
11 import re
12
12
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.pycompat import open
14 from mercurial.pycompat import open
15 from mercurial import (
15 from mercurial import (
16 encoding,
16 encoding,
17 error,
17 error,
18 hook,
18 hook,
19 pycompat,
19 pycompat,
20 util,
20 util,
21 )
21 )
22 from mercurial.utils import (
22 from mercurial.utils import (
23 dateutil,
23 dateutil,
24 procutil,
24 procutil,
25 stringutil,
25 stringutil,
26 )
26 )
27
27
28
28
29 class logentry:
29 class logentry:
30 """Class logentry has the following attributes:
30 """Class logentry has the following attributes:
31 .author - author name as CVS knows it
31 .author - author name as CVS knows it
32 .branch - name of branch this revision is on
32 .branch - name of branch this revision is on
33 .branches - revision tuple of branches starting at this revision
33 .branches - revision tuple of branches starting at this revision
34 .comment - commit message
34 .comment - commit message
35 .commitid - CVS commitid or None
35 .commitid - CVS commitid or None
36 .date - the commit date as a (time, tz) tuple
36 .date - the commit date as a (time, tz) tuple
37 .dead - true if file revision is dead
37 .dead - true if file revision is dead
38 .file - Name of file
38 .file - Name of file
39 .lines - a tuple (+lines, -lines) or None
39 .lines - a tuple (+lines, -lines) or None
40 .parent - Previous revision of this entry
40 .parent - Previous revision of this entry
41 .rcs - name of file as returned from CVS
41 .rcs - name of file as returned from CVS
42 .revision - revision number as tuple
42 .revision - revision number as tuple
43 .tags - list of tags on the file
43 .tags - list of tags on the file
44 .synthetic - is this a synthetic "file ... added on ..." revision?
44 .synthetic - is this a synthetic "file ... added on ..." revision?
45 .mergepoint - the branch that has been merged from (if present in
45 .mergepoint - the branch that has been merged from (if present in
46 rlog output) or None
46 rlog output) or None
47 .branchpoints - the branches that start at the current entry or empty
47 .branchpoints - the branches that start at the current entry or empty
48 """
48 """
49
49
50 def __init__(self, **entries):
50 def __init__(self, **entries):
51 self.synthetic = False
51 self.synthetic = False
52 self.__dict__.update(entries)
52 self.__dict__.update(entries)
53
53
54 def __repr__(self):
54 def __repr__(self):
55 items = ("%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__))
55 items = ("%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__))
56 return "%s(%s)" % (type(self).__name__, ", ".join(items))
56 return "%s(%s)" % (type(self).__name__, ", ".join(items))
57
57
58
58
59 class logerror(Exception):
59 class logerror(Exception):
60 pass
60 pass
61
61
62
62
63 def getrepopath(cvspath):
63 def getrepopath(cvspath):
64 """Return the repository path from a CVS path.
64 """Return the repository path from a CVS path.
65
65
66 >>> getrepopath(b'/foo/bar')
66 >>> getrepopath(b'/foo/bar')
67 '/foo/bar'
67 '/foo/bar'
68 >>> getrepopath(b'c:/foo/bar')
68 >>> getrepopath(b'c:/foo/bar')
69 '/foo/bar'
69 '/foo/bar'
70 >>> getrepopath(b':pserver:10/foo/bar')
70 >>> getrepopath(b':pserver:10/foo/bar')
71 '/foo/bar'
71 '/foo/bar'
72 >>> getrepopath(b':pserver:10c:/foo/bar')
72 >>> getrepopath(b':pserver:10c:/foo/bar')
73 '/foo/bar'
73 '/foo/bar'
74 >>> getrepopath(b':pserver:/foo/bar')
74 >>> getrepopath(b':pserver:/foo/bar')
75 '/foo/bar'
75 '/foo/bar'
76 >>> getrepopath(b':pserver:c:/foo/bar')
76 >>> getrepopath(b':pserver:c:/foo/bar')
77 '/foo/bar'
77 '/foo/bar'
78 >>> getrepopath(b':pserver:truc@foo.bar:/foo/bar')
78 >>> getrepopath(b':pserver:truc@foo.bar:/foo/bar')
79 '/foo/bar'
79 '/foo/bar'
80 >>> getrepopath(b':pserver:truc@foo.bar:c:/foo/bar')
80 >>> getrepopath(b':pserver:truc@foo.bar:c:/foo/bar')
81 '/foo/bar'
81 '/foo/bar'
82 >>> getrepopath(b'user@server/path/to/repository')
82 >>> getrepopath(b'user@server/path/to/repository')
83 '/path/to/repository'
83 '/path/to/repository'
84 """
84 """
85 # According to CVS manual, CVS paths are expressed like:
85 # According to CVS manual, CVS paths are expressed like:
86 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
86 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
87 #
87 #
88 # CVSpath is splitted into parts and then position of the first occurrence
88 # CVSpath is splitted into parts and then position of the first occurrence
89 # of the '/' char after the '@' is located. The solution is the rest of the
89 # of the '/' char after the '@' is located. The solution is the rest of the
90 # string after that '/' sign including it
90 # string after that '/' sign including it
91
91
92 parts = cvspath.split(b':')
92 parts = cvspath.split(b':')
93 atposition = parts[-1].find(b'@')
93 atposition = parts[-1].find(b'@')
94 start = 0
94 start = 0
95
95
96 if atposition != -1:
96 if atposition != -1:
97 start = atposition
97 start = atposition
98
98
99 repopath = parts[-1][parts[-1].find(b'/', start) :]
99 repopath = parts[-1][parts[-1].find(b'/', start) :]
100 return repopath
100 return repopath
101
101
102
102
103 def createlog(ui, directory=None, root=b"", rlog=True, cache=None):
103 def createlog(ui, directory=None, root=b"", rlog=True, cache=None):
104 '''Collect the CVS rlog'''
104 '''Collect the CVS rlog'''
105
105
106 # Because we store many duplicate commit log messages, reusing strings
106 # Because we store many duplicate commit log messages, reusing strings
107 # saves a lot of memory and pickle storage space.
107 # saves a lot of memory and pickle storage space.
108 _scache = {}
108 _scache = {}
109
109
110 def scache(s):
110 def scache(s):
111 """return a shared version of a string"""
111 """return a shared version of a string"""
112 return _scache.setdefault(s, s)
112 return _scache.setdefault(s, s)
113
113
114 ui.status(_(b'collecting CVS rlog\n'))
114 ui.status(_(b'collecting CVS rlog\n'))
115
115
116 log = [] # list of logentry objects containing the CVS state
116 log = [] # list of logentry objects containing the CVS state
117
117
118 # patterns to match in CVS (r)log output, by state of use
118 # patterns to match in CVS (r)log output, by state of use
119 re_00 = re.compile(b'RCS file: (.+)$')
119 re_00 = re.compile(b'RCS file: (.+)$')
120 re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$')
120 re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$')
121 re_02 = re.compile(b'cvs (r?log|server): (.+)\n$')
121 re_02 = re.compile(b'cvs (r?log|server): (.+)\n$')
122 re_03 = re.compile(
122 re_03 = re.compile(
123 b"(Cannot access.+CVSROOT)|(can't create temporary directory.+)$"
123 b"(Cannot access.+CVSROOT)|(can't create temporary directory.+)$"
124 )
124 )
125 re_10 = re.compile(b'Working file: (.+)$')
125 re_10 = re.compile(b'Working file: (.+)$')
126 re_20 = re.compile(b'symbolic names:')
126 re_20 = re.compile(b'symbolic names:')
127 re_30 = re.compile(b'\t(.+): ([\\d.]+)$')
127 re_30 = re.compile(b'\t(.+): ([\\d.]+)$')
128 re_31 = re.compile(b'----------------------------$')
128 re_31 = re.compile(b'----------------------------$')
129 re_32 = re.compile(
129 re_32 = re.compile(
130 b'======================================='
130 b'======================================='
131 b'======================================$'
131 b'======================================$'
132 )
132 )
133 re_50 = re.compile(br'revision ([\d.]+)(\s+locked by:\s+.+;)?$')
133 re_50 = re.compile(br'revision ([\d.]+)(\s+locked by:\s+.+;)?$')
134 re_60 = re.compile(
134 re_60 = re.compile(
135 br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
135 br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
136 br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
136 br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
137 br'(\s+commitid:\s+([^;]+);)?'
137 br'(\s+commitid:\s+([^;]+);)?'
138 br'(.*mergepoint:\s+([^;]+);)?'
138 br'(.*mergepoint:\s+([^;]+);)?'
139 )
139 )
140 re_70 = re.compile(b'branches: (.+);$')
140 re_70 = re.compile(b'branches: (.+);$')
141
141
142 file_added_re = re.compile(br'file [^/]+ was (initially )?added on branch')
142 file_added_re = re.compile(br'file [^/]+ was (initially )?added on branch')
143
143
144 prefix = b'' # leading path to strip of what we get from CVS
144 prefix = b'' # leading path to strip of what we get from CVS
145
145
146 if directory is None:
146 if directory is None:
147 # Current working directory
147 # Current working directory
148
148
149 # Get the real directory in the repository
149 # Get the real directory in the repository
150 try:
150 try:
151 with open(os.path.join(b'CVS', b'Repository'), b'rb') as f:
151 with open(os.path.join(b'CVS', b'Repository'), b'rb') as f:
152 prefix = f.read().strip()
152 prefix = f.read().strip()
153 directory = prefix
153 directory = prefix
154 if prefix == b".":
154 if prefix == b".":
155 prefix = b""
155 prefix = b""
156 except IOError:
156 except IOError:
157 raise logerror(_(b'not a CVS sandbox'))
157 raise logerror(_(b'not a CVS sandbox'))
158
158
159 if prefix and not prefix.endswith(pycompat.ossep):
159 if prefix and not prefix.endswith(pycompat.ossep):
160 prefix += pycompat.ossep
160 prefix += pycompat.ossep
161
161
162 # Use the Root file in the sandbox, if it exists
162 # Use the Root file in the sandbox, if it exists
163 try:
163 try:
164 root = open(os.path.join(b'CVS', b'Root'), b'rb').read().strip()
164 root = open(os.path.join(b'CVS', b'Root'), b'rb').read().strip()
165 except IOError:
165 except IOError:
166 pass
166 pass
167
167
168 if not root:
168 if not root:
169 root = encoding.environ.get(b'CVSROOT', b'')
169 root = encoding.environ.get(b'CVSROOT', b'')
170
170
171 # read log cache if one exists
171 # read log cache if one exists
172 oldlog = []
172 oldlog = []
173 date = None
173 date = None
174
174
175 if cache:
175 if cache:
176 cachedir = os.path.expanduser(b'~/.hg.cvsps')
176 cachedir = os.path.expanduser(b'~/.hg.cvsps')
177 if not os.path.exists(cachedir):
177 if not os.path.exists(cachedir):
178 os.mkdir(cachedir)
178 os.mkdir(cachedir)
179
179
180 # The cvsps cache pickle needs a uniquified name, based on the
180 # The cvsps cache pickle needs a uniquified name, based on the
181 # repository location. The address may have all sort of nasties
181 # repository location. The address may have all sort of nasties
182 # in it, slashes, colons and such. So here we take just the
182 # in it, slashes, colons and such. So here we take just the
183 # alphanumeric characters, concatenated in a way that does not
183 # alphanumeric characters, concatenated in a way that does not
184 # mix up the various components, so that
184 # mix up the various components, so that
185 # :pserver:user@server:/path
185 # :pserver:user@server:/path
186 # and
186 # and
187 # /pserver/user/server/path
187 # /pserver/user/server/path
188 # are mapped to different cache file names.
188 # are mapped to different cache file names.
189 cachefile = root.split(b":") + [directory, b"cache"]
189 cachefile = root.split(b":") + [directory, b"cache"]
190 cachefile = [b'-'.join(re.findall(br'\w+', s)) for s in cachefile if s]
190 cachefile = [b'-'.join(re.findall(br'\w+', s)) for s in cachefile if s]
191 cachefile = os.path.join(
191 cachefile = os.path.join(
192 cachedir, b'.'.join([s for s in cachefile if s])
192 cachedir, b'.'.join([s for s in cachefile if s])
193 )
193 )
194
194
195 if cache == b'update':
195 if cache == b'update':
196 try:
196 try:
197 ui.note(_(b'reading cvs log cache %s\n') % cachefile)
197 ui.note(_(b'reading cvs log cache %s\n') % cachefile)
198 oldlog = pickle.load(open(cachefile, b'rb'))
198 oldlog = pickle.load(open(cachefile, b'rb'))
199 for e in oldlog:
199 for e in oldlog:
200 if not (
200 if not (
201 util.safehasattr(e, b'branchpoints')
201 util.safehasattr(e, b'branchpoints')
202 and util.safehasattr(e, b'commitid')
202 and util.safehasattr(e, b'commitid')
203 and util.safehasattr(e, b'mergepoint')
203 and util.safehasattr(e, b'mergepoint')
204 ):
204 ):
205 ui.status(_(b'ignoring old cache\n'))
205 ui.status(_(b'ignoring old cache\n'))
206 oldlog = []
206 oldlog = []
207 break
207 break
208
208
209 ui.note(_(b'cache has %d log entries\n') % len(oldlog))
209 ui.note(_(b'cache has %d log entries\n') % len(oldlog))
210 except Exception as e:
210 except Exception as e:
211 ui.note(_(b'error reading cache: %r\n') % e)
211 ui.note(_(b'error reading cache: %r\n') % e)
212
212
213 if oldlog:
213 if oldlog:
214 date = oldlog[-1].date # last commit date as a (time,tz) tuple
214 date = oldlog[-1].date # last commit date as a (time,tz) tuple
215 date = dateutil.datestr(date, b'%Y/%m/%d %H:%M:%S %1%2')
215 date = dateutil.datestr(date, b'%Y/%m/%d %H:%M:%S %1%2')
216
216
217 # build the CVS commandline
217 # build the CVS commandline
218 cmd = [b'cvs', b'-q']
218 cmd = [b'cvs', b'-q']
219 if root:
219 if root:
220 cmd.append(b'-d%s' % root)
220 cmd.append(b'-d%s' % root)
221 p = util.normpath(getrepopath(root))
221 p = util.normpath(getrepopath(root))
222 if not p.endswith(b'/'):
222 if not p.endswith(b'/'):
223 p += b'/'
223 p += b'/'
224 if prefix:
224 if prefix:
225 # looks like normpath replaces "" by "."
225 # looks like normpath replaces "" by "."
226 prefix = p + util.normpath(prefix)
226 prefix = p + util.normpath(prefix)
227 else:
227 else:
228 prefix = p
228 prefix = p
229 cmd.append([b'log', b'rlog'][rlog])
229 cmd.append([b'log', b'rlog'][rlog])
230 if date:
230 if date:
231 # no space between option and date string
231 # no space between option and date string
232 cmd.append(b'-d>%s' % date)
232 cmd.append(b'-d>%s' % date)
233 cmd.append(directory)
233 cmd.append(directory)
234
234
235 # state machine begins here
235 # state machine begins here
236 tags = {} # dictionary of revisions on current file with their tags
236 tags = {} # dictionary of revisions on current file with their tags
237 branchmap = {} # mapping between branch names and revision numbers
237 branchmap = {} # mapping between branch names and revision numbers
238 rcsmap = {}
238 rcsmap = {}
239 state = 0
239 state = 0
240 store = False # set when a new record can be appended
240 store = False # set when a new record can be appended
241
241
242 cmd = [procutil.shellquote(arg) for arg in cmd]
242 cmd = [procutil.shellquote(arg) for arg in cmd]
243 ui.note(_(b"running %s\n") % (b' '.join(cmd)))
243 ui.note(_(b"running %s\n") % (b' '.join(cmd)))
244 ui.debug(b"prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
244 ui.debug(b"prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
245
245
246 pfp = procutil.popen(b' '.join(cmd), b'rb')
246 pfp = procutil.popen(b' '.join(cmd), b'rb')
247 peek = util.fromnativeeol(pfp.readline())
247 peek = util.fromnativeeol(pfp.readline())
248 while True:
248 while True:
249 line = peek
249 line = peek
250 if line == b'':
250 if line == b'':
251 break
251 break
252 peek = util.fromnativeeol(pfp.readline())
252 peek = util.fromnativeeol(pfp.readline())
253 if line.endswith(b'\n'):
253 if line.endswith(b'\n'):
254 line = line[:-1]
254 line = line[:-1]
255 # ui.debug('state=%d line=%r\n' % (state, line))
255 # ui.debug('state=%d line=%r\n' % (state, line))
256
256
257 if state == 0:
257 if state == 0:
258 # initial state, consume input until we see 'RCS file'
258 # initial state, consume input until we see 'RCS file'
259 match = re_00.match(line)
259 match = re_00.match(line)
260 if match:
260 if match:
261 rcs = match.group(1)
261 rcs = match.group(1)
262 tags = {}
262 tags = {}
263 if rlog:
263 if rlog:
264 filename = util.normpath(rcs[:-2])
264 filename = util.normpath(rcs[:-2])
265 if filename.startswith(prefix):
265 if filename.startswith(prefix):
266 filename = filename[len(prefix) :]
266 filename = filename[len(prefix) :]
267 if filename.startswith(b'/'):
267 if filename.startswith(b'/'):
268 filename = filename[1:]
268 filename = filename[1:]
269 if filename.startswith(b'Attic/'):
269 if filename.startswith(b'Attic/'):
270 filename = filename[6:]
270 filename = filename[6:]
271 else:
271 else:
272 filename = filename.replace(b'/Attic/', b'/')
272 filename = filename.replace(b'/Attic/', b'/')
273 state = 2
273 state = 2
274 continue
274 continue
275 state = 1
275 state = 1
276 continue
276 continue
277 match = re_01.match(line)
277 match = re_01.match(line)
278 if match:
278 if match:
279 raise logerror(match.group(1))
279 raise logerror(match.group(1))
280 match = re_02.match(line)
280 match = re_02.match(line)
281 if match:
281 if match:
282 raise logerror(match.group(2))
282 raise logerror(match.group(2))
283 if re_03.match(line):
283 if re_03.match(line):
284 raise logerror(line)
284 raise logerror(line)
285
285
286 elif state == 1:
286 elif state == 1:
287 # expect 'Working file' (only when using log instead of rlog)
287 # expect 'Working file' (only when using log instead of rlog)
288 match = re_10.match(line)
288 match = re_10.match(line)
289 assert match, _(b'RCS file must be followed by working file')
289 assert match, _(b'RCS file must be followed by working file')
290 filename = util.normpath(match.group(1))
290 filename = util.normpath(match.group(1))
291 state = 2
291 state = 2
292
292
293 elif state == 2:
293 elif state == 2:
294 # expect 'symbolic names'
294 # expect 'symbolic names'
295 if re_20.match(line):
295 if re_20.match(line):
296 branchmap = {}
296 branchmap = {}
297 state = 3
297 state = 3
298
298
299 elif state == 3:
299 elif state == 3:
300 # read the symbolic names and store as tags
300 # read the symbolic names and store as tags
301 match = re_30.match(line)
301 match = re_30.match(line)
302 if match:
302 if match:
303 rev = [int(x) for x in match.group(2).split(b'.')]
303 rev = [int(x) for x in match.group(2).split(b'.')]
304
304
305 # Convert magic branch number to an odd-numbered one
305 # Convert magic branch number to an odd-numbered one
306 revn = len(rev)
306 revn = len(rev)
307 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
307 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
308 rev = rev[:-2] + rev[-1:]
308 rev = rev[:-2] + rev[-1:]
309 rev = tuple(rev)
309 rev = tuple(rev)
310
310
311 if rev not in tags:
311 if rev not in tags:
312 tags[rev] = []
312 tags[rev] = []
313 tags[rev].append(match.group(1))
313 tags[rev].append(match.group(1))
314 branchmap[match.group(1)] = match.group(2)
314 branchmap[match.group(1)] = match.group(2)
315
315
316 elif re_31.match(line):
316 elif re_31.match(line):
317 state = 5
317 state = 5
318 elif re_32.match(line):
318 elif re_32.match(line):
319 state = 0
319 state = 0
320
320
321 elif state == 4:
321 elif state == 4:
322 # expecting '------' separator before first revision
322 # expecting '------' separator before first revision
323 if re_31.match(line):
323 if re_31.match(line):
324 state = 5
324 state = 5
325 else:
325 else:
326 assert not re_32.match(line), _(
326 assert not re_32.match(line), _(
327 b'must have at least some revisions'
327 b'must have at least some revisions'
328 )
328 )
329
329
330 elif state == 5:
330 elif state == 5:
331 # expecting revision number and possibly (ignored) lock indication
331 # expecting revision number and possibly (ignored) lock indication
332 # we create the logentry here from values stored in states 0 to 4,
332 # we create the logentry here from values stored in states 0 to 4,
333 # as this state is re-entered for subsequent revisions of a file.
333 # as this state is re-entered for subsequent revisions of a file.
334 match = re_50.match(line)
334 match = re_50.match(line)
335 assert match, _(b'expected revision number')
335 assert match, _(b'expected revision number')
336 e = logentry(
336 e = logentry(
337 rcs=scache(rcs),
337 rcs=scache(rcs),
338 file=scache(filename),
338 file=scache(filename),
339 revision=tuple([int(x) for x in match.group(1).split(b'.')]),
339 revision=tuple([int(x) for x in match.group(1).split(b'.')]),
340 branches=[],
340 branches=[],
341 parent=None,
341 parent=None,
342 commitid=None,
342 commitid=None,
343 mergepoint=None,
343 mergepoint=None,
344 branchpoints=set(),
344 branchpoints=set(),
345 )
345 )
346
346
347 state = 6
347 state = 6
348
348
349 elif state == 6:
349 elif state == 6:
350 # expecting date, author, state, lines changed
350 # expecting date, author, state, lines changed
351 match = re_60.match(line)
351 match = re_60.match(line)
352 assert match, _(b'revision must be followed by date line')
352 assert match, _(b'revision must be followed by date line')
353 d = match.group(1)
353 d = match.group(1)
354 if d[2] == b'/':
354 if d[2] == b'/':
355 # Y2K
355 # Y2K
356 d = b'19' + d
356 d = b'19' + d
357
357
358 if len(d.split()) != 3:
358 if len(d.split()) != 3:
359 # cvs log dates always in GMT
359 # cvs log dates always in GMT
360 d = d + b' UTC'
360 d = d + b' UTC'
361 e.date = dateutil.parsedate(
361 e.date = dateutil.parsedate(
362 d,
362 d,
363 [
363 [
364 b'%y/%m/%d %H:%M:%S',
364 b'%y/%m/%d %H:%M:%S',
365 b'%Y/%m/%d %H:%M:%S',
365 b'%Y/%m/%d %H:%M:%S',
366 b'%Y-%m-%d %H:%M:%S',
366 b'%Y-%m-%d %H:%M:%S',
367 ],
367 ],
368 )
368 )
369 e.author = scache(match.group(2))
369 e.author = scache(match.group(2))
370 e.dead = match.group(3).lower() == b'dead'
370 e.dead = match.group(3).lower() == b'dead'
371
371
372 if match.group(5):
372 if match.group(5):
373 if match.group(6):
373 if match.group(6):
374 e.lines = (int(match.group(5)), int(match.group(6)))
374 e.lines = (int(match.group(5)), int(match.group(6)))
375 else:
375 else:
376 e.lines = (int(match.group(5)), 0)
376 e.lines = (int(match.group(5)), 0)
377 elif match.group(6):
377 elif match.group(6):
378 e.lines = (0, int(match.group(6)))
378 e.lines = (0, int(match.group(6)))
379 else:
379 else:
380 e.lines = None
380 e.lines = None
381
381
382 if match.group(7): # cvs 1.12 commitid
382 if match.group(7): # cvs 1.12 commitid
383 e.commitid = match.group(8)
383 e.commitid = match.group(8)
384
384
385 if match.group(9): # cvsnt mergepoint
385 if match.group(9): # cvsnt mergepoint
386 myrev = match.group(10).split(b'.')
386 myrev = match.group(10).split(b'.')
387 if len(myrev) == 2: # head
387 if len(myrev) == 2: # head
388 e.mergepoint = b'HEAD'
388 e.mergepoint = b'HEAD'
389 else:
389 else:
390 myrev = b'.'.join(myrev[:-2] + [b'0', myrev[-2]])
390 myrev = b'.'.join(myrev[:-2] + [b'0', myrev[-2]])
391 branches = [b for b in branchmap if branchmap[b] == myrev]
391 branches = [b for b in branchmap if branchmap[b] == myrev]
392 assert len(branches) == 1, (
392 assert len(branches) == 1, (
393 b'unknown branch: %s' % e.mergepoint
393 b'unknown branch: %s' % e.mergepoint
394 )
394 )
395 e.mergepoint = branches[0]
395 e.mergepoint = branches[0]
396
396
397 e.comment = []
397 e.comment = []
398 state = 7
398 state = 7
399
399
400 elif state == 7:
400 elif state == 7:
401 # read the revision numbers of branches that start at this revision
401 # read the revision numbers of branches that start at this revision
402 # or store the commit log message otherwise
402 # or store the commit log message otherwise
403 m = re_70.match(line)
403 m = re_70.match(line)
404 if m:
404 if m:
405 e.branches = [
405 e.branches = [
406 tuple([int(y) for y in x.strip().split(b'.')])
406 tuple([int(y) for y in x.strip().split(b'.')])
407 for x in m.group(1).split(b';')
407 for x in m.group(1).split(b';')
408 ]
408 ]
409 state = 8
409 state = 8
410 elif re_31.match(line) and re_50.match(peek):
410 elif re_31.match(line) and re_50.match(peek):
411 state = 5
411 state = 5
412 store = True
412 store = True
413 elif re_32.match(line):
413 elif re_32.match(line):
414 state = 0
414 state = 0
415 store = True
415 store = True
416 else:
416 else:
417 e.comment.append(line)
417 e.comment.append(line)
418
418
419 elif state == 8:
419 elif state == 8:
420 # store commit log message
420 # store commit log message
421 if re_31.match(line):
421 if re_31.match(line):
422 cpeek = peek
422 cpeek = peek
423 if cpeek.endswith(b'\n'):
423 if cpeek.endswith(b'\n'):
424 cpeek = cpeek[:-1]
424 cpeek = cpeek[:-1]
425 if re_50.match(cpeek):
425 if re_50.match(cpeek):
426 state = 5
426 state = 5
427 store = True
427 store = True
428 else:
428 else:
429 e.comment.append(line)
429 e.comment.append(line)
430 elif re_32.match(line):
430 elif re_32.match(line):
431 state = 0
431 state = 0
432 store = True
432 store = True
433 else:
433 else:
434 e.comment.append(line)
434 e.comment.append(line)
435
435
436 # When a file is added on a branch B1, CVS creates a synthetic
436 # When a file is added on a branch B1, CVS creates a synthetic
437 # dead trunk revision 1.1 so that the branch has a root.
437 # dead trunk revision 1.1 so that the branch has a root.
438 # Likewise, if you merge such a file to a later branch B2 (one
438 # Likewise, if you merge such a file to a later branch B2 (one
439 # that already existed when the file was added on B1), CVS
439 # that already existed when the file was added on B1), CVS
440 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
440 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
441 # these revisions now, but mark them synthetic so
441 # these revisions now, but mark them synthetic so
442 # createchangeset() can take care of them.
442 # createchangeset() can take care of them.
443 if (
443 if (
444 store
444 store
445 and e.dead
445 and e.dead
446 and e.revision[-1] == 1
446 and e.revision[-1] == 1
447 and len(e.comment) == 1 # 1.1 or 1.1.x.1
447 and len(e.comment) == 1 # 1.1 or 1.1.x.1
448 and file_added_re.match(e.comment[0])
448 and file_added_re.match(e.comment[0])
449 ):
449 ):
450 ui.debug(
450 ui.debug(
451 b'found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0])
451 b'found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0])
452 )
452 )
453 e.synthetic = True
453 e.synthetic = True
454
454
455 if store:
455 if store:
456 # clean up the results and save in the log.
456 # clean up the results and save in the log.
457 store = False
457 store = False
458 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
458 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
459 e.comment = scache(b'\n'.join(e.comment))
459 e.comment = scache(b'\n'.join(e.comment))
460
460
461 revn = len(e.revision)
461 revn = len(e.revision)
462 if revn > 3 and (revn % 2) == 0:
462 if revn > 3 and (revn % 2) == 0:
463 e.branch = tags.get(e.revision[:-1], [None])[0]
463 e.branch = tags.get(e.revision[:-1], [None])[0]
464 else:
464 else:
465 e.branch = None
465 e.branch = None
466
466
467 # find the branches starting from this revision
467 # find the branches starting from this revision
468 branchpoints = set()
468 branchpoints = set()
469 for branch, revision in branchmap.items():
469 for branch, revision in branchmap.items():
470 revparts = tuple([int(i) for i in revision.split(b'.')])
470 revparts = tuple([int(i) for i in revision.split(b'.')])
471 if len(revparts) < 2: # bad tags
471 if len(revparts) < 2: # bad tags
472 continue
472 continue
473 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
473 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
474 # normal branch
474 # normal branch
475 if revparts[:-2] == e.revision:
475 if revparts[:-2] == e.revision:
476 branchpoints.add(branch)
476 branchpoints.add(branch)
477 elif revparts == (1, 1, 1): # vendor branch
477 elif revparts == (1, 1, 1): # vendor branch
478 if revparts in e.branches:
478 if revparts in e.branches:
479 branchpoints.add(branch)
479 branchpoints.add(branch)
480 e.branchpoints = branchpoints
480 e.branchpoints = branchpoints
481
481
482 log.append(e)
482 log.append(e)
483
483
484 rcsmap[e.rcs.replace(b'/Attic/', b'/')] = e.rcs
484 rcsmap[e.rcs.replace(b'/Attic/', b'/')] = e.rcs
485
485
486 if len(log) % 100 == 0:
486 if len(log) % 100 == 0:
487 ui.status(
487 ui.status(
488 stringutil.ellipsis(b'%d %s' % (len(log), e.file), 80)
488 stringutil.ellipsis(b'%d %s' % (len(log), e.file), 80)
489 + b'\n'
489 + b'\n'
490 )
490 )
491
491
492 log.sort(key=lambda x: (x.rcs, x.revision))
492 log.sort(key=lambda x: (x.rcs, x.revision))
493
493
494 # find parent revisions of individual files
494 # find parent revisions of individual files
495 versions = {}
495 versions = {}
496 for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
496 for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
497 rcs = e.rcs.replace(b'/Attic/', b'/')
497 rcs = e.rcs.replace(b'/Attic/', b'/')
498 if rcs in rcsmap:
498 if rcs in rcsmap:
499 e.rcs = rcsmap[rcs]
499 e.rcs = rcsmap[rcs]
500 branch = e.revision[:-1]
500 branch = e.revision[:-1]
501 versions[(e.rcs, branch)] = e.revision
501 versions[(e.rcs, branch)] = e.revision
502
502
503 for e in log:
503 for e in log:
504 branch = e.revision[:-1]
504 branch = e.revision[:-1]
505 p = versions.get((e.rcs, branch), None)
505 p = versions.get((e.rcs, branch), None)
506 if p is None:
506 if p is None:
507 p = e.revision[:-2]
507 p = e.revision[:-2]
508 e.parent = p
508 e.parent = p
509 versions[(e.rcs, branch)] = e.revision
509 versions[(e.rcs, branch)] = e.revision
510
510
511 # update the log cache
511 # update the log cache
512 if cache:
512 if cache:
513 if log:
513 if log:
514 # join up the old and new logs
514 # join up the old and new logs
515 log.sort(key=lambda x: x.date)
515 log.sort(key=lambda x: x.date)
516
516
517 if oldlog and oldlog[-1].date >= log[0].date:
517 if oldlog and oldlog[-1].date >= log[0].date:
518 raise logerror(
518 raise logerror(
519 _(
519 _(
520 b'log cache overlaps with new log entries,'
520 b'log cache overlaps with new log entries,'
521 b' re-run without cache.'
521 b' re-run without cache.'
522 )
522 )
523 )
523 )
524
524
525 log = oldlog + log
525 log = oldlog + log
526
526
527 # write the new cachefile
527 # write the new cachefile
528 ui.note(_(b'writing cvs log cache %s\n') % cachefile)
528 ui.note(_(b'writing cvs log cache %s\n') % cachefile)
529 pickle.dump(log, open(cachefile, b'wb'))
529 pickle.dump(log, open(cachefile, b'wb'))
530 else:
530 else:
531 log = oldlog
531 log = oldlog
532
532
533 ui.status(_(b'%d log entries\n') % len(log))
533 ui.status(_(b'%d log entries\n') % len(log))
534
534
535 encodings = ui.configlist(b'convert', b'cvsps.logencoding')
535 encodings = ui.configlist(b'convert', b'cvsps.logencoding')
536 if encodings:
536 if encodings:
537
537
538 def revstr(r):
538 def revstr(r):
539 # this is needed, because logentry.revision is a tuple of "int"
539 # this is needed, because logentry.revision is a tuple of "int"
540 # (e.g. (1, 2) for "1.2")
540 # (e.g. (1, 2) for "1.2")
541 return b'.'.join(pycompat.maplist(pycompat.bytestr, r))
541 return b'.'.join(pycompat.maplist(pycompat.bytestr, r))
542
542
543 for entry in log:
543 for entry in log:
544 comment = entry.comment
544 comment = entry.comment
545 for e in encodings:
545 for e in encodings:
546 try:
546 try:
547 entry.comment = comment.decode(pycompat.sysstr(e)).encode(
547 entry.comment = comment.decode(pycompat.sysstr(e)).encode(
548 'utf-8'
548 'utf-8'
549 )
549 )
550 if ui.debugflag:
550 if ui.debugflag:
551 ui.debug(
551 ui.debug(
552 b"transcoding by %s: %s of %s\n"
552 b"transcoding by %s: %s of %s\n"
553 % (e, revstr(entry.revision), entry.file)
553 % (e, revstr(entry.revision), entry.file)
554 )
554 )
555 break
555 break
556 except UnicodeDecodeError:
556 except UnicodeDecodeError:
557 pass # try next encoding
557 pass # try next encoding
558 except LookupError as inst: # unknown encoding, maybe
558 except LookupError as inst: # unknown encoding, maybe
559 raise error.Abort(
559 raise error.Abort(
560 pycompat.bytestr(inst),
560 pycompat.bytestr(inst),
561 hint=_(
561 hint=_(
562 b'check convert.cvsps.logencoding configuration'
562 b'check convert.cvsps.logencoding configuration'
563 ),
563 ),
564 )
564 )
565 else:
565 else:
566 raise error.Abort(
566 raise error.Abort(
567 _(
567 _(
568 b"no encoding can transcode"
568 b"no encoding can transcode"
569 b" CVS log message for %s of %s"
569 b" CVS log message for %s of %s"
570 )
570 )
571 % (revstr(entry.revision), entry.file),
571 % (revstr(entry.revision), entry.file),
572 hint=_(b'check convert.cvsps.logencoding configuration'),
572 hint=_(b'check convert.cvsps.logencoding configuration'),
573 )
573 )
574
574
575 hook.hook(ui, None, b"cvslog", True, log=log)
575 hook.hook(ui, None, b"cvslog", True, log=log)
576
576
577 return log
577 return log
578
578
579
579
class changeset:
    """Class changeset has the following attributes:
    .id - integer identifying this changeset (list index)
    .author - author name as CVS knows it
    .branch - name of branch this changeset is on, or None
    .comment - commit message
    .commitid - CVS commitid or None
    .date - the commit date as a (time,tz) tuple
    .entries - list of logentry objects in this changeset
    .parents - list of one or two parent changesets
    .tags - list of tags on this changeset
    .synthetic - from synthetic revision "file ... added on branch ..."
    .mergepoint- the branch that has been merged from or None
    .branchpoints- the branches that start at the current entry or empty
    """

    def __init__(self, **entries):
        # ``id`` is assigned later (after the changesets have been ordered
        # and numbered); until then it stays None.
        self.id = None
        # Synthetic changesets wrap CVS's auto-generated dead revisions
        # ("file ... added on branch ..."); marked later during processing.
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        # BUG FIX: __repr__ must return str on Python 3 — returning bytes
        # raises "TypeError: __repr__ returned non-string".  The original
        # also interpolated type(self).__name__ (a str) into a bytes format
        # string, which would raise TypeError by itself.  Build a plain str
        # instead; %r renders bytes attribute values safely.
        items = (
            "%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)
        )
        return "%s(%s)" % (type(self).__name__, ", ".join(items))
606
606
607
607
608 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
608 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
609 '''Convert log into changesets.'''
609 '''Convert log into changesets.'''
610
610
611 ui.status(_(b'creating changesets\n'))
611 ui.status(_(b'creating changesets\n'))
612
612
613 # try to order commitids by date
613 # try to order commitids by date
614 mindate = {}
614 mindate = {}
615 for e in log:
615 for e in log:
616 if e.commitid:
616 if e.commitid:
617 if e.commitid not in mindate:
617 if e.commitid not in mindate:
618 mindate[e.commitid] = e.date
618 mindate[e.commitid] = e.date
619 else:
619 else:
620 mindate[e.commitid] = min(e.date, mindate[e.commitid])
620 mindate[e.commitid] = min(e.date, mindate[e.commitid])
621
621
622 # Merge changesets
622 # Merge changesets
623 log.sort(
623 log.sort(
624 key=lambda x: (
624 key=lambda x: (
625 mindate.get(x.commitid, (-1, 0)),
625 mindate.get(x.commitid, (-1, 0)),
626 x.commitid or b'',
626 x.commitid or b'',
627 x.comment,
627 x.comment,
628 x.author,
628 x.author,
629 x.branch or b'',
629 x.branch or b'',
630 x.date,
630 x.date,
631 x.branchpoints,
631 x.branchpoints,
632 )
632 )
633 )
633 )
634
634
635 changesets = []
635 changesets = []
636 files = set()
636 files = set()
637 c = None
637 c = None
638 for i, e in enumerate(log):
638 for i, e in enumerate(log):
639
639
640 # Check if log entry belongs to the current changeset or not.
640 # Check if log entry belongs to the current changeset or not.
641
641
642 # Since CVS is file-centric, two different file revisions with
642 # Since CVS is file-centric, two different file revisions with
643 # different branchpoints should be treated as belonging to two
643 # different branchpoints should be treated as belonging to two
644 # different changesets (and the ordering is important and not
644 # different changesets (and the ordering is important and not
645 # honoured by cvsps at this point).
645 # honoured by cvsps at this point).
646 #
646 #
647 # Consider the following case:
647 # Consider the following case:
648 # foo 1.1 branchpoints: [MYBRANCH]
648 # foo 1.1 branchpoints: [MYBRANCH]
649 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
649 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
650 #
650 #
651 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
651 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
652 # later version of foo may be in MYBRANCH2, so foo should be the
652 # later version of foo may be in MYBRANCH2, so foo should be the
653 # first changeset and bar the next and MYBRANCH and MYBRANCH2
653 # first changeset and bar the next and MYBRANCH and MYBRANCH2
654 # should both start off of the bar changeset. No provisions are
654 # should both start off of the bar changeset. No provisions are
655 # made to ensure that this is, in fact, what happens.
655 # made to ensure that this is, in fact, what happens.
656 if not (
656 if not (
657 c
657 c
658 and e.branchpoints == c.branchpoints
658 and e.branchpoints == c.branchpoints
659 and ( # cvs commitids
659 and ( # cvs commitids
660 (e.commitid is not None and e.commitid == c.commitid)
660 (e.commitid is not None and e.commitid == c.commitid)
661 or ( # no commitids, use fuzzy commit detection
661 or ( # no commitids, use fuzzy commit detection
662 (e.commitid is None or c.commitid is None)
662 (e.commitid is None or c.commitid is None)
663 and e.comment == c.comment
663 and e.comment == c.comment
664 and e.author == c.author
664 and e.author == c.author
665 and e.branch == c.branch
665 and e.branch == c.branch
666 and (
666 and (
667 (c.date[0] + c.date[1])
667 (c.date[0] + c.date[1])
668 <= (e.date[0] + e.date[1])
668 <= (e.date[0] + e.date[1])
669 <= (c.date[0] + c.date[1]) + fuzz
669 <= (c.date[0] + c.date[1]) + fuzz
670 )
670 )
671 and e.file not in files
671 and e.file not in files
672 )
672 )
673 )
673 )
674 ):
674 ):
675 c = changeset(
675 c = changeset(
676 comment=e.comment,
676 comment=e.comment,
677 author=e.author,
677 author=e.author,
678 branch=e.branch,
678 branch=e.branch,
679 date=e.date,
679 date=e.date,
680 entries=[],
680 entries=[],
681 mergepoint=e.mergepoint,
681 mergepoint=e.mergepoint,
682 branchpoints=e.branchpoints,
682 branchpoints=e.branchpoints,
683 commitid=e.commitid,
683 commitid=e.commitid,
684 )
684 )
685 changesets.append(c)
685 changesets.append(c)
686
686
687 files = set()
687 files = set()
688 if len(changesets) % 100 == 0:
688 if len(changesets) % 100 == 0:
689 t = b'%d %s' % (len(changesets), repr(e.comment)[1:-1])
689 t = b'%d %s' % (len(changesets), repr(e.comment)[1:-1])
690 ui.status(stringutil.ellipsis(t, 80) + b'\n')
690 ui.status(stringutil.ellipsis(t, 80) + b'\n')
691
691
692 c.entries.append(e)
692 c.entries.append(e)
693 files.add(e.file)
693 files.add(e.file)
694 c.date = e.date # changeset date is date of latest commit in it
694 c.date = e.date # changeset date is date of latest commit in it
695
695
696 # Mark synthetic changesets
696 # Mark synthetic changesets
697
697
698 for c in changesets:
698 for c in changesets:
699 # Synthetic revisions always get their own changeset, because
699 # Synthetic revisions always get their own changeset, because
700 # the log message includes the filename. E.g. if you add file3
700 # the log message includes the filename. E.g. if you add file3
701 # and file4 on a branch, you get four log entries and three
701 # and file4 on a branch, you get four log entries and three
702 # changesets:
702 # changesets:
703 # "File file3 was added on branch ..." (synthetic, 1 entry)
703 # "File file3 was added on branch ..." (synthetic, 1 entry)
704 # "File file4 was added on branch ..." (synthetic, 1 entry)
704 # "File file4 was added on branch ..." (synthetic, 1 entry)
705 # "Add file3 and file4 to fix ..." (real, 2 entries)
705 # "Add file3 and file4 to fix ..." (real, 2 entries)
706 # Hence the check for 1 entry here.
706 # Hence the check for 1 entry here.
707 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
707 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
708
708
709 # Sort files in each changeset
709 # Sort files in each changeset
710
710
711 def entitycompare(l, r):
711 def entitycompare(l, r):
712 """Mimic cvsps sorting order"""
712 """Mimic cvsps sorting order"""
713 l = l.file.split(b'/')
713 l = l.file.split(b'/')
714 r = r.file.split(b'/')
714 r = r.file.split(b'/')
715 nl = len(l)
715 nl = len(l)
716 nr = len(r)
716 nr = len(r)
717 n = min(nl, nr)
717 n = min(nl, nr)
718 for i in range(n):
718 for i in range(n):
719 if i + 1 == nl and nl < nr:
719 if i + 1 == nl and nl < nr:
720 return -1
720 return -1
721 elif i + 1 == nr and nl > nr:
721 elif i + 1 == nr and nl > nr:
722 return +1
722 return +1
723 elif l[i] < r[i]:
723 elif l[i] < r[i]:
724 return -1
724 return -1
725 elif l[i] > r[i]:
725 elif l[i] > r[i]:
726 return +1
726 return +1
727 return 0
727 return 0
728
728
729 for c in changesets:
729 for c in changesets:
730 c.entries.sort(key=functools.cmp_to_key(entitycompare))
730 c.entries.sort(key=functools.cmp_to_key(entitycompare))
731
731
732 # Sort changesets by date
732 # Sort changesets by date
733
733
734 odd = set()
734 odd = set()
735
735
736 def cscmp(l, r):
736 def cscmp(l, r):
737 d = sum(l.date) - sum(r.date)
737 d = sum(l.date) - sum(r.date)
738 if d:
738 if d:
739 return d
739 return d
740
740
741 # detect vendor branches and initial commits on a branch
741 # detect vendor branches and initial commits on a branch
742 le = {}
742 le = {}
743 for e in l.entries:
743 for e in l.entries:
744 le[e.rcs] = e.revision
744 le[e.rcs] = e.revision
745 re = {}
745 re = {}
746 for e in r.entries:
746 for e in r.entries:
747 re[e.rcs] = e.revision
747 re[e.rcs] = e.revision
748
748
749 d = 0
749 d = 0
750 for e in l.entries:
750 for e in l.entries:
751 if re.get(e.rcs, None) == e.parent:
751 if re.get(e.rcs, None) == e.parent:
752 assert not d
752 assert not d
753 d = 1
753 d = 1
754 break
754 break
755
755
756 for e in r.entries:
756 for e in r.entries:
757 if le.get(e.rcs, None) == e.parent:
757 if le.get(e.rcs, None) == e.parent:
758 if d:
758 if d:
759 odd.add((l, r))
759 odd.add((l, r))
760 d = -1
760 d = -1
761 break
761 break
762 # By this point, the changesets are sufficiently compared that
762 # By this point, the changesets are sufficiently compared that
763 # we don't really care about ordering. However, this leaves
763 # we don't really care about ordering. However, this leaves
764 # some race conditions in the tests, so we compare on the
764 # some race conditions in the tests, so we compare on the
765 # number of files modified, the files contained in each
765 # number of files modified, the files contained in each
766 # changeset, and the branchpoints in the change to ensure test
766 # changeset, and the branchpoints in the change to ensure test
767 # output remains stable.
767 # output remains stable.
768
768
769 # recommended replacement for cmp from
769 # recommended replacement for cmp from
770 # https://docs.python.org/3.0/whatsnew/3.0.html
770 # https://docs.python.org/3.0/whatsnew/3.0.html
771 c = lambda x, y: (x > y) - (x < y)
771 c = lambda x, y: (x > y) - (x < y)
772 # Sort bigger changes first.
772 # Sort bigger changes first.
773 if not d:
773 if not d:
774 d = c(len(l.entries), len(r.entries))
774 d = c(len(l.entries), len(r.entries))
775 # Try sorting by filename in the change.
775 # Try sorting by filename in the change.
776 if not d:
776 if not d:
777 d = c([e.file for e in l.entries], [e.file for e in r.entries])
777 d = c([e.file for e in l.entries], [e.file for e in r.entries])
778 # Try and put changes without a branch point before ones with
778 # Try and put changes without a branch point before ones with
779 # a branch point.
779 # a branch point.
780 if not d:
780 if not d:
781 d = c(len(l.branchpoints), len(r.branchpoints))
781 d = c(len(l.branchpoints), len(r.branchpoints))
782 return d
782 return d
783
783
784 changesets.sort(key=functools.cmp_to_key(cscmp))
784 changesets.sort(key=functools.cmp_to_key(cscmp))
785
785
786 # Collect tags
786 # Collect tags
787
787
788 globaltags = {}
788 globaltags = {}
789 for c in changesets:
789 for c in changesets:
790 for e in c.entries:
790 for e in c.entries:
791 for tag in e.tags:
791 for tag in e.tags:
792 # remember which is the latest changeset to have this tag
792 # remember which is the latest changeset to have this tag
793 globaltags[tag] = c
793 globaltags[tag] = c
794
794
795 for c in changesets:
795 for c in changesets:
796 tags = set()
796 tags = set()
797 for e in c.entries:
797 for e in c.entries:
798 tags.update(e.tags)
798 tags.update(e.tags)
799 # remember tags only if this is the latest changeset to have it
799 # remember tags only if this is the latest changeset to have it
800 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
800 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
801
801
802 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
802 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
803 # by inserting dummy changesets with two parents, and handle
803 # by inserting dummy changesets with two parents, and handle
804 # {{mergefrombranch BRANCHNAME}} by setting two parents.
804 # {{mergefrombranch BRANCHNAME}} by setting two parents.
805
805
806 if mergeto is None:
806 if mergeto is None:
807 mergeto = br'{{mergetobranch ([-\w]+)}}'
807 mergeto = br'{{mergetobranch ([-\w]+)}}'
808 if mergeto:
808 if mergeto:
809 mergeto = re.compile(mergeto)
809 mergeto = re.compile(mergeto)
810
810
811 if mergefrom is None:
811 if mergefrom is None:
812 mergefrom = br'{{mergefrombranch ([-\w]+)}}'
812 mergefrom = br'{{mergefrombranch ([-\w]+)}}'
813 if mergefrom:
813 if mergefrom:
814 mergefrom = re.compile(mergefrom)
814 mergefrom = re.compile(mergefrom)
815
815
816 versions = {} # changeset index where we saw any particular file version
816 versions = {} # changeset index where we saw any particular file version
817 branches = {} # changeset index where we saw a branch
817 branches = {} # changeset index where we saw a branch
818 n = len(changesets)
818 n = len(changesets)
819 i = 0
819 i = 0
820 while i < n:
820 while i < n:
821 c = changesets[i]
821 c = changesets[i]
822
822
823 for f in c.entries:
823 for f in c.entries:
824 versions[(f.rcs, f.revision)] = i
824 versions[(f.rcs, f.revision)] = i
825
825
826 p = None
826 p = None
827 if c.branch in branches:
827 if c.branch in branches:
828 p = branches[c.branch]
828 p = branches[c.branch]
829 else:
829 else:
830 # first changeset on a new branch
830 # first changeset on a new branch
831 # the parent is a changeset with the branch in its
831 # the parent is a changeset with the branch in its
832 # branchpoints such that it is the latest possible
832 # branchpoints such that it is the latest possible
833 # commit without any intervening, unrelated commits.
833 # commit without any intervening, unrelated commits.
834
834
835 for candidate in pycompat.xrange(i):
835 for candidate in range(i):
836 if c.branch not in changesets[candidate].branchpoints:
836 if c.branch not in changesets[candidate].branchpoints:
837 if p is not None:
837 if p is not None:
838 break
838 break
839 continue
839 continue
840 p = candidate
840 p = candidate
841
841
842 c.parents = []
842 c.parents = []
843 if p is not None:
843 if p is not None:
844 p = changesets[p]
844 p = changesets[p]
845
845
846 # Ensure no changeset has a synthetic changeset as a parent.
846 # Ensure no changeset has a synthetic changeset as a parent.
847 while p.synthetic:
847 while p.synthetic:
848 assert len(p.parents) <= 1, _(
848 assert len(p.parents) <= 1, _(
849 b'synthetic changeset cannot have multiple parents'
849 b'synthetic changeset cannot have multiple parents'
850 )
850 )
851 if p.parents:
851 if p.parents:
852 p = p.parents[0]
852 p = p.parents[0]
853 else:
853 else:
854 p = None
854 p = None
855 break
855 break
856
856
857 if p is not None:
857 if p is not None:
858 c.parents.append(p)
858 c.parents.append(p)
859
859
860 if c.mergepoint:
860 if c.mergepoint:
861 if c.mergepoint == b'HEAD':
861 if c.mergepoint == b'HEAD':
862 c.mergepoint = None
862 c.mergepoint = None
863 c.parents.append(changesets[branches[c.mergepoint]])
863 c.parents.append(changesets[branches[c.mergepoint]])
864
864
865 if mergefrom:
865 if mergefrom:
866 m = mergefrom.search(c.comment)
866 m = mergefrom.search(c.comment)
867 if m:
867 if m:
868 m = m.group(1)
868 m = m.group(1)
869 if m == b'HEAD':
869 if m == b'HEAD':
870 m = None
870 m = None
871 try:
871 try:
872 candidate = changesets[branches[m]]
872 candidate = changesets[branches[m]]
873 except KeyError:
873 except KeyError:
874 ui.warn(
874 ui.warn(
875 _(
875 _(
876 b"warning: CVS commit message references "
876 b"warning: CVS commit message references "
877 b"non-existent branch %r:\n%s\n"
877 b"non-existent branch %r:\n%s\n"
878 )
878 )
879 % (pycompat.bytestr(m), c.comment)
879 % (pycompat.bytestr(m), c.comment)
880 )
880 )
881 if m in branches and c.branch != m and not candidate.synthetic:
881 if m in branches and c.branch != m and not candidate.synthetic:
882 c.parents.append(candidate)
882 c.parents.append(candidate)
883
883
884 if mergeto:
884 if mergeto:
885 m = mergeto.search(c.comment)
885 m = mergeto.search(c.comment)
886 if m:
886 if m:
887 if m.groups():
887 if m.groups():
888 m = m.group(1)
888 m = m.group(1)
889 if m == b'HEAD':
889 if m == b'HEAD':
890 m = None
890 m = None
891 else:
891 else:
892 m = None # if no group found then merge to HEAD
892 m = None # if no group found then merge to HEAD
893 if m in branches and c.branch != m:
893 if m in branches and c.branch != m:
894 # insert empty changeset for merge
894 # insert empty changeset for merge
895 cc = changeset(
895 cc = changeset(
896 author=c.author,
896 author=c.author,
897 branch=m,
897 branch=m,
898 date=c.date,
898 date=c.date,
899 comment=b'convert-repo: CVS merge from branch %s'
899 comment=b'convert-repo: CVS merge from branch %s'
900 % c.branch,
900 % c.branch,
901 entries=[],
901 entries=[],
902 tags=[],
902 tags=[],
903 parents=[changesets[branches[m]], c],
903 parents=[changesets[branches[m]], c],
904 )
904 )
905 changesets.insert(i + 1, cc)
905 changesets.insert(i + 1, cc)
906 branches[m] = i + 1
906 branches[m] = i + 1
907
907
908 # adjust our loop counters now we have inserted a new entry
908 # adjust our loop counters now we have inserted a new entry
909 n += 1
909 n += 1
910 i += 2
910 i += 2
911 continue
911 continue
912
912
913 branches[c.branch] = i
913 branches[c.branch] = i
914 i += 1
914 i += 1
915
915
916 # Drop synthetic changesets (safe now that we have ensured no other
916 # Drop synthetic changesets (safe now that we have ensured no other
917 # changesets can have them as parents).
917 # changesets can have them as parents).
918 i = 0
918 i = 0
919 while i < len(changesets):
919 while i < len(changesets):
920 if changesets[i].synthetic:
920 if changesets[i].synthetic:
921 del changesets[i]
921 del changesets[i]
922 else:
922 else:
923 i += 1
923 i += 1
924
924
925 # Number changesets
925 # Number changesets
926
926
927 for i, c in enumerate(changesets):
927 for i, c in enumerate(changesets):
928 c.id = i + 1
928 c.id = i + 1
929
929
930 if odd:
930 if odd:
931 for l, r in odd:
931 for l, r in odd:
932 if l.id is not None and r.id is not None:
932 if l.id is not None and r.id is not None:
933 ui.warn(
933 ui.warn(
934 _(b'changeset %d is both before and after %d\n')
934 _(b'changeset %d is both before and after %d\n')
935 % (l.id, r.id)
935 % (l.id, r.id)
936 )
936 )
937
937
938 ui.status(_(b'%d changeset entries\n') % len(changesets))
938 ui.status(_(b'%d changeset entries\n') % len(changesets))
939
939
940 hook.hook(ui, None, b"cvschangesets", True, changesets=changesets)
940 hook.hook(ui, None, b"cvschangesets", True, changesets=changesets)
941
941
942 return changesets
942 return changesets
943
943
944
944
945 def debugcvsps(ui, *args, **opts):
945 def debugcvsps(ui, *args, **opts):
946 """Read CVS rlog for current directory or named path in
946 """Read CVS rlog for current directory or named path in
947 repository, and convert the log to changesets based on matching
947 repository, and convert the log to changesets based on matching
948 commit log entries and dates.
948 commit log entries and dates.
949 """
949 """
950 opts = pycompat.byteskwargs(opts)
950 opts = pycompat.byteskwargs(opts)
951 if opts[b"new_cache"]:
951 if opts[b"new_cache"]:
952 cache = b"write"
952 cache = b"write"
953 elif opts[b"update_cache"]:
953 elif opts[b"update_cache"]:
954 cache = b"update"
954 cache = b"update"
955 else:
955 else:
956 cache = None
956 cache = None
957
957
958 revisions = opts[b"revisions"]
958 revisions = opts[b"revisions"]
959
959
960 try:
960 try:
961 if args:
961 if args:
962 log = []
962 log = []
963 for d in args:
963 for d in args:
964 log += createlog(ui, d, root=opts[b"root"], cache=cache)
964 log += createlog(ui, d, root=opts[b"root"], cache=cache)
965 else:
965 else:
966 log = createlog(ui, root=opts[b"root"], cache=cache)
966 log = createlog(ui, root=opts[b"root"], cache=cache)
967 except logerror as e:
967 except logerror as e:
968 ui.write(b"%r\n" % e)
968 ui.write(b"%r\n" % e)
969 return
969 return
970
970
971 changesets = createchangeset(ui, log, opts[b"fuzz"])
971 changesets = createchangeset(ui, log, opts[b"fuzz"])
972 del log
972 del log
973
973
974 # Print changesets (optionally filtered)
974 # Print changesets (optionally filtered)
975
975
976 off = len(revisions)
976 off = len(revisions)
977 branches = {} # latest version number in each branch
977 branches = {} # latest version number in each branch
978 ancestors = {} # parent branch
978 ancestors = {} # parent branch
979 for cs in changesets:
979 for cs in changesets:
980
980
981 if opts[b"ancestors"]:
981 if opts[b"ancestors"]:
982 if cs.branch not in branches and cs.parents and cs.parents[0].id:
982 if cs.branch not in branches and cs.parents and cs.parents[0].id:
983 ancestors[cs.branch] = (
983 ancestors[cs.branch] = (
984 changesets[cs.parents[0].id - 1].branch,
984 changesets[cs.parents[0].id - 1].branch,
985 cs.parents[0].id,
985 cs.parents[0].id,
986 )
986 )
987 branches[cs.branch] = cs.id
987 branches[cs.branch] = cs.id
988
988
989 # limit by branches
989 # limit by branches
990 if (
990 if (
991 opts[b"branches"]
991 opts[b"branches"]
992 and (cs.branch or b'HEAD') not in opts[b"branches"]
992 and (cs.branch or b'HEAD') not in opts[b"branches"]
993 ):
993 ):
994 continue
994 continue
995
995
996 if not off:
996 if not off:
997 # Note: trailing spaces on several lines here are needed to have
997 # Note: trailing spaces on several lines here are needed to have
998 # bug-for-bug compatibility with cvsps.
998 # bug-for-bug compatibility with cvsps.
999 ui.write(b'---------------------\n')
999 ui.write(b'---------------------\n')
1000 ui.write((b'PatchSet %d \n' % cs.id))
1000 ui.write((b'PatchSet %d \n' % cs.id))
1001 ui.write(
1001 ui.write(
1002 (
1002 (
1003 b'Date: %s\n'
1003 b'Date: %s\n'
1004 % dateutil.datestr(cs.date, b'%Y/%m/%d %H:%M:%S %1%2')
1004 % dateutil.datestr(cs.date, b'%Y/%m/%d %H:%M:%S %1%2')
1005 )
1005 )
1006 )
1006 )
1007 ui.write((b'Author: %s\n' % cs.author))
1007 ui.write((b'Author: %s\n' % cs.author))
1008 ui.write((b'Branch: %s\n' % (cs.branch or b'HEAD')))
1008 ui.write((b'Branch: %s\n' % (cs.branch or b'HEAD')))
1009 ui.write(
1009 ui.write(
1010 (
1010 (
1011 b'Tag%s: %s \n'
1011 b'Tag%s: %s \n'
1012 % (
1012 % (
1013 [b'', b's'][len(cs.tags) > 1],
1013 [b'', b's'][len(cs.tags) > 1],
1014 b','.join(cs.tags) or b'(none)',
1014 b','.join(cs.tags) or b'(none)',
1015 )
1015 )
1016 )
1016 )
1017 )
1017 )
1018 if cs.branchpoints:
1018 if cs.branchpoints:
1019 ui.writenoi18n(
1019 ui.writenoi18n(
1020 b'Branchpoints: %s \n' % b', '.join(sorted(cs.branchpoints))
1020 b'Branchpoints: %s \n' % b', '.join(sorted(cs.branchpoints))
1021 )
1021 )
1022 if opts[b"parents"] and cs.parents:
1022 if opts[b"parents"] and cs.parents:
1023 if len(cs.parents) > 1:
1023 if len(cs.parents) > 1:
1024 ui.write(
1024 ui.write(
1025 (
1025 (
1026 b'Parents: %s\n'
1026 b'Parents: %s\n'
1027 % (b','.join([(b"%d" % p.id) for p in cs.parents]))
1027 % (b','.join([(b"%d" % p.id) for p in cs.parents]))
1028 )
1028 )
1029 )
1029 )
1030 else:
1030 else:
1031 ui.write((b'Parent: %d\n' % cs.parents[0].id))
1031 ui.write((b'Parent: %d\n' % cs.parents[0].id))
1032
1032
1033 if opts[b"ancestors"]:
1033 if opts[b"ancestors"]:
1034 b = cs.branch
1034 b = cs.branch
1035 r = []
1035 r = []
1036 while b:
1036 while b:
1037 b, c = ancestors[b]
1037 b, c = ancestors[b]
1038 r.append(b'%s:%d:%d' % (b or b"HEAD", c, branches[b]))
1038 r.append(b'%s:%d:%d' % (b or b"HEAD", c, branches[b]))
1039 if r:
1039 if r:
1040 ui.write((b'Ancestors: %s\n' % (b','.join(r))))
1040 ui.write((b'Ancestors: %s\n' % (b','.join(r))))
1041
1041
1042 ui.writenoi18n(b'Log:\n')
1042 ui.writenoi18n(b'Log:\n')
1043 ui.write(b'%s\n\n' % cs.comment)
1043 ui.write(b'%s\n\n' % cs.comment)
1044 ui.writenoi18n(b'Members: \n')
1044 ui.writenoi18n(b'Members: \n')
1045 for f in cs.entries:
1045 for f in cs.entries:
1046 fn = f.file
1046 fn = f.file
1047 if fn.startswith(opts[b"prefix"]):
1047 if fn.startswith(opts[b"prefix"]):
1048 fn = fn[len(opts[b"prefix"]) :]
1048 fn = fn[len(opts[b"prefix"]) :]
1049 ui.write(
1049 ui.write(
1050 b'\t%s:%s->%s%s \n'
1050 b'\t%s:%s->%s%s \n'
1051 % (
1051 % (
1052 fn,
1052 fn,
1053 b'.'.join([b"%d" % x for x in f.parent]) or b'INITIAL',
1053 b'.'.join([b"%d" % x for x in f.parent]) or b'INITIAL',
1054 b'.'.join([(b"%d" % x) for x in f.revision]),
1054 b'.'.join([(b"%d" % x) for x in f.revision]),
1055 [b'', b'(DEAD)'][f.dead],
1055 [b'', b'(DEAD)'][f.dead],
1056 )
1056 )
1057 )
1057 )
1058 ui.write(b'\n')
1058 ui.write(b'\n')
1059
1059
1060 # have we seen the start tag?
1060 # have we seen the start tag?
1061 if revisions and off:
1061 if revisions and off:
1062 if revisions[0] == (b"%d" % cs.id) or revisions[0] in cs.tags:
1062 if revisions[0] == (b"%d" % cs.id) or revisions[0] in cs.tags:
1063 off = False
1063 off = False
1064
1064
1065 # see if we reached the end tag
1065 # see if we reached the end tag
1066 if len(revisions) > 1 and not off:
1066 if len(revisions) > 1 and not off:
1067 if revisions[1] == (b"%d" % cs.id) or revisions[1] in cs.tags:
1067 if revisions[1] == (b"%d" % cs.id) or revisions[1] in cs.tags:
1068 break
1068 break
@@ -1,479 +1,479 b''
1 """automatically manage newlines in repository files
1 """automatically manage newlines in repository files
2
2
3 This extension allows you to manage the type of line endings (CRLF or
3 This extension allows you to manage the type of line endings (CRLF or
4 LF) that are used in the repository and in the local working
4 LF) that are used in the repository and in the local working
5 directory. That way you can get CRLF line endings on Windows and LF on
5 directory. That way you can get CRLF line endings on Windows and LF on
6 Unix/Mac, thereby letting everybody use their OS native line endings.
6 Unix/Mac, thereby letting everybody use their OS native line endings.
7
7
8 The extension reads its configuration from a versioned ``.hgeol``
8 The extension reads its configuration from a versioned ``.hgeol``
9 configuration file found in the root of the working directory. The
9 configuration file found in the root of the working directory. The
10 ``.hgeol`` file uses the same syntax as all other Mercurial
10 ``.hgeol`` file uses the same syntax as all other Mercurial
11 configuration files. It uses two sections, ``[patterns]`` and
11 configuration files. It uses two sections, ``[patterns]`` and
12 ``[repository]``.
12 ``[repository]``.
13
13
14 The ``[patterns]`` section specifies how line endings should be
14 The ``[patterns]`` section specifies how line endings should be
15 converted between the working directory and the repository. The format is
15 converted between the working directory and the repository. The format is
16 specified by a file pattern. The first match is used, so put more
16 specified by a file pattern. The first match is used, so put more
17 specific patterns first. The available line endings are ``LF``,
17 specific patterns first. The available line endings are ``LF``,
18 ``CRLF``, and ``BIN``.
18 ``CRLF``, and ``BIN``.
19
19
20 Files with the declared format of ``CRLF`` or ``LF`` are always
20 Files with the declared format of ``CRLF`` or ``LF`` are always
21 checked out and stored in the repository in that format and files
21 checked out and stored in the repository in that format and files
22 declared to be binary (``BIN``) are left unchanged. Additionally,
22 declared to be binary (``BIN``) are left unchanged. Additionally,
23 ``native`` is an alias for checking out in the platform's default line
23 ``native`` is an alias for checking out in the platform's default line
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
26 default behavior; it is only needed if you need to override a later,
26 default behavior; it is only needed if you need to override a later,
27 more general pattern.
27 more general pattern.
28
28
29 The optional ``[repository]`` section specifies the line endings to
29 The optional ``[repository]`` section specifies the line endings to
30 use for files stored in the repository. It has a single setting,
30 use for files stored in the repository. It has a single setting,
31 ``native``, which determines the storage line endings for files
31 ``native``, which determines the storage line endings for files
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
35 will be converted to ``LF`` when stored in the repository. Files
35 will be converted to ``LF`` when stored in the repository. Files
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
37 are always stored as-is in the repository.
37 are always stored as-is in the repository.
38
38
39 Example versioned ``.hgeol`` file::
39 Example versioned ``.hgeol`` file::
40
40
41 [patterns]
41 [patterns]
42 **.py = native
42 **.py = native
43 **.vcproj = CRLF
43 **.vcproj = CRLF
44 **.txt = native
44 **.txt = native
45 Makefile = LF
45 Makefile = LF
46 **.jpg = BIN
46 **.jpg = BIN
47
47
48 [repository]
48 [repository]
49 native = LF
49 native = LF
50
50
51 .. note::
51 .. note::
52
52
53 The rules will first apply when files are touched in the working
53 The rules will first apply when files are touched in the working
54 directory, e.g. by updating to null and back to tip to touch all files.
54 directory, e.g. by updating to null and back to tip to touch all files.
55
55
56 The extension uses an optional ``[eol]`` section read from both the
56 The extension uses an optional ``[eol]`` section read from both the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
58 latter overriding the former. You can use that section to control the
58 latter overriding the former. You can use that section to control the
59 overall behavior. There are three settings:
59 overall behavior. There are three settings:
60
60
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
62 ``CRLF`` to override the default interpretation of ``native`` for
62 ``CRLF`` to override the default interpretation of ``native`` for
63 checkout. This can be used with :hg:`archive` on Unix, say, to
63 checkout. This can be used with :hg:`archive` on Unix, say, to
64 generate an archive where files have line endings for Windows.
64 generate an archive where files have line endings for Windows.
65
65
66 - ``eol.only-consistent`` (default True) can be set to False to make
66 - ``eol.only-consistent`` (default True) can be set to False to make
67 the extension convert files with inconsistent EOLs. Inconsistent
67 the extension convert files with inconsistent EOLs. Inconsistent
68 means that there is both ``CRLF`` and ``LF`` present in the file.
68 means that there is both ``CRLF`` and ``LF`` present in the file.
69 Such files are normally not touched under the assumption that they
69 Such files are normally not touched under the assumption that they
70 have mixed EOLs on purpose.
70 have mixed EOLs on purpose.
71
71
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
73 ensure that converted files end with a EOL character (either ``\\n``
73 ensure that converted files end with a EOL character (either ``\\n``
74 or ``\\r\\n`` as per the configured patterns).
74 or ``\\r\\n`` as per the configured patterns).
75
75
76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
77 like the deprecated win32text extension does. This means that you can
77 like the deprecated win32text extension does. This means that you can
78 disable win32text and enable eol and your filters will still work. You
78 disable win32text and enable eol and your filters will still work. You
79 only need to use these filters until you have prepared a ``.hgeol`` file.
79 only need to use these filters until you have prepared a ``.hgeol`` file.
80
80
81 The ``win32text.forbid*`` hooks provided by the win32text extension
81 The ``win32text.forbid*`` hooks provided by the win32text extension
82 have been unified into a single hook named ``eol.checkheadshook``. The
82 have been unified into a single hook named ``eol.checkheadshook``. The
83 hook will lookup the expected line endings from the ``.hgeol`` file,
83 hook will lookup the expected line endings from the ``.hgeol`` file,
84 which means you must migrate to a ``.hgeol`` file first before using
84 which means you must migrate to a ``.hgeol`` file first before using
85 the hook. ``eol.checkheadshook`` only checks heads, intermediate
85 the hook. ``eol.checkheadshook`` only checks heads, intermediate
86 invalid revisions will be pushed. To forbid them completely, use the
86 invalid revisions will be pushed. To forbid them completely, use the
87 ``eol.checkallhook`` hook. These hooks are best used as
87 ``eol.checkallhook`` hook. These hooks are best used as
88 ``pretxnchangegroup`` hooks.
88 ``pretxnchangegroup`` hooks.
89
89
90 See :hg:`help patterns` for more information about the glob patterns
90 See :hg:`help patterns` for more information about the glob patterns
91 used.
91 used.
92 """
92 """
93
93
94
94
95 import os
95 import os
96 import re
96 import re
97 from mercurial.i18n import _
97 from mercurial.i18n import _
98 from mercurial import (
98 from mercurial import (
99 config,
99 config,
100 error as errormod,
100 error as errormod,
101 extensions,
101 extensions,
102 match,
102 match,
103 pycompat,
103 pycompat,
104 registrar,
104 registrar,
105 scmutil,
105 scmutil,
106 util,
106 util,
107 )
107 )
108 from mercurial.utils import stringutil
108 from mercurial.utils import stringutil
109
109
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# Declare the [eol] configuration items and their defaults (see the
# module docstring for their meaning).
configitem(
    b'eol',
    b'fix-trailing-newline',
    default=False,
)
configitem(
    b'eol',
    b'native',
    default=pycompat.oslinesep,
)
configitem(
    b'eol',
    b'only-consistent',
    default=True,
)
134
# Matches a lone LF, i.e., one that is not part of CRLF.
singlelf = re.compile(b'(^|[^\r])\n')


def inconsistenteol(data):
    """Return a truthy value when *data* mixes CRLF and lone LF endings."""
    if b'\r\n' not in data:
        return False
    return singlelf.search(data)
141
141
142
142
def tolf(s, params, ui, **kwargs):
    """Filter to convert to LF EOLs."""
    # Leave binary content and (by default) mixed-EOL files untouched.
    if stringutil.binary(s):
        return s
    if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
        return s
    needs_final_newline = (
        ui.configbool(b'eol', b'fix-trailing-newline')
        and s
        and not s.endswith(b'\n')
    )
    if needs_final_newline:
        s = s + b'\n'
    return util.tolf(s)
156
156
157
157
def tocrlf(s, params, ui, **kwargs):
    """Filter to convert to CRLF EOLs."""
    # Leave binary content and (by default) mixed-EOL files untouched.
    if stringutil.binary(s):
        return s
    if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
        return s
    needs_final_newline = (
        ui.configbool(b'eol', b'fix-trailing-newline')
        and s
        and not s.endswith(b'\n')
    )
    if needs_final_newline:
        s = s + b'\n'
    return util.tocrlf(s)
171
171
172
172
def isbinary(s, params, ui, **kwargs):
    """Filter that passes the file content through untouched."""
    return s
176
176
177
177
# Registry of filter functions, keyed by the names installed into the
# [decode]/[encode] configuration sections by eolfile.copytoui().
filters = {
    b'to-lf': tolf,
    b'to-crlf': tocrlf,
    b'is-binary': isbinary,
    # The following provide backwards compatibility with win32text
    b'cleverencode:': tolf,
    b'cleverdecode:': tocrlf,
}
186
186
187
187
class eolfile:
    """Parsed representation of a versioned ``.hgeol`` file."""

    def __init__(self, ui, root, data):
        # Map .hgeol style keys to filter names for checkout (decode)
        # and commit (encode); 'NATIVE' is resolved below.
        self._decode = {
            b'LF': b'to-lf',
            b'CRLF': b'to-crlf',
            b'BIN': b'is-binary',
        }
        self._encode = {
            b'LF': b'to-lf',
            b'CRLF': b'to-crlf',
            b'BIN': b'is-binary',
        }

        self.cfg = config.config()
        # Our files should not be touched. The pattern must be
        # inserted first to override a '** = native' pattern.
        self.cfg.set(b'patterns', b'.hg*', b'BIN', b'eol')
        # We can then parse the user's patterns.
        self.cfg.parse(b'.hgeol', data)

        # 'NATIVE' means different things on each side: the repository
        # side comes from [repository] native, the working-directory
        # side from the eol.native setting.
        isrepolf = self.cfg.get(b'repository', b'native') != b'CRLF'
        self._encode[b'NATIVE'] = isrepolf and b'to-lf' or b'to-crlf'
        iswdlf = ui.config(b'eol', b'native') in (b'LF', b'\n')
        self._decode[b'NATIVE'] = iswdlf and b'to-lf' or b'to-crlf'

        include = []
        exclude = []
        self.patterns = []
        for pattern, style in self.cfg.items(b'patterns'):
            key = style.upper()
            if key == b'BIN':
                exclude.append(pattern)
            else:
                include.append(pattern)
            m = match.match(root, b'', [pattern])
            self.patterns.append((pattern, key, m))
        # This will match the files for which we need to care
        # about inconsistent newlines.
        self.match = match.match(root, b'', [], include, exclude)

    def copytoui(self, ui):
        """Install this file's patterns as [decode]/[encode] filters on *ui*."""
        # Drop stale entries previously installed by this extension
        # (config source b'eol') that no longer appear in .hgeol.
        newpatterns = {pattern for pattern, key, m in self.patterns}
        for section in (b'decode', b'encode'):
            for oldpattern, _filter in ui.configitems(section):
                if oldpattern not in newpatterns:
                    if ui.configsource(section, oldpattern) == b'eol':
                        ui.setconfig(section, oldpattern, b'!', b'eol')
        for pattern, key, m in self.patterns:
            try:
                ui.setconfig(b'decode', pattern, self._decode[key], b'eol')
                ui.setconfig(b'encode', pattern, self._encode[key], b'eol')
            except KeyError:
                # Unknown style key in .hgeol: warn but keep going.
                ui.warn(
                    _(b"ignoring unknown EOL style '%s' from %s\n")
                    % (key, self.cfg.source(b'patterns', pattern))
                )
        # eol.only-consistent can be specified in ~/.hgrc or .hgeol
        for k, v in self.cfg.items(b'eol'):
            ui.setconfig(b'eol', k, v, b'eol')

    def checkrev(self, repo, ctx, files):
        """Return [(file, filtername, ctx-str)] for files in *ctx* whose
        stored line endings violate their configured pattern.

        When *files* is empty/None, the files touched by *ctx* are checked.
        """
        failed = []
        for f in files or ctx.files():
            if f not in ctx:
                continue
            for pattern, key, m in self.patterns:
                if not m(f):
                    continue
                target = self._encode[key]
                data = ctx[f].data()
                # to-lf content must not contain CRLF; to-crlf content
                # must not contain a lone LF.
                if (
                    target == b"to-lf"
                    and b"\r\n" in data
                    or target == b"to-crlf"
                    and singlelf.search(data)
                ):
                    failed.append((f, target, bytes(ctx)))
                # First matching pattern wins; stop scanning patterns.
                break
        return failed
267
267
268
268
def parseeol(ui, repo, nodes):
    """Return an eolfile for the first node in *nodes* with a readable
    ``.hgeol``, or None if none has one or the file fails to parse.

    A node of None reads ``.hgeol`` from the working directory.
    """
    try:
        for node in nodes:
            try:
                if node is None:
                    # Cannot use workingctx.data() since it would load
                    # and cache the filters before we configure them.
                    data = repo.wvfs(b'.hgeol').read()
                else:
                    data = repo[node][b'.hgeol'].data()
                return eolfile(ui, repo.root, data)
            except (IOError, LookupError):
                # No .hgeol at this node; try the next candidate.
                pass
    except errormod.ConfigError as inst:
        # A malformed .hgeol disables the extension for this invocation.
        ui.warn(
            _(
                b"warning: ignoring .hgeol file due to parse error "
                b"at %s: %s\n"
            )
            % (inst.location, inst.message)
        )
    return None
291
291
292
292
def ensureenabled(ui):
    """make sure the extension is enabled when used as hook

    When eol is used through hooks, the extension is never formally loaded
    and enabled. This has some side effects, for example the config
    declaration is never loaded. This function ensures the extension is
    enabled when running hooks.
    """
    # The config table is registered iff the extension was loaded.
    if b'eol' not in ui._knownconfig:
        ui.setconfig(b'extensions', b'eol', b'', source=b'internal')
        extensions.loadall(ui, [b'eol'])
305
305
306
306
def _checkhook(ui, repo, node, headsonly):
    """Shared implementation of checkallhook/checkheadshook.

    Check EOLs of files in revisions from *node* to tip against the
    ``.hgeol`` of each revision, raising Abort on any violation. With
    *headsonly*, parents of incoming revisions are dropped from the set
    so only head revisions are checked (against all touched files).
    """
    # Get revisions to check and touched files at the same time
    ensureenabled(ui)
    files = set()
    revs = set()
    for rev in range(repo[node].rev(), len(repo)):
        revs.add(rev)
        if headsonly:
            ctx = repo[rev]
            files.update(ctx.files())
            for pctx in ctx.parents():
                revs.discard(pctx.rev())
    failed = []
    for rev in revs:
        ctx = repo[rev]
        # Each revision is checked against its own .hgeol.
        eol = parseeol(ui, repo, [ctx.node()])
        if eol:
            failed.extend(eol.checkrev(repo, ctx, files))

    if failed:
        # Name the endings that must NOT appear for each filter.
        eols = {b'to-lf': b'CRLF', b'to-crlf': b'LF'}
        msgs = []
        for f, target, node in sorted(failed):
            msgs.append(
                _(b" %s in %s should not have %s line endings")
                % (f, node, eols[target])
            )
        raise errormod.Abort(
            _(b"end-of-line check failed:\n") + b"\n".join(msgs)
        )
337
337
338
338
def checkallhook(ui, repo, node, hooktype, **kwargs):
    """verify that files have expected EOLs"""
    _checkhook(ui, repo, node, headsonly=False)
342
342
343
343
def checkheadshook(ui, repo, node, hooktype, **kwargs):
    """verify that files have expected EOLs"""
    _checkhook(ui, repo, node, headsonly=True)
347
347
348
348
349 # "checkheadshook" used to be called "hook"
349 # "checkheadshook" used to be called "hook"
350 hook = checkheadshook
350 hook = checkheadshook
351
351
352
352
def preupdate(ui, repo, hooktype, parent1, parent2):
    """preupdate hook: install the EOL filters of the update target.

    Returns False so the update is allowed to proceed.
    """
    target = scmutil.resolvehexnodeidprefix(repo, parent1)
    repo.loadeol([target])
    return False
357
357
358
358
def uisetup(ui):
    # Register the preupdate hook so the filters for the update target
    # are configured before files are written to the working directory.
    ui.setconfig(b'hooks', b'preupdate.eol', preupdate, b'eol')
361
361
362
362
363 def extsetup(ui):
363 def extsetup(ui):
364 try:
364 try:
365 extensions.find(b'win32text')
365 extensions.find(b'win32text')
366 ui.warn(
366 ui.warn(
367 _(
367 _(
368 b"the eol extension is incompatible with the "
368 b"the eol extension is incompatible with the "
369 b"win32text extension\n"
369 b"win32text extension\n"
370 )
370 )
371 )
371 )
372 except KeyError:
372 except KeyError:
373 pass
373 pass
374
374
375
375
376 def reposetup(ui, repo):
376 def reposetup(ui, repo):
377 uisetup(repo.ui)
377 uisetup(repo.ui)
378
378
379 if not repo.local():
379 if not repo.local():
380 return
380 return
381 for name, fn in filters.items():
381 for name, fn in filters.items():
382 repo.adddatafilter(name, fn)
382 repo.adddatafilter(name, fn)
383
383
384 ui.setconfig(b'patch', b'eol', b'auto', b'eol')
384 ui.setconfig(b'patch', b'eol', b'auto', b'eol')
385
385
386 class eolrepo(repo.__class__):
386 class eolrepo(repo.__class__):
387 def loadeol(self, nodes):
387 def loadeol(self, nodes):
388 eol = parseeol(self.ui, self, nodes)
388 eol = parseeol(self.ui, self, nodes)
389 if eol is None:
389 if eol is None:
390 return None
390 return None
391 eol.copytoui(self.ui)
391 eol.copytoui(self.ui)
392 return eol.match
392 return eol.match
393
393
394 def _hgcleardirstate(self):
394 def _hgcleardirstate(self):
395 self._eolmatch = self.loadeol([None])
395 self._eolmatch = self.loadeol([None])
396 if not self._eolmatch:
396 if not self._eolmatch:
397 self._eolmatch = util.never
397 self._eolmatch = util.never
398 return
398 return
399
399
400 oldeol = None
400 oldeol = None
401 try:
401 try:
402 cachemtime = os.path.getmtime(self.vfs.join(b"eol.cache"))
402 cachemtime = os.path.getmtime(self.vfs.join(b"eol.cache"))
403 except OSError:
403 except OSError:
404 cachemtime = 0
404 cachemtime = 0
405 else:
405 else:
406 olddata = self.vfs.read(b"eol.cache")
406 olddata = self.vfs.read(b"eol.cache")
407 if olddata:
407 if olddata:
408 oldeol = eolfile(self.ui, self.root, olddata)
408 oldeol = eolfile(self.ui, self.root, olddata)
409
409
410 try:
410 try:
411 eolmtime = os.path.getmtime(self.wjoin(b".hgeol"))
411 eolmtime = os.path.getmtime(self.wjoin(b".hgeol"))
412 except OSError:
412 except OSError:
413 eolmtime = 0
413 eolmtime = 0
414
414
415 if eolmtime >= cachemtime and eolmtime > 0:
415 if eolmtime >= cachemtime and eolmtime > 0:
416 self.ui.debug(b"eol: detected change in .hgeol\n")
416 self.ui.debug(b"eol: detected change in .hgeol\n")
417
417
418 hgeoldata = self.wvfs.read(b'.hgeol')
418 hgeoldata = self.wvfs.read(b'.hgeol')
419 neweol = eolfile(self.ui, self.root, hgeoldata)
419 neweol = eolfile(self.ui, self.root, hgeoldata)
420
420
421 wlock = None
421 wlock = None
422 try:
422 try:
423 wlock = self.wlock()
423 wlock = self.wlock()
424 for f in self.dirstate:
424 for f in self.dirstate:
425 if not self.dirstate.get_entry(f).maybe_clean:
425 if not self.dirstate.get_entry(f).maybe_clean:
426 continue
426 continue
427 if oldeol is not None:
427 if oldeol is not None:
428 if not oldeol.match(f) and not neweol.match(f):
428 if not oldeol.match(f) and not neweol.match(f):
429 continue
429 continue
430 oldkey = None
430 oldkey = None
431 for pattern, key, m in oldeol.patterns:
431 for pattern, key, m in oldeol.patterns:
432 if m(f):
432 if m(f):
433 oldkey = key
433 oldkey = key
434 break
434 break
435 newkey = None
435 newkey = None
436 for pattern, key, m in neweol.patterns:
436 for pattern, key, m in neweol.patterns:
437 if m(f):
437 if m(f):
438 newkey = key
438 newkey = key
439 break
439 break
440 if oldkey == newkey:
440 if oldkey == newkey:
441 continue
441 continue
442 # all normal files need to be looked at again since
442 # all normal files need to be looked at again since
443 # the new .hgeol file specify a different filter
443 # the new .hgeol file specify a different filter
444 self.dirstate.set_possibly_dirty(f)
444 self.dirstate.set_possibly_dirty(f)
445 # Write the cache to update mtime and cache .hgeol
445 # Write the cache to update mtime and cache .hgeol
446 with self.vfs(b"eol.cache", b"w") as f:
446 with self.vfs(b"eol.cache", b"w") as f:
447 f.write(hgeoldata)
447 f.write(hgeoldata)
448 except errormod.LockUnavailable:
448 except errormod.LockUnavailable:
449 # If we cannot lock the repository and clear the
449 # If we cannot lock the repository and clear the
450 # dirstate, then a commit might not see all files
450 # dirstate, then a commit might not see all files
451 # as modified. But if we cannot lock the
451 # as modified. But if we cannot lock the
452 # repository, then we can also not make a commit,
452 # repository, then we can also not make a commit,
453 # so ignore the error.
453 # so ignore the error.
454 pass
454 pass
455 finally:
455 finally:
456 if wlock is not None:
456 if wlock is not None:
457 wlock.release()
457 wlock.release()
458
458
459 def commitctx(self, ctx, error=False, origctx=None):
459 def commitctx(self, ctx, error=False, origctx=None):
460 for f in sorted(ctx.added() + ctx.modified()):
460 for f in sorted(ctx.added() + ctx.modified()):
461 if not self._eolmatch(f):
461 if not self._eolmatch(f):
462 continue
462 continue
463 fctx = ctx[f]
463 fctx = ctx[f]
464 if fctx is None:
464 if fctx is None:
465 continue
465 continue
466 data = fctx.data()
466 data = fctx.data()
467 if stringutil.binary(data):
467 if stringutil.binary(data):
468 # We should not abort here, since the user should
468 # We should not abort here, since the user should
469 # be able to say "** = native" to automatically
469 # be able to say "** = native" to automatically
470 # have all non-binary files taken care of.
470 # have all non-binary files taken care of.
471 continue
471 continue
472 if inconsistenteol(data):
472 if inconsistenteol(data):
473 raise errormod.Abort(
473 raise errormod.Abort(
474 _(b"inconsistent newline style in %s\n") % f
474 _(b"inconsistent newline style in %s\n") % f
475 )
475 )
476 return super(eolrepo, self).commitctx(ctx, error, origctx)
476 return super(eolrepo, self).commitctx(ctx, error, origctx)
477
477
478 repo.__class__ = eolrepo
478 repo.__class__ = eolrepo
479 repo._hgcleardirstate()
479 repo._hgcleardirstate()
@@ -1,858 +1,858 b''
1 # Copyright 2016-present Facebook. All Rights Reserved.
1 # Copyright 2016-present Facebook. All Rights Reserved.
2 #
2 #
3 # context: context needed to annotate a file
3 # context: context needed to annotate a file
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import collections
9 import collections
10 import contextlib
10 import contextlib
11 import os
11 import os
12
12
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.pycompat import (
14 from mercurial.pycompat import (
15 getattr,
15 getattr,
16 open,
16 open,
17 setattr,
17 setattr,
18 )
18 )
19 from mercurial.node import (
19 from mercurial.node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 short,
22 short,
23 )
23 )
24 from mercurial import (
24 from mercurial import (
25 error,
25 error,
26 linelog as linelogmod,
26 linelog as linelogmod,
27 lock as lockmod,
27 lock as lockmod,
28 mdiff,
28 mdiff,
29 pycompat,
29 pycompat,
30 scmutil,
30 scmutil,
31 util,
31 util,
32 )
32 )
33 from mercurial.utils import (
33 from mercurial.utils import (
34 hashutil,
34 hashutil,
35 stringutil,
35 stringutil,
36 )
36 )
37
37
38 from . import (
38 from . import (
39 error as faerror,
39 error as faerror,
40 revmap as revmapmod,
40 revmap as revmapmod,
41 )
41 )
42
42
43 # given path, get filelog, cached
43 # given path, get filelog, cached
44 @util.lrucachefunc
44 @util.lrucachefunc
45 def _getflog(repo, path):
45 def _getflog(repo, path):
46 return repo.file(path)
46 return repo.file(path)
47
47
48
48
49 # extracted from mercurial.context.basefilectx.annotate
49 # extracted from mercurial.context.basefilectx.annotate
50 def _parents(f, follow=True):
50 def _parents(f, follow=True):
51 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
51 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
52 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
52 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
53 # from the topmost introrev (= srcrev) down to p.linkrev() if it
53 # from the topmost introrev (= srcrev) down to p.linkrev() if it
54 # isn't an ancestor of the srcrev.
54 # isn't an ancestor of the srcrev.
55 f._changeid
55 f._changeid
56 pl = f.parents()
56 pl = f.parents()
57
57
58 # Don't return renamed parents if we aren't following.
58 # Don't return renamed parents if we aren't following.
59 if not follow:
59 if not follow:
60 pl = [p for p in pl if p.path() == f.path()]
60 pl = [p for p in pl if p.path() == f.path()]
61
61
62 # renamed filectx won't have a filelog yet, so set it
62 # renamed filectx won't have a filelog yet, so set it
63 # from the cache to save time
63 # from the cache to save time
64 for p in pl:
64 for p in pl:
65 if not '_filelog' in p.__dict__:
65 if not '_filelog' in p.__dict__:
66 p._filelog = _getflog(f._repo, p.path())
66 p._filelog = _getflog(f._repo, p.path())
67
67
68 return pl
68 return pl
69
69
70
70
71 # extracted from mercurial.context.basefilectx.annotate. slightly modified
71 # extracted from mercurial.context.basefilectx.annotate. slightly modified
72 # so it takes a fctx instead of a pair of text and fctx.
72 # so it takes a fctx instead of a pair of text and fctx.
73 def _decorate(fctx):
73 def _decorate(fctx):
74 text = fctx.data()
74 text = fctx.data()
75 linecount = text.count(b'\n')
75 linecount = text.count(b'\n')
76 if text and not text.endswith(b'\n'):
76 if text and not text.endswith(b'\n'):
77 linecount += 1
77 linecount += 1
78 return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
78 return ([(fctx, i) for i in range(linecount)], text)
79
79
80
80
81 # extracted from mercurial.context.basefilectx.annotate. slightly modified
81 # extracted from mercurial.context.basefilectx.annotate. slightly modified
82 # so it takes an extra "blocks" parameter calculated elsewhere, instead of
82 # so it takes an extra "blocks" parameter calculated elsewhere, instead of
83 # calculating diff here.
83 # calculating diff here.
84 def _pair(parent, child, blocks):
84 def _pair(parent, child, blocks):
85 for (a1, a2, b1, b2), t in blocks:
85 for (a1, a2, b1, b2), t in blocks:
86 # Changed blocks ('!') or blocks made only of blank lines ('~')
86 # Changed blocks ('!') or blocks made only of blank lines ('~')
87 # belong to the child.
87 # belong to the child.
88 if t == b'=':
88 if t == b'=':
89 child[0][b1:b2] = parent[0][a1:a2]
89 child[0][b1:b2] = parent[0][a1:a2]
90 return child
90 return child
91
91
92
92
93 # like scmutil.revsingle, but with lru cache, so their states (like manifests)
93 # like scmutil.revsingle, but with lru cache, so their states (like manifests)
94 # could be reused
94 # could be reused
95 _revsingle = util.lrucachefunc(scmutil.revsingle)
95 _revsingle = util.lrucachefunc(scmutil.revsingle)
96
96
97
97
98 def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
98 def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
99 """(repo, str, str) -> fctx
99 """(repo, str, str) -> fctx
100
100
101 get the filectx object from repo, rev, path, in an efficient way.
101 get the filectx object from repo, rev, path, in an efficient way.
102
102
103 if resolverev is True, "rev" is a revision specified by the revset
103 if resolverev is True, "rev" is a revision specified by the revset
104 language, otherwise "rev" is a nodeid, or a revision number that can
104 language, otherwise "rev" is a nodeid, or a revision number that can
105 be consumed by repo.__getitem__.
105 be consumed by repo.__getitem__.
106
106
107 if adjustctx is not None, the returned fctx will point to a changeset
107 if adjustctx is not None, the returned fctx will point to a changeset
108 that introduces the change (last modified the file). if adjustctx
108 that introduces the change (last modified the file). if adjustctx
109 is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
109 is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
110 faster for big repos but is incorrect for some cases.
110 faster for big repos but is incorrect for some cases.
111 """
111 """
112 if resolverev and not isinstance(rev, int) and rev is not None:
112 if resolverev and not isinstance(rev, int) and rev is not None:
113 ctx = _revsingle(repo, rev)
113 ctx = _revsingle(repo, rev)
114 else:
114 else:
115 ctx = repo[rev]
115 ctx = repo[rev]
116
116
117 # If we don't need to adjust the linkrev, create the filectx using the
117 # If we don't need to adjust the linkrev, create the filectx using the
118 # changectx instead of using ctx[path]. This means it already has the
118 # changectx instead of using ctx[path]. This means it already has the
119 # changectx information, so blame -u will be able to look directly at the
119 # changectx information, so blame -u will be able to look directly at the
120 # commitctx object instead of having to resolve it by going through the
120 # commitctx object instead of having to resolve it by going through the
121 # manifest. In a lazy-manifest world this can prevent us from downloading a
121 # manifest. In a lazy-manifest world this can prevent us from downloading a
122 # lot of data.
122 # lot of data.
123 if adjustctx is None:
123 if adjustctx is None:
124 # ctx.rev() is None means it's the working copy, which is a special
124 # ctx.rev() is None means it's the working copy, which is a special
125 # case.
125 # case.
126 if ctx.rev() is None:
126 if ctx.rev() is None:
127 fctx = ctx[path]
127 fctx = ctx[path]
128 else:
128 else:
129 fctx = repo.filectx(path, changeid=ctx.rev())
129 fctx = repo.filectx(path, changeid=ctx.rev())
130 else:
130 else:
131 fctx = ctx[path]
131 fctx = ctx[path]
132 if adjustctx == b'linkrev':
132 if adjustctx == b'linkrev':
133 introrev = fctx.linkrev()
133 introrev = fctx.linkrev()
134 else:
134 else:
135 introrev = fctx.introrev()
135 introrev = fctx.introrev()
136 if introrev != ctx.rev():
136 if introrev != ctx.rev():
137 fctx._changeid = introrev
137 fctx._changeid = introrev
138 fctx._changectx = repo[introrev]
138 fctx._changectx = repo[introrev]
139 return fctx
139 return fctx
140
140
141
141
142 # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
142 # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
143 def encodedir(path):
143 def encodedir(path):
144 return (
144 return (
145 path.replace(b'.hg/', b'.hg.hg/')
145 path.replace(b'.hg/', b'.hg.hg/')
146 .replace(b'.l/', b'.l.hg/')
146 .replace(b'.l/', b'.l.hg/')
147 .replace(b'.m/', b'.m.hg/')
147 .replace(b'.m/', b'.m.hg/')
148 .replace(b'.lock/', b'.lock.hg/')
148 .replace(b'.lock/', b'.lock.hg/')
149 )
149 )
150
150
151
151
152 def hashdiffopts(diffopts):
152 def hashdiffopts(diffopts):
153 diffoptstr = stringutil.pprint(
153 diffoptstr = stringutil.pprint(
154 sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults)
154 sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults)
155 )
155 )
156 return hex(hashutil.sha1(diffoptstr).digest())[:6]
156 return hex(hashutil.sha1(diffoptstr).digest())[:6]
157
157
158
158
159 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
159 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
160
160
161
161
162 class annotateopts:
162 class annotateopts:
163 """like mercurial.mdiff.diffopts, but is for annotate
163 """like mercurial.mdiff.diffopts, but is for annotate
164
164
165 followrename: follow renames, like "hg annotate -f"
165 followrename: follow renames, like "hg annotate -f"
166 followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
166 followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
167 """
167 """
168
168
169 defaults = {
169 defaults = {
170 b'diffopts': None,
170 b'diffopts': None,
171 b'followrename': True,
171 b'followrename': True,
172 b'followmerge': True,
172 b'followmerge': True,
173 }
173 }
174
174
175 def __init__(self, **opts):
175 def __init__(self, **opts):
176 opts = pycompat.byteskwargs(opts)
176 opts = pycompat.byteskwargs(opts)
177 for k, v in self.defaults.items():
177 for k, v in self.defaults.items():
178 setattr(self, k, opts.get(k, v))
178 setattr(self, k, opts.get(k, v))
179
179
180 @util.propertycache
180 @util.propertycache
181 def shortstr(self):
181 def shortstr(self):
182 """represent opts in a short string, suitable for a directory name"""
182 """represent opts in a short string, suitable for a directory name"""
183 result = b''
183 result = b''
184 if not self.followrename:
184 if not self.followrename:
185 result += b'r0'
185 result += b'r0'
186 if not self.followmerge:
186 if not self.followmerge:
187 result += b'm0'
187 result += b'm0'
188 if self.diffopts is not None:
188 if self.diffopts is not None:
189 assert isinstance(self.diffopts, mdiff.diffopts)
189 assert isinstance(self.diffopts, mdiff.diffopts)
190 diffopthash = hashdiffopts(self.diffopts)
190 diffopthash = hashdiffopts(self.diffopts)
191 if diffopthash != _defaultdiffopthash:
191 if diffopthash != _defaultdiffopthash:
192 result += b'i' + diffopthash
192 result += b'i' + diffopthash
193 return result or b'default'
193 return result or b'default'
194
194
195
195
196 defaultopts = annotateopts()
196 defaultopts = annotateopts()
197
197
198
198
199 class _annotatecontext:
199 class _annotatecontext:
200 """do not use this class directly as it does not use lock to protect
200 """do not use this class directly as it does not use lock to protect
201 writes. use "with annotatecontext(...)" instead.
201 writes. use "with annotatecontext(...)" instead.
202 """
202 """
203
203
204 def __init__(self, repo, path, linelogpath, revmappath, opts):
204 def __init__(self, repo, path, linelogpath, revmappath, opts):
205 self.repo = repo
205 self.repo = repo
206 self.ui = repo.ui
206 self.ui = repo.ui
207 self.path = path
207 self.path = path
208 self.opts = opts
208 self.opts = opts
209 self.linelogpath = linelogpath
209 self.linelogpath = linelogpath
210 self.revmappath = revmappath
210 self.revmappath = revmappath
211 self._linelog = None
211 self._linelog = None
212 self._revmap = None
212 self._revmap = None
213 self._node2path = {} # {str: str}
213 self._node2path = {} # {str: str}
214
214
215 @property
215 @property
216 def linelog(self):
216 def linelog(self):
217 if self._linelog is None:
217 if self._linelog is None:
218 if os.path.exists(self.linelogpath):
218 if os.path.exists(self.linelogpath):
219 with open(self.linelogpath, b'rb') as f:
219 with open(self.linelogpath, b'rb') as f:
220 try:
220 try:
221 self._linelog = linelogmod.linelog.fromdata(f.read())
221 self._linelog = linelogmod.linelog.fromdata(f.read())
222 except linelogmod.LineLogError:
222 except linelogmod.LineLogError:
223 self._linelog = linelogmod.linelog()
223 self._linelog = linelogmod.linelog()
224 else:
224 else:
225 self._linelog = linelogmod.linelog()
225 self._linelog = linelogmod.linelog()
226 return self._linelog
226 return self._linelog
227
227
228 @property
228 @property
229 def revmap(self):
229 def revmap(self):
230 if self._revmap is None:
230 if self._revmap is None:
231 self._revmap = revmapmod.revmap(self.revmappath)
231 self._revmap = revmapmod.revmap(self.revmappath)
232 return self._revmap
232 return self._revmap
233
233
234 def close(self):
234 def close(self):
235 if self._revmap is not None:
235 if self._revmap is not None:
236 self._revmap.flush()
236 self._revmap.flush()
237 self._revmap = None
237 self._revmap = None
238 if self._linelog is not None:
238 if self._linelog is not None:
239 with open(self.linelogpath, b'wb') as f:
239 with open(self.linelogpath, b'wb') as f:
240 f.write(self._linelog.encode())
240 f.write(self._linelog.encode())
241 self._linelog = None
241 self._linelog = None
242
242
243 __del__ = close
243 __del__ = close
244
244
245 def rebuild(self):
245 def rebuild(self):
246 """delete linelog and revmap, useful for rebuilding"""
246 """delete linelog and revmap, useful for rebuilding"""
247 self.close()
247 self.close()
248 self._node2path.clear()
248 self._node2path.clear()
249 _unlinkpaths([self.revmappath, self.linelogpath])
249 _unlinkpaths([self.revmappath, self.linelogpath])
250
250
251 @property
251 @property
252 def lastnode(self):
252 def lastnode(self):
253 """return last node in revmap, or None if revmap is empty"""
253 """return last node in revmap, or None if revmap is empty"""
254 if self._revmap is None:
254 if self._revmap is None:
255 # fast path, read revmap without loading its full content
255 # fast path, read revmap without loading its full content
256 return revmapmod.getlastnode(self.revmappath)
256 return revmapmod.getlastnode(self.revmappath)
257 else:
257 else:
258 return self._revmap.rev2hsh(self._revmap.maxrev)
258 return self._revmap.rev2hsh(self._revmap.maxrev)
259
259
260 def isuptodate(self, master, strict=True):
260 def isuptodate(self, master, strict=True):
261 """return True if the revmap / linelog is up-to-date, or the file
261 """return True if the revmap / linelog is up-to-date, or the file
262 does not exist in the master revision. False otherwise.
262 does not exist in the master revision. False otherwise.
263
263
264 it tries to be fast and could return false negatives, because of the
264 it tries to be fast and could return false negatives, because of the
265 use of linkrev instead of introrev.
265 use of linkrev instead of introrev.
266
266
267 useful for both server and client to decide whether to update
267 useful for both server and client to decide whether to update
268 fastannotate cache or not.
268 fastannotate cache or not.
269
269
270 if strict is True, even if fctx exists in the revmap, but is not the
270 if strict is True, even if fctx exists in the revmap, but is not the
271 last node, isuptodate will return False. it's good for performance - no
271 last node, isuptodate will return False. it's good for performance - no
272 expensive check was done.
272 expensive check was done.
273
273
274 if strict is False, if fctx exists in the revmap, this function may
274 if strict is False, if fctx exists in the revmap, this function may
275 return True. this is useful for the client to skip downloading the
275 return True. this is useful for the client to skip downloading the
276 cache if the client's master is behind the server's.
276 cache if the client's master is behind the server's.
277 """
277 """
278 lastnode = self.lastnode
278 lastnode = self.lastnode
279 try:
279 try:
280 f = self._resolvefctx(master, resolverev=True)
280 f = self._resolvefctx(master, resolverev=True)
281 # choose linkrev instead of introrev as the check is meant to be
281 # choose linkrev instead of introrev as the check is meant to be
282 # *fast*.
282 # *fast*.
283 linknode = self.repo.changelog.node(f.linkrev())
283 linknode = self.repo.changelog.node(f.linkrev())
284 if not strict and lastnode and linknode != lastnode:
284 if not strict and lastnode and linknode != lastnode:
285 # check if f.node() is in the revmap. note: this loads the
285 # check if f.node() is in the revmap. note: this loads the
286 # revmap and can be slow.
286 # revmap and can be slow.
287 return self.revmap.hsh2rev(linknode) is not None
287 return self.revmap.hsh2rev(linknode) is not None
288 # avoid resolving old manifest, or slow adjustlinkrev to be fast,
288 # avoid resolving old manifest, or slow adjustlinkrev to be fast,
289 # false negatives are acceptable in this case.
289 # false negatives are acceptable in this case.
290 return linknode == lastnode
290 return linknode == lastnode
291 except LookupError:
291 except LookupError:
292 # master does not have the file, or the revmap is ahead
292 # master does not have the file, or the revmap is ahead
293 return True
293 return True
294
294
295 def annotate(self, rev, master=None, showpath=False, showlines=False):
295 def annotate(self, rev, master=None, showpath=False, showlines=False):
296 """incrementally update the cache so it includes revisions in the main
296 """incrementally update the cache so it includes revisions in the main
297 branch till 'master'. and run annotate on 'rev', which may or may not be
297 branch till 'master'. and run annotate on 'rev', which may or may not be
298 included in the main branch.
298 included in the main branch.
299
299
300 if master is None, do not update linelog.
300 if master is None, do not update linelog.
301
301
302 the first value returned is the annotate result, it is [(node, linenum)]
302 the first value returned is the annotate result, it is [(node, linenum)]
303 by default. [(node, linenum, path)] if showpath is True.
303 by default. [(node, linenum, path)] if showpath is True.
304
304
305 if showlines is True, a second value will be returned, it is a list of
305 if showlines is True, a second value will be returned, it is a list of
306 corresponding line contents.
306 corresponding line contents.
307 """
307 """
308
308
309 # the fast path test requires commit hash, convert rev number to hash,
309 # the fast path test requires commit hash, convert rev number to hash,
310 # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
310 # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
311 # command could give us a revision number even if the user passes a
311 # command could give us a revision number even if the user passes a
312 # commit hash.
312 # commit hash.
313 if isinstance(rev, int):
313 if isinstance(rev, int):
314 rev = hex(self.repo.changelog.node(rev))
314 rev = hex(self.repo.changelog.node(rev))
315
315
316 # fast path: if rev is in the main branch already
316 # fast path: if rev is in the main branch already
317 directly, revfctx = self.canannotatedirectly(rev)
317 directly, revfctx = self.canannotatedirectly(rev)
318 if directly:
318 if directly:
319 if self.ui.debugflag:
319 if self.ui.debugflag:
320 self.ui.debug(
320 self.ui.debug(
321 b'fastannotate: %s: using fast path '
321 b'fastannotate: %s: using fast path '
322 b'(resolved fctx: %s)\n'
322 b'(resolved fctx: %s)\n'
323 % (
323 % (
324 self.path,
324 self.path,
325 stringutil.pprint(util.safehasattr(revfctx, b'node')),
325 stringutil.pprint(util.safehasattr(revfctx, b'node')),
326 )
326 )
327 )
327 )
328 return self.annotatedirectly(revfctx, showpath, showlines)
328 return self.annotatedirectly(revfctx, showpath, showlines)
329
329
330 # resolve master
330 # resolve master
331 masterfctx = None
331 masterfctx = None
332 if master:
332 if master:
333 try:
333 try:
334 masterfctx = self._resolvefctx(
334 masterfctx = self._resolvefctx(
335 master, resolverev=True, adjustctx=True
335 master, resolverev=True, adjustctx=True
336 )
336 )
337 except LookupError: # master does not have the file
337 except LookupError: # master does not have the file
338 pass
338 pass
339 else:
339 else:
340 if masterfctx in self.revmap: # no need to update linelog
340 if masterfctx in self.revmap: # no need to update linelog
341 masterfctx = None
341 masterfctx = None
342
342
343 # ... - @ <- rev (can be an arbitrary changeset,
343 # ... - @ <- rev (can be an arbitrary changeset,
344 # / not necessarily a descendant
344 # / not necessarily a descendant
345 # master -> o of master)
345 # master -> o of master)
346 # |
346 # |
347 # a merge -> o 'o': new changesets in the main branch
347 # a merge -> o 'o': new changesets in the main branch
348 # |\ '#': revisions in the main branch that
348 # |\ '#': revisions in the main branch that
349 # o * exist in linelog / revmap
349 # o * exist in linelog / revmap
350 # | . '*': changesets in side branches, or
350 # | . '*': changesets in side branches, or
351 # last master -> # . descendants of master
351 # last master -> # . descendants of master
352 # | .
352 # | .
353 # # * joint: '#', and is a parent of a '*'
353 # # * joint: '#', and is a parent of a '*'
354 # |/
354 # |/
355 # a joint -> # ^^^^ --- side branches
355 # a joint -> # ^^^^ --- side branches
356 # |
356 # |
357 # ^ --- main branch (in linelog)
357 # ^ --- main branch (in linelog)
358
358
359 # these DFSes are similar to the traditional annotate algorithm.
359 # these DFSes are similar to the traditional annotate algorithm.
360 # we cannot really reuse the code for perf reason.
360 # we cannot really reuse the code for perf reason.
361
361
362 # 1st DFS calculates merges, joint points, and needed.
362 # 1st DFS calculates merges, joint points, and needed.
363 # "needed" is a simple reference counting dict to free items in
363 # "needed" is a simple reference counting dict to free items in
364 # "hist", reducing its memory usage otherwise could be huge.
364 # "hist", reducing its memory usage otherwise could be huge.
365 initvisit = [revfctx]
365 initvisit = [revfctx]
366 if masterfctx:
366 if masterfctx:
367 if masterfctx.rev() is None:
367 if masterfctx.rev() is None:
368 raise error.Abort(
368 raise error.Abort(
369 _(b'cannot update linelog to wdir()'),
369 _(b'cannot update linelog to wdir()'),
370 hint=_(b'set fastannotate.mainbranch'),
370 hint=_(b'set fastannotate.mainbranch'),
371 )
371 )
372 initvisit.append(masterfctx)
372 initvisit.append(masterfctx)
373 visit = initvisit[:]
373 visit = initvisit[:]
374 pcache = {}
374 pcache = {}
375 needed = {revfctx: 1}
375 needed = {revfctx: 1}
376 hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
376 hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
377 while visit:
377 while visit:
378 f = visit.pop()
378 f = visit.pop()
379 if f in pcache or f in hist:
379 if f in pcache or f in hist:
380 continue
380 continue
381 if f in self.revmap: # in the old main branch, it's a joint
381 if f in self.revmap: # in the old main branch, it's a joint
382 llrev = self.revmap.hsh2rev(f.node())
382 llrev = self.revmap.hsh2rev(f.node())
383 self.linelog.annotate(llrev)
383 self.linelog.annotate(llrev)
384 result = self.linelog.annotateresult
384 result = self.linelog.annotateresult
385 hist[f] = (result, f.data())
385 hist[f] = (result, f.data())
386 continue
386 continue
387 pl = self._parentfunc(f)
387 pl = self._parentfunc(f)
388 pcache[f] = pl
388 pcache[f] = pl
389 for p in pl:
389 for p in pl:
390 needed[p] = needed.get(p, 0) + 1
390 needed[p] = needed.get(p, 0) + 1
391 if p not in pcache:
391 if p not in pcache:
392 visit.append(p)
392 visit.append(p)
393
393
394 # 2nd (simple) DFS calculates new changesets in the main branch
394 # 2nd (simple) DFS calculates new changesets in the main branch
395 # ('o' nodes in # the above graph), so we know when to update linelog.
395 # ('o' nodes in # the above graph), so we know when to update linelog.
396 newmainbranch = set()
396 newmainbranch = set()
397 f = masterfctx
397 f = masterfctx
398 while f and f not in self.revmap:
398 while f and f not in self.revmap:
399 newmainbranch.add(f)
399 newmainbranch.add(f)
400 pl = pcache[f]
400 pl = pcache[f]
401 if pl:
401 if pl:
402 f = pl[0]
402 f = pl[0]
403 else:
403 else:
404 f = None
404 f = None
405 break
405 break
406
406
407 # f, if present, is the position where the last build stopped at, and
407 # f, if present, is the position where the last build stopped at, and
408 # should be the "master" last time. check to see if we can continue
408 # should be the "master" last time. check to see if we can continue
409 # building the linelog incrementally. (we cannot if diverged)
409 # building the linelog incrementally. (we cannot if diverged)
410 if masterfctx is not None:
410 if masterfctx is not None:
411 self._checklastmasterhead(f)
411 self._checklastmasterhead(f)
412
412
413 if self.ui.debugflag:
413 if self.ui.debugflag:
414 if newmainbranch:
414 if newmainbranch:
415 self.ui.debug(
415 self.ui.debug(
416 b'fastannotate: %s: %d new changesets in the main'
416 b'fastannotate: %s: %d new changesets in the main'
417 b' branch\n' % (self.path, len(newmainbranch))
417 b' branch\n' % (self.path, len(newmainbranch))
418 )
418 )
419 elif not hist: # no joints, no updates
419 elif not hist: # no joints, no updates
420 self.ui.debug(
420 self.ui.debug(
421 b'fastannotate: %s: linelog cannot help in '
421 b'fastannotate: %s: linelog cannot help in '
422 b'annotating this revision\n' % self.path
422 b'annotating this revision\n' % self.path
423 )
423 )
424
424
425 # prepare annotateresult so we can update linelog incrementally
425 # prepare annotateresult so we can update linelog incrementally
426 self.linelog.annotate(self.linelog.maxrev)
426 self.linelog.annotate(self.linelog.maxrev)
427
427
428 # 3rd DFS does the actual annotate
428 # 3rd DFS does the actual annotate
429 visit = initvisit[:]
429 visit = initvisit[:]
430 progress = self.ui.makeprogress(
430 progress = self.ui.makeprogress(
431 b'building cache', total=len(newmainbranch)
431 b'building cache', total=len(newmainbranch)
432 )
432 )
433 while visit:
433 while visit:
434 f = visit[-1]
434 f = visit[-1]
435 if f in hist:
435 if f in hist:
436 visit.pop()
436 visit.pop()
437 continue
437 continue
438
438
439 ready = True
439 ready = True
440 pl = pcache[f]
440 pl = pcache[f]
441 for p in pl:
441 for p in pl:
442 if p not in hist:
442 if p not in hist:
443 ready = False
443 ready = False
444 visit.append(p)
444 visit.append(p)
445 if not ready:
445 if not ready:
446 continue
446 continue
447
447
448 visit.pop()
448 visit.pop()
449 blocks = None # mdiff blocks, used for appending linelog
449 blocks = None # mdiff blocks, used for appending linelog
450 ismainbranch = f in newmainbranch
450 ismainbranch = f in newmainbranch
451 # curr is the same as the traditional annotate algorithm,
451 # curr is the same as the traditional annotate algorithm,
452 # if we only care about linear history (do not follow merge),
452 # if we only care about linear history (do not follow merge),
453 # then curr is not actually used.
453 # then curr is not actually used.
454 assert f not in hist
454 assert f not in hist
455 curr = _decorate(f)
455 curr = _decorate(f)
456 for i, p in enumerate(pl):
456 for i, p in enumerate(pl):
457 bs = list(self._diffblocks(hist[p][1], curr[1]))
457 bs = list(self._diffblocks(hist[p][1], curr[1]))
458 if i == 0 and ismainbranch:
458 if i == 0 and ismainbranch:
459 blocks = bs
459 blocks = bs
460 curr = _pair(hist[p], curr, bs)
460 curr = _pair(hist[p], curr, bs)
461 if needed[p] == 1:
461 if needed[p] == 1:
462 del hist[p]
462 del hist[p]
463 del needed[p]
463 del needed[p]
464 else:
464 else:
465 needed[p] -= 1
465 needed[p] -= 1
466
466
467 hist[f] = curr
467 hist[f] = curr
468 del pcache[f]
468 del pcache[f]
469
469
470 if ismainbranch: # need to write to linelog
470 if ismainbranch: # need to write to linelog
471 progress.increment()
471 progress.increment()
472 bannotated = None
472 bannotated = None
473 if len(pl) == 2 and self.opts.followmerge: # merge
473 if len(pl) == 2 and self.opts.followmerge: # merge
474 bannotated = curr[0]
474 bannotated = curr[0]
475 if blocks is None: # no parents, add an empty one
475 if blocks is None: # no parents, add an empty one
476 blocks = list(self._diffblocks(b'', curr[1]))
476 blocks = list(self._diffblocks(b'', curr[1]))
477 self._appendrev(f, blocks, bannotated)
477 self._appendrev(f, blocks, bannotated)
478 elif showpath: # not append linelog, but we need to record path
478 elif showpath: # not append linelog, but we need to record path
479 self._node2path[f.node()] = f.path()
479 self._node2path[f.node()] = f.path()
480
480
481 progress.complete()
481 progress.complete()
482
482
483 result = [
483 result = [
484 ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
484 ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
485 for fr, l in hist[revfctx][0]
485 for fr, l in hist[revfctx][0]
486 ] # [(node, linenumber)]
486 ] # [(node, linenumber)]
487 return self._refineannotateresult(result, revfctx, showpath, showlines)
487 return self._refineannotateresult(result, revfctx, showpath, showlines)
488
488
489 def canannotatedirectly(self, rev):
489 def canannotatedirectly(self, rev):
490 """(str) -> bool, fctx or node.
490 """(str) -> bool, fctx or node.
491 return (True, f) if we can annotate without updating the linelog, pass
491 return (True, f) if we can annotate without updating the linelog, pass
492 f to annotatedirectly.
492 f to annotatedirectly.
493 return (False, f) if we need extra calculation. f is the fctx resolved
493 return (False, f) if we need extra calculation. f is the fctx resolved
494 from rev.
494 from rev.
495 """
495 """
496 result = True
496 result = True
497 f = None
497 f = None
498 if not isinstance(rev, int) and rev is not None:
498 if not isinstance(rev, int) and rev is not None:
499 hsh = {20: bytes, 40: bin}.get(len(rev), lambda x: None)(rev)
499 hsh = {20: bytes, 40: bin}.get(len(rev), lambda x: None)(rev)
500 if hsh is not None and (hsh, self.path) in self.revmap:
500 if hsh is not None and (hsh, self.path) in self.revmap:
501 f = hsh
501 f = hsh
502 if f is None:
502 if f is None:
503 adjustctx = b'linkrev' if self._perfhack else True
503 adjustctx = b'linkrev' if self._perfhack else True
504 f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
504 f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
505 result = f in self.revmap
505 result = f in self.revmap
506 if not result and self._perfhack:
506 if not result and self._perfhack:
507 # redo the resolution without perfhack - as we are going to
507 # redo the resolution without perfhack - as we are going to
508 # do write operations, we need a correct fctx.
508 # do write operations, we need a correct fctx.
509 f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
509 f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
510 return result, f
510 return result, f
511
511
512 def annotatealllines(self, rev, showpath=False, showlines=False):
512 def annotatealllines(self, rev, showpath=False, showlines=False):
513 """(rev : str) -> [(node : str, linenum : int, path : str)]
513 """(rev : str) -> [(node : str, linenum : int, path : str)]
514
514
515 the result has the same format with annotate, but include all (including
515 the result has the same format with annotate, but include all (including
516 deleted) lines up to rev. call this after calling annotate(rev, ...) for
516 deleted) lines up to rev. call this after calling annotate(rev, ...) for
517 better performance and accuracy.
517 better performance and accuracy.
518 """
518 """
519 revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
519 revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
520
520
521 # find a chain from rev to anything in the mainbranch
521 # find a chain from rev to anything in the mainbranch
522 if revfctx not in self.revmap:
522 if revfctx not in self.revmap:
523 chain = [revfctx]
523 chain = [revfctx]
524 a = b''
524 a = b''
525 while True:
525 while True:
526 f = chain[-1]
526 f = chain[-1]
527 pl = self._parentfunc(f)
527 pl = self._parentfunc(f)
528 if not pl:
528 if not pl:
529 break
529 break
530 if pl[0] in self.revmap:
530 if pl[0] in self.revmap:
531 a = pl[0].data()
531 a = pl[0].data()
532 break
532 break
533 chain.append(pl[0])
533 chain.append(pl[0])
534
534
535 # both self.linelog and self.revmap is backed by filesystem. now
535 # both self.linelog and self.revmap is backed by filesystem. now
536 # we want to modify them but do not want to write changes back to
536 # we want to modify them but do not want to write changes back to
537 # files. so we create in-memory objects and copy them. it's like
537 # files. so we create in-memory objects and copy them. it's like
538 # a "fork".
538 # a "fork".
539 linelog = linelogmod.linelog()
539 linelog = linelogmod.linelog()
540 linelog.copyfrom(self.linelog)
540 linelog.copyfrom(self.linelog)
541 linelog.annotate(linelog.maxrev)
541 linelog.annotate(linelog.maxrev)
542 revmap = revmapmod.revmap()
542 revmap = revmapmod.revmap()
543 revmap.copyfrom(self.revmap)
543 revmap.copyfrom(self.revmap)
544
544
545 for f in reversed(chain):
545 for f in reversed(chain):
546 b = f.data()
546 b = f.data()
547 blocks = list(self._diffblocks(a, b))
547 blocks = list(self._diffblocks(a, b))
548 self._doappendrev(linelog, revmap, f, blocks)
548 self._doappendrev(linelog, revmap, f, blocks)
549 a = b
549 a = b
550 else:
550 else:
551 # fastpath: use existing linelog, revmap as we don't write to them
551 # fastpath: use existing linelog, revmap as we don't write to them
552 linelog = self.linelog
552 linelog = self.linelog
553 revmap = self.revmap
553 revmap = self.revmap
554
554
555 lines = linelog.getalllines()
555 lines = linelog.getalllines()
556 hsh = revfctx.node()
556 hsh = revfctx.node()
557 llrev = revmap.hsh2rev(hsh)
557 llrev = revmap.hsh2rev(hsh)
558 result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
558 result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
559 # cannot use _refineannotateresult since we need custom logic for
559 # cannot use _refineannotateresult since we need custom logic for
560 # resolving line contents
560 # resolving line contents
561 if showpath:
561 if showpath:
562 result = self._addpathtoresult(result, revmap)
562 result = self._addpathtoresult(result, revmap)
563 if showlines:
563 if showlines:
564 linecontents = self._resolvelines(result, revmap, linelog)
564 linecontents = self._resolvelines(result, revmap, linelog)
565 result = (result, linecontents)
565 result = (result, linecontents)
566 return result
566 return result
567
567
568 def _resolvelines(self, annotateresult, revmap, linelog):
568 def _resolvelines(self, annotateresult, revmap, linelog):
569 """(annotateresult) -> [line]. designed for annotatealllines.
569 """(annotateresult) -> [line]. designed for annotatealllines.
570 this is probably the most inefficient code in the whole fastannotate
570 this is probably the most inefficient code in the whole fastannotate
571 directory. but we have made a decision that the linelog does not
571 directory. but we have made a decision that the linelog does not
572 store line contents. so getting them requires random accesses to
572 store line contents. so getting them requires random accesses to
573 the revlog data, since they can be many, it can be very slow.
573 the revlog data, since they can be many, it can be very slow.
574 """
574 """
575 # [llrev]
575 # [llrev]
576 revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
576 revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
577 result = [None] * len(annotateresult)
577 result = [None] * len(annotateresult)
578 # {(rev, linenum): [lineindex]}
578 # {(rev, linenum): [lineindex]}
579 key2idxs = collections.defaultdict(list)
579 key2idxs = collections.defaultdict(list)
580 for i in pycompat.xrange(len(result)):
580 for i in range(len(result)):
581 key2idxs[(revs[i], annotateresult[i][1])].append(i)
581 key2idxs[(revs[i], annotateresult[i][1])].append(i)
582 while key2idxs:
582 while key2idxs:
583 # find an unresolved line and its linelog rev to annotate
583 # find an unresolved line and its linelog rev to annotate
584 hsh = None
584 hsh = None
585 try:
585 try:
586 for (rev, _linenum), idxs in key2idxs.items():
586 for (rev, _linenum), idxs in key2idxs.items():
587 if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
587 if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
588 continue
588 continue
589 hsh = annotateresult[idxs[0]][0]
589 hsh = annotateresult[idxs[0]][0]
590 break
590 break
591 except StopIteration: # no more unresolved lines
591 except StopIteration: # no more unresolved lines
592 return result
592 return result
593 if hsh is None:
593 if hsh is None:
594 # the remaining key2idxs are not in main branch, resolving them
594 # the remaining key2idxs are not in main branch, resolving them
595 # using the hard way...
595 # using the hard way...
596 revlines = {}
596 revlines = {}
597 for (rev, linenum), idxs in key2idxs.items():
597 for (rev, linenum), idxs in key2idxs.items():
598 if rev not in revlines:
598 if rev not in revlines:
599 hsh = annotateresult[idxs[0]][0]
599 hsh = annotateresult[idxs[0]][0]
600 if self.ui.debugflag:
600 if self.ui.debugflag:
601 self.ui.debug(
601 self.ui.debug(
602 b'fastannotate: reading %s line #%d '
602 b'fastannotate: reading %s line #%d '
603 b'to resolve lines %r\n'
603 b'to resolve lines %r\n'
604 % (short(hsh), linenum, idxs)
604 % (short(hsh), linenum, idxs)
605 )
605 )
606 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
606 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
607 lines = mdiff.splitnewlines(fctx.data())
607 lines = mdiff.splitnewlines(fctx.data())
608 revlines[rev] = lines
608 revlines[rev] = lines
609 for idx in idxs:
609 for idx in idxs:
610 result[idx] = revlines[rev][linenum]
610 result[idx] = revlines[rev][linenum]
611 assert all(x is not None for x in result)
611 assert all(x is not None for x in result)
612 return result
612 return result
613
613
614 # run the annotate and the lines should match to the file content
614 # run the annotate and the lines should match to the file content
615 self.ui.debug(
615 self.ui.debug(
616 b'fastannotate: annotate %s to resolve lines\n' % short(hsh)
616 b'fastannotate: annotate %s to resolve lines\n' % short(hsh)
617 )
617 )
618 linelog.annotate(rev)
618 linelog.annotate(rev)
619 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
619 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
620 annotated = linelog.annotateresult
620 annotated = linelog.annotateresult
621 lines = mdiff.splitnewlines(fctx.data())
621 lines = mdiff.splitnewlines(fctx.data())
622 if len(lines) != len(annotated):
622 if len(lines) != len(annotated):
623 raise faerror.CorruptedFileError(b'unexpected annotated lines')
623 raise faerror.CorruptedFileError(b'unexpected annotated lines')
624 # resolve lines from the annotate result
624 # resolve lines from the annotate result
625 for i, line in enumerate(lines):
625 for i, line in enumerate(lines):
626 k = annotated[i]
626 k = annotated[i]
627 if k in key2idxs:
627 if k in key2idxs:
628 for idx in key2idxs[k]:
628 for idx in key2idxs[k]:
629 result[idx] = line
629 result[idx] = line
630 del key2idxs[k]
630 del key2idxs[k]
631 return result
631 return result
632
632
633 def annotatedirectly(self, f, showpath, showlines):
633 def annotatedirectly(self, f, showpath, showlines):
634 """like annotate, but when we know that f is in linelog.
634 """like annotate, but when we know that f is in linelog.
635 f can be either a 20-char str (node) or a fctx. this is for perf - in
635 f can be either a 20-char str (node) or a fctx. this is for perf - in
636 the best case, the user provides a node and we don't need to read the
636 the best case, the user provides a node and we don't need to read the
637 filelog or construct any filecontext.
637 filelog or construct any filecontext.
638 """
638 """
639 if isinstance(f, bytes):
639 if isinstance(f, bytes):
640 hsh = f
640 hsh = f
641 else:
641 else:
642 hsh = f.node()
642 hsh = f.node()
643 llrev = self.revmap.hsh2rev(hsh)
643 llrev = self.revmap.hsh2rev(hsh)
644 if not llrev:
644 if not llrev:
645 raise faerror.CorruptedFileError(b'%s is not in revmap' % hex(hsh))
645 raise faerror.CorruptedFileError(b'%s is not in revmap' % hex(hsh))
646 if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
646 if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
647 raise faerror.CorruptedFileError(
647 raise faerror.CorruptedFileError(
648 b'%s is not in revmap mainbranch' % hex(hsh)
648 b'%s is not in revmap mainbranch' % hex(hsh)
649 )
649 )
650 self.linelog.annotate(llrev)
650 self.linelog.annotate(llrev)
651 result = [
651 result = [
652 (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult
652 (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult
653 ]
653 ]
654 return self._refineannotateresult(result, f, showpath, showlines)
654 return self._refineannotateresult(result, f, showpath, showlines)
655
655
656 def _refineannotateresult(self, result, f, showpath, showlines):
656 def _refineannotateresult(self, result, f, showpath, showlines):
657 """add the missing path or line contents, they can be expensive.
657 """add the missing path or line contents, they can be expensive.
658 f could be either node or fctx.
658 f could be either node or fctx.
659 """
659 """
660 if showpath:
660 if showpath:
661 result = self._addpathtoresult(result)
661 result = self._addpathtoresult(result)
662 if showlines:
662 if showlines:
663 if isinstance(f, bytes): # f: node or fctx
663 if isinstance(f, bytes): # f: node or fctx
664 llrev = self.revmap.hsh2rev(f)
664 llrev = self.revmap.hsh2rev(f)
665 fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
665 fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
666 else:
666 else:
667 fctx = f
667 fctx = f
668 lines = mdiff.splitnewlines(fctx.data())
668 lines = mdiff.splitnewlines(fctx.data())
669 if len(lines) != len(result): # linelog is probably corrupted
669 if len(lines) != len(result): # linelog is probably corrupted
670 raise faerror.CorruptedFileError()
670 raise faerror.CorruptedFileError()
671 result = (result, lines)
671 result = (result, lines)
672 return result
672 return result
673
673
674 def _appendrev(self, fctx, blocks, bannotated=None):
674 def _appendrev(self, fctx, blocks, bannotated=None):
675 self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
675 self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
676
676
677 def _diffblocks(self, a, b):
677 def _diffblocks(self, a, b):
678 return mdiff.allblocks(a, b, self.opts.diffopts)
678 return mdiff.allblocks(a, b, self.opts.diffopts)
679
679
680 @staticmethod
680 @staticmethod
681 def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
681 def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
682 """append a revision to linelog and revmap"""
682 """append a revision to linelog and revmap"""
683
683
684 def getllrev(f):
684 def getllrev(f):
685 """(fctx) -> int"""
685 """(fctx) -> int"""
686 # f should not be a linelog revision
686 # f should not be a linelog revision
687 if isinstance(f, int):
687 if isinstance(f, int):
688 raise error.ProgrammingError(b'f should not be an int')
688 raise error.ProgrammingError(b'f should not be an int')
689 # f is a fctx, allocate linelog rev on demand
689 # f is a fctx, allocate linelog rev on demand
690 hsh = f.node()
690 hsh = f.node()
691 rev = revmap.hsh2rev(hsh)
691 rev = revmap.hsh2rev(hsh)
692 if rev is None:
692 if rev is None:
693 rev = revmap.append(hsh, sidebranch=True, path=f.path())
693 rev = revmap.append(hsh, sidebranch=True, path=f.path())
694 return rev
694 return rev
695
695
696 # append sidebranch revisions to revmap
696 # append sidebranch revisions to revmap
697 siderevs = []
697 siderevs = []
698 siderevmap = {} # node: int
698 siderevmap = {} # node: int
699 if bannotated is not None:
699 if bannotated is not None:
700 for (a1, a2, b1, b2), op in blocks:
700 for (a1, a2, b1, b2), op in blocks:
701 if op != b'=':
701 if op != b'=':
702 # f could be either linelong rev, or fctx.
702 # f could be either linelong rev, or fctx.
703 siderevs += [
703 siderevs += [
704 f
704 f
705 for f, l in bannotated[b1:b2]
705 for f, l in bannotated[b1:b2]
706 if not isinstance(f, int)
706 if not isinstance(f, int)
707 ]
707 ]
708 siderevs = set(siderevs)
708 siderevs = set(siderevs)
709 if fctx in siderevs: # mainnode must be appended seperately
709 if fctx in siderevs: # mainnode must be appended seperately
710 siderevs.remove(fctx)
710 siderevs.remove(fctx)
711 for f in siderevs:
711 for f in siderevs:
712 siderevmap[f] = getllrev(f)
712 siderevmap[f] = getllrev(f)
713
713
714 # the changeset in the main branch, could be a merge
714 # the changeset in the main branch, could be a merge
715 llrev = revmap.append(fctx.node(), path=fctx.path())
715 llrev = revmap.append(fctx.node(), path=fctx.path())
716 siderevmap[fctx] = llrev
716 siderevmap[fctx] = llrev
717
717
718 for (a1, a2, b1, b2), op in reversed(blocks):
718 for (a1, a2, b1, b2), op in reversed(blocks):
719 if op == b'=':
719 if op == b'=':
720 continue
720 continue
721 if bannotated is None:
721 if bannotated is None:
722 linelog.replacelines(llrev, a1, a2, b1, b2)
722 linelog.replacelines(llrev, a1, a2, b1, b2)
723 else:
723 else:
724 blines = [
724 blines = [
725 ((r if isinstance(r, int) else siderevmap[r]), l)
725 ((r if isinstance(r, int) else siderevmap[r]), l)
726 for r, l in bannotated[b1:b2]
726 for r, l in bannotated[b1:b2]
727 ]
727 ]
728 linelog.replacelines_vec(llrev, a1, a2, blines)
728 linelog.replacelines_vec(llrev, a1, a2, blines)
729
729
730 def _addpathtoresult(self, annotateresult, revmap=None):
730 def _addpathtoresult(self, annotateresult, revmap=None):
731 """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
731 """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
732 if revmap is None:
732 if revmap is None:
733 revmap = self.revmap
733 revmap = self.revmap
734
734
735 def _getpath(nodeid):
735 def _getpath(nodeid):
736 path = self._node2path.get(nodeid)
736 path = self._node2path.get(nodeid)
737 if path is None:
737 if path is None:
738 path = revmap.rev2path(revmap.hsh2rev(nodeid))
738 path = revmap.rev2path(revmap.hsh2rev(nodeid))
739 self._node2path[nodeid] = path
739 self._node2path[nodeid] = path
740 return path
740 return path
741
741
742 return [(n, l, _getpath(n)) for n, l in annotateresult]
742 return [(n, l, _getpath(n)) for n, l in annotateresult]
743
743
744 def _checklastmasterhead(self, fctx):
744 def _checklastmasterhead(self, fctx):
745 """check if fctx is the master's head last time, raise if not"""
745 """check if fctx is the master's head last time, raise if not"""
746 if fctx is None:
746 if fctx is None:
747 llrev = 0
747 llrev = 0
748 else:
748 else:
749 llrev = self.revmap.hsh2rev(fctx.node())
749 llrev = self.revmap.hsh2rev(fctx.node())
750 if not llrev:
750 if not llrev:
751 raise faerror.CannotReuseError()
751 raise faerror.CannotReuseError()
752 if self.linelog.maxrev != llrev:
752 if self.linelog.maxrev != llrev:
753 raise faerror.CannotReuseError()
753 raise faerror.CannotReuseError()
754
754
755 @util.propertycache
755 @util.propertycache
756 def _parentfunc(self):
756 def _parentfunc(self):
757 """-> (fctx) -> [fctx]"""
757 """-> (fctx) -> [fctx]"""
758 followrename = self.opts.followrename
758 followrename = self.opts.followrename
759 followmerge = self.opts.followmerge
759 followmerge = self.opts.followmerge
760
760
761 def parents(f):
761 def parents(f):
762 pl = _parents(f, follow=followrename)
762 pl = _parents(f, follow=followrename)
763 if not followmerge:
763 if not followmerge:
764 pl = pl[:1]
764 pl = pl[:1]
765 return pl
765 return pl
766
766
767 return parents
767 return parents
768
768
769 @util.propertycache
769 @util.propertycache
770 def _perfhack(self):
770 def _perfhack(self):
771 return self.ui.configbool(b'fastannotate', b'perfhack')
771 return self.ui.configbool(b'fastannotate', b'perfhack')
772
772
773 def _resolvefctx(self, rev, path=None, **kwds):
773 def _resolvefctx(self, rev, path=None, **kwds):
774 return resolvefctx(self.repo, rev, (path or self.path), **kwds)
774 return resolvefctx(self.repo, rev, (path or self.path), **kwds)
775
775
776
776
777 def _unlinkpaths(paths):
777 def _unlinkpaths(paths):
778 """silent, best-effort unlink"""
778 """silent, best-effort unlink"""
779 for path in paths:
779 for path in paths:
780 try:
780 try:
781 util.unlink(path)
781 util.unlink(path)
782 except OSError:
782 except OSError:
783 pass
783 pass
784
784
785
785
786 class pathhelper:
786 class pathhelper:
787 """helper for getting paths for lockfile, linelog and revmap"""
787 """helper for getting paths for lockfile, linelog and revmap"""
788
788
789 def __init__(self, repo, path, opts=defaultopts):
789 def __init__(self, repo, path, opts=defaultopts):
790 # different options use different directories
790 # different options use different directories
791 self._vfspath = os.path.join(
791 self._vfspath = os.path.join(
792 b'fastannotate', opts.shortstr, encodedir(path)
792 b'fastannotate', opts.shortstr, encodedir(path)
793 )
793 )
794 self._repo = repo
794 self._repo = repo
795
795
796 @property
796 @property
797 def dirname(self):
797 def dirname(self):
798 return os.path.dirname(self._repo.vfs.join(self._vfspath))
798 return os.path.dirname(self._repo.vfs.join(self._vfspath))
799
799
800 @property
800 @property
801 def linelogpath(self):
801 def linelogpath(self):
802 return self._repo.vfs.join(self._vfspath + b'.l')
802 return self._repo.vfs.join(self._vfspath + b'.l')
803
803
804 def lock(self):
804 def lock(self):
805 return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock')
805 return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock')
806
806
807 @property
807 @property
808 def revmappath(self):
808 def revmappath(self):
809 return self._repo.vfs.join(self._vfspath + b'.m')
809 return self._repo.vfs.join(self._vfspath + b'.m')
810
810
811
811
812 @contextlib.contextmanager
812 @contextlib.contextmanager
813 def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
813 def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
814 """context needed to perform (fast) annotate on a file
814 """context needed to perform (fast) annotate on a file
815
815
816 an annotatecontext of a single file consists of two structures: the
816 an annotatecontext of a single file consists of two structures: the
817 linelog and the revmap. this function takes care of locking. only 1
817 linelog and the revmap. this function takes care of locking. only 1
818 process is allowed to write that file's linelog and revmap at a time.
818 process is allowed to write that file's linelog and revmap at a time.
819
819
820 when something goes wrong, this function will assume the linelog and the
820 when something goes wrong, this function will assume the linelog and the
821 revmap are in a bad state, and remove them from disk.
821 revmap are in a bad state, and remove them from disk.
822
822
823 use this function in the following way:
823 use this function in the following way:
824
824
825 with annotatecontext(...) as actx:
825 with annotatecontext(...) as actx:
826 actx. ....
826 actx. ....
827 """
827 """
828 helper = pathhelper(repo, path, opts)
828 helper = pathhelper(repo, path, opts)
829 util.makedirs(helper.dirname)
829 util.makedirs(helper.dirname)
830 revmappath = helper.revmappath
830 revmappath = helper.revmappath
831 linelogpath = helper.linelogpath
831 linelogpath = helper.linelogpath
832 actx = None
832 actx = None
833 try:
833 try:
834 with helper.lock():
834 with helper.lock():
835 actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
835 actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
836 if rebuild:
836 if rebuild:
837 actx.rebuild()
837 actx.rebuild()
838 yield actx
838 yield actx
839 except Exception:
839 except Exception:
840 if actx is not None:
840 if actx is not None:
841 actx.rebuild()
841 actx.rebuild()
842 repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
842 repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
843 raise
843 raise
844 finally:
844 finally:
845 if actx is not None:
845 if actx is not None:
846 actx.close()
846 actx.close()
847
847
848
848
849 def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
849 def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
850 """like annotatecontext but get the context from a fctx. convenient when
850 """like annotatecontext but get the context from a fctx. convenient when
851 used in fctx.annotate
851 used in fctx.annotate
852 """
852 """
853 repo = fctx._repo
853 repo = fctx._repo
854 path = fctx._path
854 path = fctx._path
855 if repo.ui.configbool(b'fastannotate', b'forcefollow', True):
855 if repo.ui.configbool(b'fastannotate', b'forcefollow', True):
856 follow = True
856 follow = True
857 aopts = annotateopts(diffopts=diffopts, followrename=follow)
857 aopts = annotateopts(diffopts=diffopts, followrename=follow)
858 return annotatecontext(repo, path, aopts, rebuild)
858 return annotatecontext(repo, path, aopts, rebuild)
@@ -1,176 +1,176 b''
1 # Copyright 2016-present Facebook. All Rights Reserved.
1 # Copyright 2016-present Facebook. All Rights Reserved.
2 #
2 #
3 # format: defines the format used to output annotate result
3 # format: defines the format used to output annotate result
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from mercurial.node import (
8 from mercurial.node import (
9 hex,
9 hex,
10 short,
10 short,
11 )
11 )
12 from mercurial import (
12 from mercurial import (
13 encoding,
13 encoding,
14 pycompat,
14 pycompat,
15 templatefilters,
15 templatefilters,
16 util,
16 util,
17 )
17 )
18 from mercurial.utils import dateutil
18 from mercurial.utils import dateutil
19
19
20 # imitating mercurial.commands.annotate, not using the vanilla formatter since
20 # imitating mercurial.commands.annotate, not using the vanilla formatter since
21 # the data structures are a bit different, and we have some fast paths.
21 # the data structures are a bit different, and we have some fast paths.
22 class defaultformatter:
22 class defaultformatter:
23 """the default formatter that does leftpad and support some common flags"""
23 """the default formatter that does leftpad and support some common flags"""
24
24
25 def __init__(self, ui, repo, opts):
25 def __init__(self, ui, repo, opts):
26 self.ui = ui
26 self.ui = ui
27 self.opts = opts
27 self.opts = opts
28
28
29 if ui.quiet:
29 if ui.quiet:
30 datefunc = dateutil.shortdate
30 datefunc = dateutil.shortdate
31 else:
31 else:
32 datefunc = dateutil.datestr
32 datefunc = dateutil.datestr
33 datefunc = util.cachefunc(datefunc)
33 datefunc = util.cachefunc(datefunc)
34 getctx = util.cachefunc(lambda x: repo[x[0]])
34 getctx = util.cachefunc(lambda x: repo[x[0]])
35 hexfunc = self._hexfunc
35 hexfunc = self._hexfunc
36
36
37 # special handling working copy "changeset" and "rev" functions
37 # special handling working copy "changeset" and "rev" functions
38 if self.opts.get(b'rev') == b'wdir()':
38 if self.opts.get(b'rev') == b'wdir()':
39 orig = hexfunc
39 orig = hexfunc
40 hexfunc = lambda x: None if x is None else orig(x)
40 hexfunc = lambda x: None if x is None else orig(x)
41 wnode = hexfunc(repo[b'.'].node()) + b'+'
41 wnode = hexfunc(repo[b'.'].node()) + b'+'
42 wrev = b'%d' % repo[b'.'].rev()
42 wrev = b'%d' % repo[b'.'].rev()
43 wrevpad = b''
43 wrevpad = b''
44 if not opts.get(b'changeset'): # only show + if changeset is hidden
44 if not opts.get(b'changeset'): # only show + if changeset is hidden
45 wrev += b'+'
45 wrev += b'+'
46 wrevpad = b' '
46 wrevpad = b' '
47 revenc = lambda x: wrev if x is None else (b'%d' % x) + wrevpad
47 revenc = lambda x: wrev if x is None else (b'%d' % x) + wrevpad
48
48
49 def csetenc(x):
49 def csetenc(x):
50 if x is None:
50 if x is None:
51 return wnode
51 return wnode
52 return pycompat.bytestr(x) + b' '
52 return pycompat.bytestr(x) + b' '
53
53
54 else:
54 else:
55 revenc = csetenc = pycompat.bytestr
55 revenc = csetenc = pycompat.bytestr
56
56
57 # opt name, separator, raw value (for json/plain), encoder (for plain)
57 # opt name, separator, raw value (for json/plain), encoder (for plain)
58 opmap = [
58 opmap = [
59 (b'user', b' ', lambda x: getctx(x).user(), ui.shortuser),
59 (b'user', b' ', lambda x: getctx(x).user(), ui.shortuser),
60 (b'number', b' ', lambda x: getctx(x).rev(), revenc),
60 (b'number', b' ', lambda x: getctx(x).rev(), revenc),
61 (b'changeset', b' ', lambda x: hexfunc(x[0]), csetenc),
61 (b'changeset', b' ', lambda x: hexfunc(x[0]), csetenc),
62 (b'date', b' ', lambda x: getctx(x).date(), datefunc),
62 (b'date', b' ', lambda x: getctx(x).date(), datefunc),
63 (b'file', b' ', lambda x: x[2], pycompat.bytestr),
63 (b'file', b' ', lambda x: x[2], pycompat.bytestr),
64 (b'line_number', b':', lambda x: x[1] + 1, pycompat.bytestr),
64 (b'line_number', b':', lambda x: x[1] + 1, pycompat.bytestr),
65 ]
65 ]
66 fieldnamemap = {b'number': b'rev', b'changeset': b'node'}
66 fieldnamemap = {b'number': b'rev', b'changeset': b'node'}
67 funcmap = [
67 funcmap = [
68 (get, sep, fieldnamemap.get(op, op), enc)
68 (get, sep, fieldnamemap.get(op, op), enc)
69 for op, sep, get, enc in opmap
69 for op, sep, get, enc in opmap
70 if opts.get(op)
70 if opts.get(op)
71 ]
71 ]
72 # no separator for first column
72 # no separator for first column
73 funcmap[0] = list(funcmap[0])
73 funcmap[0] = list(funcmap[0])
74 funcmap[0][1] = b''
74 funcmap[0][1] = b''
75 self.funcmap = funcmap
75 self.funcmap = funcmap
76
76
77 def write(self, annotatedresult, lines=None, existinglines=None):
77 def write(self, annotatedresult, lines=None, existinglines=None):
78 """(annotateresult, [str], set([rev, linenum])) -> None. write output.
78 """(annotateresult, [str], set([rev, linenum])) -> None. write output.
79 annotateresult can be [(node, linenum, path)], or [(node, linenum)]
79 annotateresult can be [(node, linenum, path)], or [(node, linenum)]
80 """
80 """
81 pieces = [] # [[str]]
81 pieces = [] # [[str]]
82 maxwidths = [] # [int]
82 maxwidths = [] # [int]
83
83
84 # calculate padding
84 # calculate padding
85 for f, sep, name, enc in self.funcmap:
85 for f, sep, name, enc in self.funcmap:
86 l = [enc(f(x)) for x in annotatedresult]
86 l = [enc(f(x)) for x in annotatedresult]
87 pieces.append(l)
87 pieces.append(l)
88 if name in [b'node', b'date']: # node and date has fixed size
88 if name in [b'node', b'date']: # node and date has fixed size
89 l = l[:1]
89 l = l[:1]
90 widths = pycompat.maplist(encoding.colwidth, set(l))
90 widths = pycompat.maplist(encoding.colwidth, set(l))
91 maxwidth = max(widths) if widths else 0
91 maxwidth = max(widths) if widths else 0
92 maxwidths.append(maxwidth)
92 maxwidths.append(maxwidth)
93
93
94 # buffered output
94 # buffered output
95 result = b''
95 result = b''
96 for i in pycompat.xrange(len(annotatedresult)):
96 for i in range(len(annotatedresult)):
97 for j, p in enumerate(pieces):
97 for j, p in enumerate(pieces):
98 sep = self.funcmap[j][1]
98 sep = self.funcmap[j][1]
99 padding = b' ' * (maxwidths[j] - len(p[i]))
99 padding = b' ' * (maxwidths[j] - len(p[i]))
100 result += sep + padding + p[i]
100 result += sep + padding + p[i]
101 if lines:
101 if lines:
102 if existinglines is None:
102 if existinglines is None:
103 result += b': ' + lines[i]
103 result += b': ' + lines[i]
104 else: # extra formatting showing whether a line exists
104 else: # extra formatting showing whether a line exists
105 key = (annotatedresult[i][0], annotatedresult[i][1])
105 key = (annotatedresult[i][0], annotatedresult[i][1])
106 if key in existinglines:
106 if key in existinglines:
107 result += b': ' + lines[i]
107 result += b': ' + lines[i]
108 else:
108 else:
109 result += b': ' + self.ui.label(
109 result += b': ' + self.ui.label(
110 b'-' + lines[i], b'diff.deleted'
110 b'-' + lines[i], b'diff.deleted'
111 )
111 )
112
112
113 if result[-1:] != b'\n':
113 if result[-1:] != b'\n':
114 result += b'\n'
114 result += b'\n'
115
115
116 self.ui.write(result)
116 self.ui.write(result)
117
117
118 @util.propertycache
118 @util.propertycache
119 def _hexfunc(self):
119 def _hexfunc(self):
120 if self.ui.debugflag or self.opts.get(b'long_hash'):
120 if self.ui.debugflag or self.opts.get(b'long_hash'):
121 return hex
121 return hex
122 else:
122 else:
123 return short
123 return short
124
124
125 def end(self):
125 def end(self):
126 pass
126 pass
127
127
128
128
129 class jsonformatter(defaultformatter):
129 class jsonformatter(defaultformatter):
130 def __init__(self, ui, repo, opts):
130 def __init__(self, ui, repo, opts):
131 super(jsonformatter, self).__init__(ui, repo, opts)
131 super(jsonformatter, self).__init__(ui, repo, opts)
132 self.ui.write(b'[')
132 self.ui.write(b'[')
133 self.needcomma = False
133 self.needcomma = False
134
134
135 def write(self, annotatedresult, lines=None, existinglines=None):
135 def write(self, annotatedresult, lines=None, existinglines=None):
136 if annotatedresult:
136 if annotatedresult:
137 self._writecomma()
137 self._writecomma()
138
138
139 pieces = [
139 pieces = [
140 (name, pycompat.maplist(f, annotatedresult))
140 (name, pycompat.maplist(f, annotatedresult))
141 for f, sep, name, enc in self.funcmap
141 for f, sep, name, enc in self.funcmap
142 ]
142 ]
143 if lines is not None:
143 if lines is not None:
144 pieces.append((b'line', lines))
144 pieces.append((b'line', lines))
145 pieces.sort()
145 pieces.sort()
146
146
147 seps = [b','] * len(pieces[:-1]) + [b'']
147 seps = [b','] * len(pieces[:-1]) + [b'']
148
148
149 result = b''
149 result = b''
150 lasti = len(annotatedresult) - 1
150 lasti = len(annotatedresult) - 1
151 for i in pycompat.xrange(len(annotatedresult)):
151 for i in range(len(annotatedresult)):
152 result += b'\n {\n'
152 result += b'\n {\n'
153 for j, p in enumerate(pieces):
153 for j, p in enumerate(pieces):
154 k, vs = p
154 k, vs = p
155 result += b' "%s": %s%s\n' % (
155 result += b' "%s": %s%s\n' % (
156 k,
156 k,
157 templatefilters.json(vs[i], paranoid=False),
157 templatefilters.json(vs[i], paranoid=False),
158 seps[j],
158 seps[j],
159 )
159 )
160 result += b' }%s' % (b'' if i == lasti else b',')
160 result += b' }%s' % (b'' if i == lasti else b',')
161 if lasti >= 0:
161 if lasti >= 0:
162 self.needcomma = True
162 self.needcomma = True
163
163
164 self.ui.write(result)
164 self.ui.write(result)
165
165
166 def _writecomma(self):
166 def _writecomma(self):
167 if self.needcomma:
167 if self.needcomma:
168 self.ui.write(b',')
168 self.ui.write(b',')
169 self.needcomma = False
169 self.needcomma = False
170
170
171 @util.propertycache
171 @util.propertycache
172 def _hexfunc(self):
172 def _hexfunc(self):
173 return hex
173 return hex
174
174
175 def end(self):
175 def end(self):
176 self.ui.write(b'\n]\n')
176 self.ui.write(b'\n]\n')
@@ -1,262 +1,259 b''
1 # Copyright 2016-present Facebook. All Rights Reserved.
1 # Copyright 2016-present Facebook. All Rights Reserved.
2 #
2 #
3 # revmap: trivial hg hash - linelog rev bidirectional map
3 # revmap: trivial hg hash - linelog rev bidirectional map
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import bisect
9 import bisect
10 import io
10 import io
11 import os
11 import os
12 import struct
12 import struct
13
13
14 from mercurial.node import hex
14 from mercurial.node import hex
15 from mercurial.pycompat import open
15 from mercurial.pycompat import open
16 from mercurial import (
16 from mercurial import (
17 error as hgerror,
17 error as hgerror,
18 pycompat,
19 )
18 )
20 from . import error
19 from . import error
21
20
22 # the revmap file format is straightforward:
21 # the revmap file format is straightforward:
23 #
22 #
24 # 8 bytes: header
23 # 8 bytes: header
25 # 1 byte : flag for linelog revision 1
24 # 1 byte : flag for linelog revision 1
26 # ? bytes: (optional) '\0'-terminated path string
25 # ? bytes: (optional) '\0'-terminated path string
27 # only exists if (flag & renameflag) != 0
26 # only exists if (flag & renameflag) != 0
28 # 20 bytes: hg hash for linelog revision 1
27 # 20 bytes: hg hash for linelog revision 1
29 # 1 byte : flag for linelog revision 2
28 # 1 byte : flag for linelog revision 2
30 # ? bytes: (optional) '\0'-terminated path string
29 # ? bytes: (optional) '\0'-terminated path string
31 # 20 bytes: hg hash for linelog revision 2
30 # 20 bytes: hg hash for linelog revision 2
32 # ....
31 # ....
33 #
32 #
34 # the implementation is kinda stupid: __init__ loads the whole revmap.
33 # the implementation is kinda stupid: __init__ loads the whole revmap.
35 # no laziness. benchmark shows loading 10000 revisions is about 0.015
34 # no laziness. benchmark shows loading 10000 revisions is about 0.015
36 # seconds, which looks enough for our use-case. if this implementation
35 # seconds, which looks enough for our use-case. if this implementation
37 # becomes a bottleneck, we can change it to lazily read the file
36 # becomes a bottleneck, we can change it to lazily read the file
38 # from the end.
37 # from the end.
39
38
40 # whether the changeset is in the side branch. i.e. not in the linear main
39 # whether the changeset is in the side branch. i.e. not in the linear main
41 # branch but only got referenced by lines in merge changesets.
40 # branch but only got referenced by lines in merge changesets.
42 sidebranchflag = 1
41 sidebranchflag = 1
43
42
44 # whether the changeset changes the file path (ie. is a rename)
43 # whether the changeset changes the file path (ie. is a rename)
45 renameflag = 2
44 renameflag = 2
46
45
47 # len(mercurial.node.nullid)
46 # len(mercurial.node.nullid)
48 _hshlen = 20
47 _hshlen = 20
49
48
50
49
51 class revmap:
50 class revmap:
52 """trivial hg bin hash - linelog rev bidirectional map
51 """trivial hg bin hash - linelog rev bidirectional map
53
52
54 also stores a flag (uint8) for each revision, and track renames.
53 also stores a flag (uint8) for each revision, and track renames.
55 """
54 """
56
55
57 HEADER = b'REVMAP1\0'
56 HEADER = b'REVMAP1\0'
58
57
59 def __init__(self, path=None):
58 def __init__(self, path=None):
60 """create or load the revmap, optionally associate to a file
59 """create or load the revmap, optionally associate to a file
61
60
62 if path is None, the revmap is entirely in-memory. the caller is
61 if path is None, the revmap is entirely in-memory. the caller is
63 responsible for locking. concurrent writes to a same file is unsafe.
62 responsible for locking. concurrent writes to a same file is unsafe.
64 the caller needs to make sure one file is associated to at most one
63 the caller needs to make sure one file is associated to at most one
65 revmap object at a time."""
64 revmap object at a time."""
66 self.path = path
65 self.path = path
67 self._rev2hsh = [None]
66 self._rev2hsh = [None]
68 self._rev2flag = [None]
67 self._rev2flag = [None]
69 self._hsh2rev = {}
68 self._hsh2rev = {}
70 # since rename does not happen frequently, do not store path for every
69 # since rename does not happen frequently, do not store path for every
71 # revision. self._renamerevs can be used for bisecting.
70 # revision. self._renamerevs can be used for bisecting.
72 self._renamerevs = [0]
71 self._renamerevs = [0]
73 self._renamepaths = [b'']
72 self._renamepaths = [b'']
74 self._lastmaxrev = -1
73 self._lastmaxrev = -1
75 if path:
74 if path:
76 if os.path.exists(path):
75 if os.path.exists(path):
77 self._load()
76 self._load()
78 else:
77 else:
79 # write the header so "append" can do incremental updates
78 # write the header so "append" can do incremental updates
80 self.flush()
79 self.flush()
81
80
82 def copyfrom(self, rhs):
81 def copyfrom(self, rhs):
83 """copy the map data from another revmap. do not affect self.path"""
82 """copy the map data from another revmap. do not affect self.path"""
84 self._rev2hsh = rhs._rev2hsh[:]
83 self._rev2hsh = rhs._rev2hsh[:]
85 self._rev2flag = rhs._rev2flag[:]
84 self._rev2flag = rhs._rev2flag[:]
86 self._hsh2rev = rhs._hsh2rev.copy()
85 self._hsh2rev = rhs._hsh2rev.copy()
87 self._renamerevs = rhs._renamerevs[:]
86 self._renamerevs = rhs._renamerevs[:]
88 self._renamepaths = rhs._renamepaths[:]
87 self._renamepaths = rhs._renamepaths[:]
89 self._lastmaxrev = -1
88 self._lastmaxrev = -1
90
89
91 @property
90 @property
92 def maxrev(self):
91 def maxrev(self):
93 """return max linelog revision number"""
92 """return max linelog revision number"""
94 return len(self._rev2hsh) - 1
93 return len(self._rev2hsh) - 1
95
94
96 def append(self, hsh, sidebranch=False, path=None, flush=False):
95 def append(self, hsh, sidebranch=False, path=None, flush=False):
97 """add a binary hg hash and return the mapped linelog revision.
96 """add a binary hg hash and return the mapped linelog revision.
98 if flush is True, incrementally update the file.
97 if flush is True, incrementally update the file.
99 """
98 """
100 if hsh in self._hsh2rev:
99 if hsh in self._hsh2rev:
101 raise error.CorruptedFileError(
100 raise error.CorruptedFileError(
102 b'%r is in revmap already' % hex(hsh)
101 b'%r is in revmap already' % hex(hsh)
103 )
102 )
104 if len(hsh) != _hshlen:
103 if len(hsh) != _hshlen:
105 raise hgerror.ProgrammingError(
104 raise hgerror.ProgrammingError(
106 b'hsh must be %d-char long' % _hshlen
105 b'hsh must be %d-char long' % _hshlen
107 )
106 )
108 idx = len(self._rev2hsh)
107 idx = len(self._rev2hsh)
109 flag = 0
108 flag = 0
110 if sidebranch:
109 if sidebranch:
111 flag |= sidebranchflag
110 flag |= sidebranchflag
112 if path is not None and path != self._renamepaths[-1]:
111 if path is not None and path != self._renamepaths[-1]:
113 flag |= renameflag
112 flag |= renameflag
114 self._renamerevs.append(idx)
113 self._renamerevs.append(idx)
115 self._renamepaths.append(path)
114 self._renamepaths.append(path)
116 self._rev2hsh.append(hsh)
115 self._rev2hsh.append(hsh)
117 self._rev2flag.append(flag)
116 self._rev2flag.append(flag)
118 self._hsh2rev[hsh] = idx
117 self._hsh2rev[hsh] = idx
119 if flush:
118 if flush:
120 self.flush()
119 self.flush()
121 return idx
120 return idx
122
121
123 def rev2hsh(self, rev):
122 def rev2hsh(self, rev):
124 """convert linelog revision to hg hash. return None if not found."""
123 """convert linelog revision to hg hash. return None if not found."""
125 if rev > self.maxrev or rev < 0:
124 if rev > self.maxrev or rev < 0:
126 return None
125 return None
127 return self._rev2hsh[rev]
126 return self._rev2hsh[rev]
128
127
129 def rev2flag(self, rev):
128 def rev2flag(self, rev):
130 """get the flag (uint8) for a given linelog revision.
129 """get the flag (uint8) for a given linelog revision.
131 return None if revision does not exist.
130 return None if revision does not exist.
132 """
131 """
133 if rev > self.maxrev or rev < 0:
132 if rev > self.maxrev or rev < 0:
134 return None
133 return None
135 return self._rev2flag[rev]
134 return self._rev2flag[rev]
136
135
137 def rev2path(self, rev):
136 def rev2path(self, rev):
138 """get the path for a given linelog revision.
137 """get the path for a given linelog revision.
139 return None if revision does not exist.
138 return None if revision does not exist.
140 """
139 """
141 if rev > self.maxrev or rev < 0:
140 if rev > self.maxrev or rev < 0:
142 return None
141 return None
143 idx = bisect.bisect_right(self._renamerevs, rev) - 1
142 idx = bisect.bisect_right(self._renamerevs, rev) - 1
144 return self._renamepaths[idx]
143 return self._renamepaths[idx]
145
144
146 def hsh2rev(self, hsh):
145 def hsh2rev(self, hsh):
147 """convert hg hash to linelog revision. return None if not found."""
146 """convert hg hash to linelog revision. return None if not found."""
148 return self._hsh2rev.get(hsh)
147 return self._hsh2rev.get(hsh)
149
148
150 def clear(self, flush=False):
149 def clear(self, flush=False):
151 """make the map empty. if flush is True, write to disk"""
150 """make the map empty. if flush is True, write to disk"""
152 # rev 0 is reserved, real rev starts from 1
151 # rev 0 is reserved, real rev starts from 1
153 self._rev2hsh = [None]
152 self._rev2hsh = [None]
154 self._rev2flag = [None]
153 self._rev2flag = [None]
155 self._hsh2rev = {}
154 self._hsh2rev = {}
156 self._rev2path = [b'']
155 self._rev2path = [b'']
157 self._lastmaxrev = -1
156 self._lastmaxrev = -1
158 if flush:
157 if flush:
159 self.flush()
158 self.flush()
160
159
161 def flush(self):
160 def flush(self):
162 """write the state down to the file"""
161 """write the state down to the file"""
163 if not self.path:
162 if not self.path:
164 return
163 return
165 if self._lastmaxrev == -1: # write the entire file
164 if self._lastmaxrev == -1: # write the entire file
166 with open(self.path, b'wb') as f:
165 with open(self.path, b'wb') as f:
167 f.write(self.HEADER)
166 f.write(self.HEADER)
168 for i in pycompat.xrange(1, len(self._rev2hsh)):
167 for i in range(1, len(self._rev2hsh)):
169 self._writerev(i, f)
168 self._writerev(i, f)
170 else: # append incrementally
169 else: # append incrementally
171 with open(self.path, b'ab') as f:
170 with open(self.path, b'ab') as f:
172 for i in pycompat.xrange(
171 for i in range(self._lastmaxrev + 1, len(self._rev2hsh)):
173 self._lastmaxrev + 1, len(self._rev2hsh)
174 ):
175 self._writerev(i, f)
172 self._writerev(i, f)
176 self._lastmaxrev = self.maxrev
173 self._lastmaxrev = self.maxrev
177
174
178 def _load(self):
175 def _load(self):
179 """load state from file"""
176 """load state from file"""
180 if not self.path:
177 if not self.path:
181 return
178 return
182 # use local variables in a loop. CPython uses LOAD_FAST for them,
179 # use local variables in a loop. CPython uses LOAD_FAST for them,
183 # which is faster than both LOAD_CONST and LOAD_GLOBAL.
180 # which is faster than both LOAD_CONST and LOAD_GLOBAL.
184 flaglen = 1
181 flaglen = 1
185 hshlen = _hshlen
182 hshlen = _hshlen
186 with open(self.path, b'rb') as f:
183 with open(self.path, b'rb') as f:
187 if f.read(len(self.HEADER)) != self.HEADER:
184 if f.read(len(self.HEADER)) != self.HEADER:
188 raise error.CorruptedFileError()
185 raise error.CorruptedFileError()
189 self.clear(flush=False)
186 self.clear(flush=False)
190 while True:
187 while True:
191 buf = f.read(flaglen)
188 buf = f.read(flaglen)
192 if not buf:
189 if not buf:
193 break
190 break
194 flag = ord(buf)
191 flag = ord(buf)
195 rev = len(self._rev2hsh)
192 rev = len(self._rev2hsh)
196 if flag & renameflag:
193 if flag & renameflag:
197 path = self._readcstr(f)
194 path = self._readcstr(f)
198 self._renamerevs.append(rev)
195 self._renamerevs.append(rev)
199 self._renamepaths.append(path)
196 self._renamepaths.append(path)
200 hsh = f.read(hshlen)
197 hsh = f.read(hshlen)
201 if len(hsh) != hshlen:
198 if len(hsh) != hshlen:
202 raise error.CorruptedFileError()
199 raise error.CorruptedFileError()
203 self._hsh2rev[hsh] = rev
200 self._hsh2rev[hsh] = rev
204 self._rev2flag.append(flag)
201 self._rev2flag.append(flag)
205 self._rev2hsh.append(hsh)
202 self._rev2hsh.append(hsh)
206 self._lastmaxrev = self.maxrev
203 self._lastmaxrev = self.maxrev
207
204
208 def _writerev(self, rev, f):
205 def _writerev(self, rev, f):
209 """append a revision data to file"""
206 """append a revision data to file"""
210 flag = self._rev2flag[rev]
207 flag = self._rev2flag[rev]
211 hsh = self._rev2hsh[rev]
208 hsh = self._rev2hsh[rev]
212 f.write(struct.pack(b'B', flag))
209 f.write(struct.pack(b'B', flag))
213 if flag & renameflag:
210 if flag & renameflag:
214 path = self.rev2path(rev)
211 path = self.rev2path(rev)
215 if path is None:
212 if path is None:
216 raise error.CorruptedFileError(b'cannot find path for %s' % rev)
213 raise error.CorruptedFileError(b'cannot find path for %s' % rev)
217 f.write(path + b'\0')
214 f.write(path + b'\0')
218 f.write(hsh)
215 f.write(hsh)
219
216
220 @staticmethod
217 @staticmethod
221 def _readcstr(f):
218 def _readcstr(f):
222 """read a C-language-like '\0'-terminated string"""
219 """read a C-language-like '\0'-terminated string"""
223 buf = b''
220 buf = b''
224 while True:
221 while True:
225 ch = f.read(1)
222 ch = f.read(1)
226 if not ch: # unexpected eof
223 if not ch: # unexpected eof
227 raise error.CorruptedFileError()
224 raise error.CorruptedFileError()
228 if ch == b'\0':
225 if ch == b'\0':
229 break
226 break
230 buf += ch
227 buf += ch
231 return buf
228 return buf
232
229
233 def __contains__(self, f):
230 def __contains__(self, f):
234 """(fctx or (node, path)) -> bool.
231 """(fctx or (node, path)) -> bool.
235 test if (node, path) is in the map, and is not in a side branch.
232 test if (node, path) is in the map, and is not in a side branch.
236 f can be either a tuple of (node, path), or a fctx.
233 f can be either a tuple of (node, path), or a fctx.
237 """
234 """
238 if isinstance(f, tuple): # f: (node, path)
235 if isinstance(f, tuple): # f: (node, path)
239 hsh, path = f
236 hsh, path = f
240 else: # f: fctx
237 else: # f: fctx
241 hsh, path = f.node(), f.path()
238 hsh, path = f.node(), f.path()
242 rev = self.hsh2rev(hsh)
239 rev = self.hsh2rev(hsh)
243 if rev is None:
240 if rev is None:
244 return False
241 return False
245 if path is not None and path != self.rev2path(rev):
242 if path is not None and path != self.rev2path(rev):
246 return False
243 return False
247 return (self.rev2flag(rev) & sidebranchflag) == 0
244 return (self.rev2flag(rev) & sidebranchflag) == 0
248
245
249
246
250 def getlastnode(path):
247 def getlastnode(path):
251 """return the last hash in a revmap, without loading its full content.
248 """return the last hash in a revmap, without loading its full content.
252 this is equivalent to `m = revmap(path); m.rev2hsh(m.maxrev)`, but faster.
249 this is equivalent to `m = revmap(path); m.rev2hsh(m.maxrev)`, but faster.
253 """
250 """
254 hsh = None
251 hsh = None
255 try:
252 try:
256 with open(path, b'rb') as f:
253 with open(path, b'rb') as f:
257 f.seek(-_hshlen, io.SEEK_END)
254 f.seek(-_hshlen, io.SEEK_END)
258 if f.tell() > len(revmap.HEADER):
255 if f.tell() > len(revmap.HEADER):
259 hsh = f.read(_hshlen)
256 hsh = f.read(_hshlen)
260 except IOError:
257 except IOError:
261 pass
258 pass
262 return hsh
259 return hsh
@@ -1,547 +1,547 b''
1 from mercurial.i18n import _
1 from mercurial.i18n import _
2
2
3 from mercurial.node import (
3 from mercurial.node import (
4 bin,
4 bin,
5 hex,
5 hex,
6 nullrev,
6 nullrev,
7 sha1nodeconstants,
7 sha1nodeconstants,
8 )
8 )
9 from mercurial import (
9 from mercurial import (
10 ancestor,
10 ancestor,
11 changelog as hgchangelog,
11 changelog as hgchangelog,
12 dagop,
12 dagop,
13 encoding,
13 encoding,
14 error,
14 error,
15 manifest,
15 manifest,
16 pycompat,
16 pycompat,
17 )
17 )
18 from mercurial.interfaces import (
18 from mercurial.interfaces import (
19 repository,
19 repository,
20 util as interfaceutil,
20 util as interfaceutil,
21 )
21 )
22 from mercurial.utils import stringutil
22 from mercurial.utils import stringutil
23 from . import (
23 from . import (
24 gitutil,
24 gitutil,
25 index,
25 index,
26 manifest as gitmanifest,
26 manifest as gitmanifest,
27 )
27 )
28
28
29 pygit2 = gitutil.get_pygit2()
29 pygit2 = gitutil.get_pygit2()
30
30
31
31
32 class baselog: # revlog.revlog):
32 class baselog: # revlog.revlog):
33 """Common implementations between changelog and manifestlog."""
33 """Common implementations between changelog and manifestlog."""
34
34
35 def __init__(self, gr, db):
35 def __init__(self, gr, db):
36 self.gitrepo = gr
36 self.gitrepo = gr
37 self._db = db
37 self._db = db
38
38
39 def __len__(self):
39 def __len__(self):
40 return int(
40 return int(
41 self._db.execute('SELECT COUNT(*) FROM changelog').fetchone()[0]
41 self._db.execute('SELECT COUNT(*) FROM changelog').fetchone()[0]
42 )
42 )
43
43
44 def rev(self, n):
44 def rev(self, n):
45 if n == sha1nodeconstants.nullid:
45 if n == sha1nodeconstants.nullid:
46 return -1
46 return -1
47 t = self._db.execute(
47 t = self._db.execute(
48 'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
48 'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
49 ).fetchone()
49 ).fetchone()
50 if t is None:
50 if t is None:
51 raise error.LookupError(n, b'00changelog.i', _(b'no node %d'))
51 raise error.LookupError(n, b'00changelog.i', _(b'no node %d'))
52 return t[0]
52 return t[0]
53
53
54 def node(self, r):
54 def node(self, r):
55 if r == nullrev:
55 if r == nullrev:
56 return sha1nodeconstants.nullid
56 return sha1nodeconstants.nullid
57 t = self._db.execute(
57 t = self._db.execute(
58 'SELECT node FROM changelog WHERE rev = ?', (r,)
58 'SELECT node FROM changelog WHERE rev = ?', (r,)
59 ).fetchone()
59 ).fetchone()
60 if t is None:
60 if t is None:
61 raise error.LookupError(r, b'00changelog.i', _(b'no node'))
61 raise error.LookupError(r, b'00changelog.i', _(b'no node'))
62 return bin(t[0])
62 return bin(t[0])
63
63
64 def hasnode(self, n):
64 def hasnode(self, n):
65 t = self._db.execute(
65 t = self._db.execute(
66 'SELECT node FROM changelog WHERE node = ?',
66 'SELECT node FROM changelog WHERE node = ?',
67 (pycompat.sysstr(n),),
67 (pycompat.sysstr(n),),
68 ).fetchone()
68 ).fetchone()
69 return t is not None
69 return t is not None
70
70
71
71
72 class baselogindex:
72 class baselogindex:
73 def __init__(self, log):
73 def __init__(self, log):
74 self._log = log
74 self._log = log
75
75
76 def has_node(self, n):
76 def has_node(self, n):
77 return self._log.rev(n) != -1
77 return self._log.rev(n) != -1
78
78
79 def __len__(self):
79 def __len__(self):
80 return len(self._log)
80 return len(self._log)
81
81
82 def __getitem__(self, idx):
82 def __getitem__(self, idx):
83 p1rev, p2rev = self._log.parentrevs(idx)
83 p1rev, p2rev = self._log.parentrevs(idx)
84 # TODO: it's messy that the index leaks so far out of the
84 # TODO: it's messy that the index leaks so far out of the
85 # storage layer that we have to implement things like reading
85 # storage layer that we have to implement things like reading
86 # this raw tuple, which exposes revlog internals.
86 # this raw tuple, which exposes revlog internals.
87 return (
87 return (
88 # Pretend offset is just the index, since we don't really care.
88 # Pretend offset is just the index, since we don't really care.
89 idx,
89 idx,
90 # Same with lengths
90 # Same with lengths
91 idx, # length
91 idx, # length
92 idx, # rawsize
92 idx, # rawsize
93 -1, # delta base
93 -1, # delta base
94 idx, # linkrev TODO is this right?
94 idx, # linkrev TODO is this right?
95 p1rev,
95 p1rev,
96 p2rev,
96 p2rev,
97 self._log.node(idx),
97 self._log.node(idx),
98 )
98 )
99
99
100
100
101 # TODO: an interface for the changelog type?
101 # TODO: an interface for the changelog type?
102 class changelog(baselog):
102 class changelog(baselog):
103 # TODO: this appears to be an enumerated type, and should probably
103 # TODO: this appears to be an enumerated type, and should probably
104 # be part of the public changelog interface
104 # be part of the public changelog interface
105 _copiesstorage = b'extra'
105 _copiesstorage = b'extra'
106
106
107 def __contains__(self, rev):
107 def __contains__(self, rev):
108 try:
108 try:
109 self.node(rev)
109 self.node(rev)
110 return True
110 return True
111 except error.LookupError:
111 except error.LookupError:
112 return False
112 return False
113
113
114 def __iter__(self):
114 def __iter__(self):
115 return iter(pycompat.xrange(len(self)))
115 return iter(range(len(self)))
116
116
117 @property
117 @property
118 def filteredrevs(self):
118 def filteredrevs(self):
119 # TODO: we should probably add a refs/hg/ namespace for hidden
119 # TODO: we should probably add a refs/hg/ namespace for hidden
120 # heads etc, but that's an idea for later.
120 # heads etc, but that's an idea for later.
121 return set()
121 return set()
122
122
123 @property
123 @property
124 def index(self):
124 def index(self):
125 return baselogindex(self)
125 return baselogindex(self)
126
126
127 @property
127 @property
128 def nodemap(self):
128 def nodemap(self):
129 r = {
129 r = {
130 bin(v[0]): v[1]
130 bin(v[0]): v[1]
131 for v in self._db.execute('SELECT node, rev FROM changelog')
131 for v in self._db.execute('SELECT node, rev FROM changelog')
132 }
132 }
133 r[sha1nodeconstants.nullid] = nullrev
133 r[sha1nodeconstants.nullid] = nullrev
134 return r
134 return r
135
135
136 def tip(self):
136 def tip(self):
137 t = self._db.execute(
137 t = self._db.execute(
138 'SELECT node FROM changelog ORDER BY rev DESC LIMIT 1'
138 'SELECT node FROM changelog ORDER BY rev DESC LIMIT 1'
139 ).fetchone()
139 ).fetchone()
140 if t:
140 if t:
141 return bin(t[0])
141 return bin(t[0])
142 return sha1nodeconstants.nullid
142 return sha1nodeconstants.nullid
143
143
144 def revs(self, start=0, stop=None):
144 def revs(self, start=0, stop=None):
145 if stop is None:
145 if stop is None:
146 stop = self.tiprev()
146 stop = self.tiprev()
147 t = self._db.execute(
147 t = self._db.execute(
148 'SELECT rev FROM changelog '
148 'SELECT rev FROM changelog '
149 'WHERE rev >= ? AND rev <= ? '
149 'WHERE rev >= ? AND rev <= ? '
150 'ORDER BY REV ASC',
150 'ORDER BY REV ASC',
151 (start, stop),
151 (start, stop),
152 )
152 )
153 return (int(r[0]) for r in t)
153 return (int(r[0]) for r in t)
154
154
155 def tiprev(self):
155 def tiprev(self):
156 t = self._db.execute(
156 t = self._db.execute(
157 'SELECT rev FROM changelog ' 'ORDER BY REV DESC ' 'LIMIT 1'
157 'SELECT rev FROM changelog ' 'ORDER BY REV DESC ' 'LIMIT 1'
158 ).fetchone()
158 ).fetchone()
159
159
160 if t is not None:
160 if t is not None:
161 return t[0]
161 return t[0]
162 return -1
162 return -1
163
163
164 def _partialmatch(self, id):
164 def _partialmatch(self, id):
165 if sha1nodeconstants.wdirhex.startswith(id):
165 if sha1nodeconstants.wdirhex.startswith(id):
166 raise error.WdirUnsupported
166 raise error.WdirUnsupported
167 candidates = [
167 candidates = [
168 bin(x[0])
168 bin(x[0])
169 for x in self._db.execute(
169 for x in self._db.execute(
170 'SELECT node FROM changelog WHERE node LIKE ?',
170 'SELECT node FROM changelog WHERE node LIKE ?',
171 (pycompat.sysstr(id + b'%'),),
171 (pycompat.sysstr(id + b'%'),),
172 )
172 )
173 ]
173 ]
174 if sha1nodeconstants.nullhex.startswith(id):
174 if sha1nodeconstants.nullhex.startswith(id):
175 candidates.append(sha1nodeconstants.nullid)
175 candidates.append(sha1nodeconstants.nullid)
176 if len(candidates) > 1:
176 if len(candidates) > 1:
177 raise error.AmbiguousPrefixLookupError(
177 raise error.AmbiguousPrefixLookupError(
178 id, b'00changelog.i', _(b'ambiguous identifier')
178 id, b'00changelog.i', _(b'ambiguous identifier')
179 )
179 )
180 if candidates:
180 if candidates:
181 return candidates[0]
181 return candidates[0]
182 return None
182 return None
183
183
184 def flags(self, rev):
184 def flags(self, rev):
185 return 0
185 return 0
186
186
187 def shortest(self, node, minlength=1):
187 def shortest(self, node, minlength=1):
188 nodehex = hex(node)
188 nodehex = hex(node)
189 for attempt in pycompat.xrange(minlength, len(nodehex) + 1):
189 for attempt in range(minlength, len(nodehex) + 1):
190 candidate = nodehex[:attempt]
190 candidate = nodehex[:attempt]
191 matches = int(
191 matches = int(
192 self._db.execute(
192 self._db.execute(
193 'SELECT COUNT(*) FROM changelog WHERE node LIKE ?',
193 'SELECT COUNT(*) FROM changelog WHERE node LIKE ?',
194 (pycompat.sysstr(candidate + b'%'),),
194 (pycompat.sysstr(candidate + b'%'),),
195 ).fetchone()[0]
195 ).fetchone()[0]
196 )
196 )
197 if matches == 1:
197 if matches == 1:
198 return candidate
198 return candidate
199 return nodehex
199 return nodehex
200
200
201 def headrevs(self, revs=None):
201 def headrevs(self, revs=None):
202 realheads = [
202 realheads = [
203 int(x[0])
203 int(x[0])
204 for x in self._db.execute(
204 for x in self._db.execute(
205 'SELECT rev FROM changelog '
205 'SELECT rev FROM changelog '
206 'INNER JOIN heads ON changelog.node = heads.node'
206 'INNER JOIN heads ON changelog.node = heads.node'
207 )
207 )
208 ]
208 ]
209 if revs:
209 if revs:
210 return sorted([r for r in revs if r in realheads])
210 return sorted([r for r in revs if r in realheads])
211 return sorted(realheads)
211 return sorted(realheads)
212
212
213 def changelogrevision(self, nodeorrev):
213 def changelogrevision(self, nodeorrev):
214 # Ensure we have a node id
214 # Ensure we have a node id
215 if isinstance(nodeorrev, int):
215 if isinstance(nodeorrev, int):
216 n = self.node(nodeorrev)
216 n = self.node(nodeorrev)
217 else:
217 else:
218 n = nodeorrev
218 n = nodeorrev
219 extra = {b'branch': b'default'}
219 extra = {b'branch': b'default'}
220 # handle looking up nullid
220 # handle looking up nullid
221 if n == sha1nodeconstants.nullid:
221 if n == sha1nodeconstants.nullid:
222 return hgchangelog._changelogrevision(
222 return hgchangelog._changelogrevision(
223 extra=extra, manifest=sha1nodeconstants.nullid
223 extra=extra, manifest=sha1nodeconstants.nullid
224 )
224 )
225 hn = gitutil.togitnode(n)
225 hn = gitutil.togitnode(n)
226 # We've got a real commit!
226 # We've got a real commit!
227 files = [
227 files = [
228 r[0]
228 r[0]
229 for r in self._db.execute(
229 for r in self._db.execute(
230 'SELECT filename FROM changedfiles '
230 'SELECT filename FROM changedfiles '
231 'WHERE node = ? and filenode != ?',
231 'WHERE node = ? and filenode != ?',
232 (hn, gitutil.nullgit),
232 (hn, gitutil.nullgit),
233 )
233 )
234 ]
234 ]
235 filesremoved = [
235 filesremoved = [
236 r[0]
236 r[0]
237 for r in self._db.execute(
237 for r in self._db.execute(
238 'SELECT filename FROM changedfiles '
238 'SELECT filename FROM changedfiles '
239 'WHERE node = ? and filenode = ?',
239 'WHERE node = ? and filenode = ?',
240 (hn, gitutil.nullgit),
240 (hn, gitutil.nullgit),
241 )
241 )
242 ]
242 ]
243 c = self.gitrepo[hn]
243 c = self.gitrepo[hn]
244 return hgchangelog._changelogrevision(
244 return hgchangelog._changelogrevision(
245 manifest=n, # pretend manifest the same as the commit node
245 manifest=n, # pretend manifest the same as the commit node
246 user=b'%s <%s>'
246 user=b'%s <%s>'
247 % (c.author.name.encode('utf8'), c.author.email.encode('utf8')),
247 % (c.author.name.encode('utf8'), c.author.email.encode('utf8')),
248 date=(c.author.time, -c.author.offset * 60),
248 date=(c.author.time, -c.author.offset * 60),
249 files=files,
249 files=files,
250 # TODO filesadded in the index
250 # TODO filesadded in the index
251 filesremoved=filesremoved,
251 filesremoved=filesremoved,
252 description=c.message.encode('utf8'),
252 description=c.message.encode('utf8'),
253 # TODO do we want to handle extra? how?
253 # TODO do we want to handle extra? how?
254 extra=extra,
254 extra=extra,
255 )
255 )
256
256
257 def ancestors(self, revs, stoprev=0, inclusive=False):
257 def ancestors(self, revs, stoprev=0, inclusive=False):
258 revs = list(revs)
258 revs = list(revs)
259 tip = self.rev(self.tip())
259 tip = self.rev(self.tip())
260 for r in revs:
260 for r in revs:
261 if r > tip:
261 if r > tip:
262 raise IndexError(b'Invalid rev %r' % r)
262 raise IndexError(b'Invalid rev %r' % r)
263 return ancestor.lazyancestors(
263 return ancestor.lazyancestors(
264 self.parentrevs, revs, stoprev=stoprev, inclusive=inclusive
264 self.parentrevs, revs, stoprev=stoprev, inclusive=inclusive
265 )
265 )
266
266
267 # Cleanup opportunity: this is *identical* to the revlog.py version
267 # Cleanup opportunity: this is *identical* to the revlog.py version
268 def descendants(self, revs):
268 def descendants(self, revs):
269 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
269 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
270
270
271 def incrementalmissingrevs(self, common=None):
271 def incrementalmissingrevs(self, common=None):
272 """Return an object that can be used to incrementally compute the
272 """Return an object that can be used to incrementally compute the
273 revision numbers of the ancestors of arbitrary sets that are not
273 revision numbers of the ancestors of arbitrary sets that are not
274 ancestors of common. This is an ancestor.incrementalmissingancestors
274 ancestors of common. This is an ancestor.incrementalmissingancestors
275 object.
275 object.
276
276
277 'common' is a list of revision numbers. If common is not supplied, uses
277 'common' is a list of revision numbers. If common is not supplied, uses
278 nullrev.
278 nullrev.
279 """
279 """
280 if common is None:
280 if common is None:
281 common = [nullrev]
281 common = [nullrev]
282
282
283 return ancestor.incrementalmissingancestors(self.parentrevs, common)
283 return ancestor.incrementalmissingancestors(self.parentrevs, common)
284
284
285 def findmissing(self, common=None, heads=None):
285 def findmissing(self, common=None, heads=None):
286 """Return the ancestors of heads that are not ancestors of common.
286 """Return the ancestors of heads that are not ancestors of common.
287
287
288 More specifically, return a list of nodes N such that every N
288 More specifically, return a list of nodes N such that every N
289 satisfies the following constraints:
289 satisfies the following constraints:
290
290
291 1. N is an ancestor of some node in 'heads'
291 1. N is an ancestor of some node in 'heads'
292 2. N is not an ancestor of any node in 'common'
292 2. N is not an ancestor of any node in 'common'
293
293
294 The list is sorted by revision number, meaning it is
294 The list is sorted by revision number, meaning it is
295 topologically sorted.
295 topologically sorted.
296
296
297 'heads' and 'common' are both lists of node IDs. If heads is
297 'heads' and 'common' are both lists of node IDs. If heads is
298 not supplied, uses all of the revlog's heads. If common is not
298 not supplied, uses all of the revlog's heads. If common is not
299 supplied, uses nullid."""
299 supplied, uses nullid."""
300 if common is None:
300 if common is None:
301 common = [sha1nodeconstants.nullid]
301 common = [sha1nodeconstants.nullid]
302 if heads is None:
302 if heads is None:
303 heads = self.heads()
303 heads = self.heads()
304
304
305 common = [self.rev(n) for n in common]
305 common = [self.rev(n) for n in common]
306 heads = [self.rev(n) for n in heads]
306 heads = [self.rev(n) for n in heads]
307
307
308 inc = self.incrementalmissingrevs(common=common)
308 inc = self.incrementalmissingrevs(common=common)
309 return [self.node(r) for r in inc.missingancestors(heads)]
309 return [self.node(r) for r in inc.missingancestors(heads)]
310
310
311 def children(self, node):
311 def children(self, node):
312 """find the children of a given node"""
312 """find the children of a given node"""
313 c = []
313 c = []
314 p = self.rev(node)
314 p = self.rev(node)
315 for r in self.revs(start=p + 1):
315 for r in self.revs(start=p + 1):
316 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
316 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
317 if prevs:
317 if prevs:
318 for pr in prevs:
318 for pr in prevs:
319 if pr == p:
319 if pr == p:
320 c.append(self.node(r))
320 c.append(self.node(r))
321 elif p == nullrev:
321 elif p == nullrev:
322 c.append(self.node(r))
322 c.append(self.node(r))
323 return c
323 return c
324
324
325 def reachableroots(self, minroot, heads, roots, includepath=False):
325 def reachableroots(self, minroot, heads, roots, includepath=False):
326 return dagop._reachablerootspure(
326 return dagop._reachablerootspure(
327 self.parentrevs, minroot, roots, heads, includepath
327 self.parentrevs, minroot, roots, heads, includepath
328 )
328 )
329
329
330 # Cleanup opportunity: this is *identical* to the revlog.py version
330 # Cleanup opportunity: this is *identical* to the revlog.py version
331 def isancestor(self, a, b):
331 def isancestor(self, a, b):
332 a, b = self.rev(a), self.rev(b)
332 a, b = self.rev(a), self.rev(b)
333 return self.isancestorrev(a, b)
333 return self.isancestorrev(a, b)
334
334
335 # Cleanup opportunity: this is *identical* to the revlog.py version
335 # Cleanup opportunity: this is *identical* to the revlog.py version
336 def isancestorrev(self, a, b):
336 def isancestorrev(self, a, b):
337 if a == nullrev:
337 if a == nullrev:
338 return True
338 return True
339 elif a == b:
339 elif a == b:
340 return True
340 return True
341 elif a > b:
341 elif a > b:
342 return False
342 return False
343 return bool(self.reachableroots(a, [b], [a], includepath=False))
343 return bool(self.reachableroots(a, [b], [a], includepath=False))
344
344
345 def parentrevs(self, rev):
345 def parentrevs(self, rev):
346 n = self.node(rev)
346 n = self.node(rev)
347 hn = gitutil.togitnode(n)
347 hn = gitutil.togitnode(n)
348 if hn != gitutil.nullgit:
348 if hn != gitutil.nullgit:
349 c = self.gitrepo[hn]
349 c = self.gitrepo[hn]
350 else:
350 else:
351 return nullrev, nullrev
351 return nullrev, nullrev
352 p1 = p2 = nullrev
352 p1 = p2 = nullrev
353 if c.parents:
353 if c.parents:
354 p1 = self.rev(c.parents[0].id.raw)
354 p1 = self.rev(c.parents[0].id.raw)
355 if len(c.parents) > 2:
355 if len(c.parents) > 2:
356 raise error.Abort(b'TODO octopus merge handling')
356 raise error.Abort(b'TODO octopus merge handling')
357 if len(c.parents) == 2:
357 if len(c.parents) == 2:
358 p2 = self.rev(c.parents[1].id.raw)
358 p2 = self.rev(c.parents[1].id.raw)
359 return p1, p2
359 return p1, p2
360
360
361 # Private method is used at least by the tags code.
361 # Private method is used at least by the tags code.
362 _uncheckedparentrevs = parentrevs
362 _uncheckedparentrevs = parentrevs
363
363
364 def commonancestorsheads(self, a, b):
364 def commonancestorsheads(self, a, b):
365 # TODO the revlog verson of this has a C path, so we probably
365 # TODO the revlog verson of this has a C path, so we probably
366 # need to optimize this...
366 # need to optimize this...
367 a, b = self.rev(a), self.rev(b)
367 a, b = self.rev(a), self.rev(b)
368 return [
368 return [
369 self.node(n)
369 self.node(n)
370 for n in ancestor.commonancestorsheads(self.parentrevs, a, b)
370 for n in ancestor.commonancestorsheads(self.parentrevs, a, b)
371 ]
371 ]
372
372
373 def branchinfo(self, rev):
373 def branchinfo(self, rev):
374 """Git doesn't do named branches, so just put everything on default."""
374 """Git doesn't do named branches, so just put everything on default."""
375 return b'default', False
375 return b'default', False
376
376
377 def delayupdate(self, tr):
377 def delayupdate(self, tr):
378 # TODO: I think we can elide this because we're just dropping
378 # TODO: I think we can elide this because we're just dropping
379 # an object in the git repo?
379 # an object in the git repo?
380 pass
380 pass
381
381
382 def add(
382 def add(
383 self,
383 self,
384 manifest,
384 manifest,
385 files,
385 files,
386 desc,
386 desc,
387 transaction,
387 transaction,
388 p1,
388 p1,
389 p2,
389 p2,
390 user,
390 user,
391 date=None,
391 date=None,
392 extra=None,
392 extra=None,
393 p1copies=None,
393 p1copies=None,
394 p2copies=None,
394 p2copies=None,
395 filesadded=None,
395 filesadded=None,
396 filesremoved=None,
396 filesremoved=None,
397 ):
397 ):
398 parents = []
398 parents = []
399 hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
399 hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
400 if p1 != sha1nodeconstants.nullid:
400 if p1 != sha1nodeconstants.nullid:
401 parents.append(hp1)
401 parents.append(hp1)
402 if p2 and p2 != sha1nodeconstants.nullid:
402 if p2 and p2 != sha1nodeconstants.nullid:
403 parents.append(hp2)
403 parents.append(hp2)
404 assert date is not None
404 assert date is not None
405 timestamp, tz = date
405 timestamp, tz = date
406 sig = pygit2.Signature(
406 sig = pygit2.Signature(
407 encoding.unifromlocal(stringutil.person(user)),
407 encoding.unifromlocal(stringutil.person(user)),
408 encoding.unifromlocal(stringutil.email(user)),
408 encoding.unifromlocal(stringutil.email(user)),
409 int(timestamp),
409 int(timestamp),
410 -int(tz // 60),
410 -int(tz // 60),
411 )
411 )
412 oid = self.gitrepo.create_commit(
412 oid = self.gitrepo.create_commit(
413 None, sig, sig, desc, gitutil.togitnode(manifest), parents
413 None, sig, sig, desc, gitutil.togitnode(manifest), parents
414 )
414 )
415 # Set up an internal reference to force the commit into the
415 # Set up an internal reference to force the commit into the
416 # changelog. Hypothetically, we could even use this refs/hg/
416 # changelog. Hypothetically, we could even use this refs/hg/
417 # namespace to allow for anonymous heads on git repos, which
417 # namespace to allow for anonymous heads on git repos, which
418 # would be neat.
418 # would be neat.
419 self.gitrepo.references.create(
419 self.gitrepo.references.create(
420 'refs/hg/internal/latest-commit', oid, force=True
420 'refs/hg/internal/latest-commit', oid, force=True
421 )
421 )
422 # Reindex now to pick up changes. We omit the progress
422 # Reindex now to pick up changes. We omit the progress
423 # and log callbacks because this will be very quick.
423 # and log callbacks because this will be very quick.
424 index._index_repo(self.gitrepo, self._db)
424 index._index_repo(self.gitrepo, self._db)
425 return oid.raw
425 return oid.raw
426
426
427
427
428 class manifestlog(baselog):
428 class manifestlog(baselog):
429 nodeconstants = sha1nodeconstants
429 nodeconstants = sha1nodeconstants
430
430
431 def __getitem__(self, node):
431 def __getitem__(self, node):
432 return self.get(b'', node)
432 return self.get(b'', node)
433
433
434 def get(self, relpath, node):
434 def get(self, relpath, node):
435 if node == sha1nodeconstants.nullid:
435 if node == sha1nodeconstants.nullid:
436 # TODO: this should almost certainly be a memgittreemanifestctx
436 # TODO: this should almost certainly be a memgittreemanifestctx
437 return manifest.memtreemanifestctx(self, relpath)
437 return manifest.memtreemanifestctx(self, relpath)
438 commit = self.gitrepo[gitutil.togitnode(node)]
438 commit = self.gitrepo[gitutil.togitnode(node)]
439 t = commit.tree
439 t = commit.tree
440 if relpath:
440 if relpath:
441 parts = relpath.split(b'/')
441 parts = relpath.split(b'/')
442 for p in parts:
442 for p in parts:
443 te = t[p]
443 te = t[p]
444 t = self.gitrepo[te.id]
444 t = self.gitrepo[te.id]
445 return gitmanifest.gittreemanifestctx(self.gitrepo, t)
445 return gitmanifest.gittreemanifestctx(self.gitrepo, t)
446
446
447
447
448 @interfaceutil.implementer(repository.ifilestorage)
448 @interfaceutil.implementer(repository.ifilestorage)
449 class filelog(baselog):
449 class filelog(baselog):
450 def __init__(self, gr, db, path):
450 def __init__(self, gr, db, path):
451 super(filelog, self).__init__(gr, db)
451 super(filelog, self).__init__(gr, db)
452 assert isinstance(path, bytes)
452 assert isinstance(path, bytes)
453 self.path = path
453 self.path = path
454 self.nullid = sha1nodeconstants.nullid
454 self.nullid = sha1nodeconstants.nullid
455
455
456 def read(self, node):
456 def read(self, node):
457 if node == sha1nodeconstants.nullid:
457 if node == sha1nodeconstants.nullid:
458 return b''
458 return b''
459 return self.gitrepo[gitutil.togitnode(node)].data
459 return self.gitrepo[gitutil.togitnode(node)].data
460
460
461 def lookup(self, node):
461 def lookup(self, node):
462 if len(node) not in (20, 40):
462 if len(node) not in (20, 40):
463 node = int(node)
463 node = int(node)
464 if isinstance(node, int):
464 if isinstance(node, int):
465 assert False, b'todo revnums for nodes'
465 assert False, b'todo revnums for nodes'
466 if len(node) == 40:
466 if len(node) == 40:
467 node = bin(node)
467 node = bin(node)
468 hnode = gitutil.togitnode(node)
468 hnode = gitutil.togitnode(node)
469 if hnode in self.gitrepo:
469 if hnode in self.gitrepo:
470 return node
470 return node
471 raise error.LookupError(self.path, node, _(b'no match found'))
471 raise error.LookupError(self.path, node, _(b'no match found'))
472
472
473 def cmp(self, node, text):
473 def cmp(self, node, text):
474 """Returns True if text is different than content at `node`."""
474 """Returns True if text is different than content at `node`."""
475 return self.read(node) != text
475 return self.read(node) != text
476
476
477 def add(self, text, meta, transaction, link, p1=None, p2=None):
477 def add(self, text, meta, transaction, link, p1=None, p2=None):
478 assert not meta # Should we even try to handle this?
478 assert not meta # Should we even try to handle this?
479 return self.gitrepo.create_blob(text).raw
479 return self.gitrepo.create_blob(text).raw
480
480
481 def __iter__(self):
481 def __iter__(self):
482 for clrev in self._db.execute(
482 for clrev in self._db.execute(
483 '''
483 '''
484 SELECT rev FROM changelog
484 SELECT rev FROM changelog
485 INNER JOIN changedfiles ON changelog.node = changedfiles.node
485 INNER JOIN changedfiles ON changelog.node = changedfiles.node
486 WHERE changedfiles.filename = ? AND changedfiles.filenode != ?
486 WHERE changedfiles.filename = ? AND changedfiles.filenode != ?
487 ''',
487 ''',
488 (pycompat.fsdecode(self.path), gitutil.nullgit),
488 (pycompat.fsdecode(self.path), gitutil.nullgit),
489 ):
489 ):
490 yield clrev[0]
490 yield clrev[0]
491
491
492 def linkrev(self, fr):
492 def linkrev(self, fr):
493 return fr
493 return fr
494
494
495 def rev(self, node):
495 def rev(self, node):
496 row = self._db.execute(
496 row = self._db.execute(
497 '''
497 '''
498 SELECT rev FROM changelog
498 SELECT rev FROM changelog
499 INNER JOIN changedfiles ON changelog.node = changedfiles.node
499 INNER JOIN changedfiles ON changelog.node = changedfiles.node
500 WHERE changedfiles.filename = ? AND changedfiles.filenode = ?''',
500 WHERE changedfiles.filename = ? AND changedfiles.filenode = ?''',
501 (pycompat.fsdecode(self.path), gitutil.togitnode(node)),
501 (pycompat.fsdecode(self.path), gitutil.togitnode(node)),
502 ).fetchone()
502 ).fetchone()
503 if row is None:
503 if row is None:
504 raise error.LookupError(self.path, node, _(b'no such node'))
504 raise error.LookupError(self.path, node, _(b'no such node'))
505 return int(row[0])
505 return int(row[0])
506
506
507 def node(self, rev):
507 def node(self, rev):
508 maybe = self._db.execute(
508 maybe = self._db.execute(
509 '''SELECT filenode FROM changedfiles
509 '''SELECT filenode FROM changedfiles
510 INNER JOIN changelog ON changelog.node = changedfiles.node
510 INNER JOIN changelog ON changelog.node = changedfiles.node
511 WHERE changelog.rev = ? AND filename = ?
511 WHERE changelog.rev = ? AND filename = ?
512 ''',
512 ''',
513 (rev, pycompat.fsdecode(self.path)),
513 (rev, pycompat.fsdecode(self.path)),
514 ).fetchone()
514 ).fetchone()
515 if maybe is None:
515 if maybe is None:
516 raise IndexError('gitlog %r out of range %d' % (self.path, rev))
516 raise IndexError('gitlog %r out of range %d' % (self.path, rev))
517 return bin(maybe[0])
517 return bin(maybe[0])
518
518
519 def parents(self, node):
519 def parents(self, node):
520 gn = gitutil.togitnode(node)
520 gn = gitutil.togitnode(node)
521 gp = pycompat.fsdecode(self.path)
521 gp = pycompat.fsdecode(self.path)
522 ps = []
522 ps = []
523 for p in self._db.execute(
523 for p in self._db.execute(
524 '''SELECT p1filenode, p2filenode FROM changedfiles
524 '''SELECT p1filenode, p2filenode FROM changedfiles
525 WHERE filenode = ? AND filename = ?
525 WHERE filenode = ? AND filename = ?
526 ''',
526 ''',
527 (gn, gp),
527 (gn, gp),
528 ).fetchone():
528 ).fetchone():
529 if p is None:
529 if p is None:
530 commit = self._db.execute(
530 commit = self._db.execute(
531 "SELECT node FROM changedfiles "
531 "SELECT node FROM changedfiles "
532 "WHERE filenode = ? AND filename = ?",
532 "WHERE filenode = ? AND filename = ?",
533 (gn, gp),
533 (gn, gp),
534 ).fetchone()[0]
534 ).fetchone()[0]
535 # This filelog is missing some data. Build the
535 # This filelog is missing some data. Build the
536 # filelog, then recurse (which will always find data).
536 # filelog, then recurse (which will always find data).
537 if pycompat.ispy3:
537 if pycompat.ispy3:
538 commit = commit.decode('ascii')
538 commit = commit.decode('ascii')
539 index.fill_in_filelog(self.gitrepo, self._db, commit, gp, gn)
539 index.fill_in_filelog(self.gitrepo, self._db, commit, gp, gn)
540 return self.parents(node)
540 return self.parents(node)
541 else:
541 else:
542 ps.append(bin(p))
542 ps.append(bin(p))
543 return ps
543 return ps
544
544
545 def renamed(self, node):
545 def renamed(self, node):
546 # TODO: renames/copies
546 # TODO: renames/copies
547 return False
547 return False
@@ -1,385 +1,385 b''
1 # Minimal support for git commands on an hg repository
1 # Minimal support for git commands on an hg repository
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''browse the repository in a graphical way
8 '''browse the repository in a graphical way
9
9
10 The hgk extension allows browsing the history of a repository in a
10 The hgk extension allows browsing the history of a repository in a
11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
12 distributed with Mercurial.)
12 distributed with Mercurial.)
13
13
14 hgk consists of two parts: a Tcl script that does the displaying and
14 hgk consists of two parts: a Tcl script that does the displaying and
15 querying of information, and an extension to Mercurial named hgk.py,
15 querying of information, and an extension to Mercurial named hgk.py,
16 which provides hooks for hgk to get information. hgk can be found in
16 which provides hooks for hgk to get information. hgk can be found in
17 the contrib directory, and the extension is shipped in the hgext
17 the contrib directory, and the extension is shipped in the hgext
18 repository, and needs to be enabled.
18 repository, and needs to be enabled.
19
19
20 The :hg:`view` command will launch the hgk Tcl script. For this command
20 The :hg:`view` command will launch the hgk Tcl script. For this command
21 to work, hgk must be in your search path. Alternately, you can specify
21 to work, hgk must be in your search path. Alternately, you can specify
22 the path to hgk in your configuration file::
22 the path to hgk in your configuration file::
23
23
24 [hgk]
24 [hgk]
25 path = /location/of/hgk
25 path = /location/of/hgk
26
26
27 hgk can make use of the extdiff extension to visualize revisions.
27 hgk can make use of the extdiff extension to visualize revisions.
28 Assuming you had already configured extdiff vdiff command, just add::
28 Assuming you had already configured extdiff vdiff command, just add::
29
29
30 [hgk]
30 [hgk]
31 vdiff=vdiff
31 vdiff=vdiff
32
32
33 Revisions context menu will now display additional entries to fire
33 Revisions context menu will now display additional entries to fire
34 vdiff on hovered and selected revisions.
34 vdiff on hovered and selected revisions.
35 '''
35 '''
36
36
37
37
38 import os
38 import os
39
39
40 from mercurial.i18n import _
40 from mercurial.i18n import _
41 from mercurial.node import (
41 from mercurial.node import (
42 nullrev,
42 nullrev,
43 short,
43 short,
44 )
44 )
45 from mercurial import (
45 from mercurial import (
46 commands,
46 commands,
47 obsolete,
47 obsolete,
48 patch,
48 patch,
49 pycompat,
49 pycompat,
50 registrar,
50 registrar,
51 scmutil,
51 scmutil,
52 )
52 )
53
53
54 cmdtable = {}
54 cmdtable = {}
55 command = registrar.command(cmdtable)
55 command = registrar.command(cmdtable)
56 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
56 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
57 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
57 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
58 # be specifying the version(s) of Mercurial they are tested with, or
58 # be specifying the version(s) of Mercurial they are tested with, or
59 # leave the attribute unspecified.
59 # leave the attribute unspecified.
60 testedwith = b'ships-with-hg-core'
60 testedwith = b'ships-with-hg-core'
61
61
62 configtable = {}
62 configtable = {}
63 configitem = registrar.configitem(configtable)
63 configitem = registrar.configitem(configtable)
64
64
65 configitem(
65 configitem(
66 b'hgk',
66 b'hgk',
67 b'path',
67 b'path',
68 default=b'hgk',
68 default=b'hgk',
69 )
69 )
70
70
71
71
72 @command(
72 @command(
73 b'debug-diff-tree',
73 b'debug-diff-tree',
74 [
74 [
75 (b'p', b'patch', None, _(b'generate patch')),
75 (b'p', b'patch', None, _(b'generate patch')),
76 (b'r', b'recursive', None, _(b'recursive')),
76 (b'r', b'recursive', None, _(b'recursive')),
77 (b'P', b'pretty', None, _(b'pretty')),
77 (b'P', b'pretty', None, _(b'pretty')),
78 (b's', b'stdin', None, _(b'stdin')),
78 (b's', b'stdin', None, _(b'stdin')),
79 (b'C', b'copy', None, _(b'detect copies')),
79 (b'C', b'copy', None, _(b'detect copies')),
80 (b'S', b'search', b"", _(b'search')),
80 (b'S', b'search', b"", _(b'search')),
81 ],
81 ],
82 b'[OPTION]... NODE1 NODE2 [FILE]...',
82 b'[OPTION]... NODE1 NODE2 [FILE]...',
83 inferrepo=True,
83 inferrepo=True,
84 )
84 )
85 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
85 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
86 """diff trees from two commits"""
86 """diff trees from two commits"""
87
87
88 def __difftree(repo, node1, node2, files=None):
88 def __difftree(repo, node1, node2, files=None):
89 assert node2 is not None
89 assert node2 is not None
90 if files is None:
90 if files is None:
91 files = []
91 files = []
92 mmap = repo[node1].manifest()
92 mmap = repo[node1].manifest()
93 mmap2 = repo[node2].manifest()
93 mmap2 = repo[node2].manifest()
94 m = scmutil.match(repo[node1], files)
94 m = scmutil.match(repo[node1], files)
95 st = repo.status(node1, node2, m)
95 st = repo.status(node1, node2, m)
96 empty = short(repo.nullid)
96 empty = short(repo.nullid)
97
97
98 for f in st.modified:
98 for f in st.modified:
99 # TODO get file permissions
99 # TODO get file permissions
100 ui.writenoi18n(
100 ui.writenoi18n(
101 b":100664 100664 %s %s M\t%s\t%s\n"
101 b":100664 100664 %s %s M\t%s\t%s\n"
102 % (short(mmap[f]), short(mmap2[f]), f, f)
102 % (short(mmap[f]), short(mmap2[f]), f, f)
103 )
103 )
104 for f in st.added:
104 for f in st.added:
105 ui.writenoi18n(
105 ui.writenoi18n(
106 b":000000 100664 %s %s N\t%s\t%s\n"
106 b":000000 100664 %s %s N\t%s\t%s\n"
107 % (empty, short(mmap2[f]), f, f)
107 % (empty, short(mmap2[f]), f, f)
108 )
108 )
109 for f in st.removed:
109 for f in st.removed:
110 ui.writenoi18n(
110 ui.writenoi18n(
111 b":100664 000000 %s %s D\t%s\t%s\n"
111 b":100664 000000 %s %s D\t%s\t%s\n"
112 % (short(mmap[f]), empty, f, f)
112 % (short(mmap[f]), empty, f, f)
113 )
113 )
114
114
115 ##
115 ##
116
116
117 while True:
117 while True:
118 if opts['stdin']:
118 if opts['stdin']:
119 line = ui.fin.readline()
119 line = ui.fin.readline()
120 if not line:
120 if not line:
121 break
121 break
122 line = line.rstrip(pycompat.oslinesep).split(b' ')
122 line = line.rstrip(pycompat.oslinesep).split(b' ')
123 node1 = line[0]
123 node1 = line[0]
124 if len(line) > 1:
124 if len(line) > 1:
125 node2 = line[1]
125 node2 = line[1]
126 else:
126 else:
127 node2 = None
127 node2 = None
128 node1 = repo.lookup(node1)
128 node1 = repo.lookup(node1)
129 if node2:
129 if node2:
130 node2 = repo.lookup(node2)
130 node2 = repo.lookup(node2)
131 else:
131 else:
132 node2 = node1
132 node2 = node1
133 node1 = repo.changelog.parents(node1)[0]
133 node1 = repo.changelog.parents(node1)[0]
134 if opts['patch']:
134 if opts['patch']:
135 if opts['pretty']:
135 if opts['pretty']:
136 catcommit(ui, repo, node2, b"")
136 catcommit(ui, repo, node2, b"")
137 m = scmutil.match(repo[node1], files)
137 m = scmutil.match(repo[node1], files)
138 diffopts = patch.difffeatureopts(ui)
138 diffopts = patch.difffeatureopts(ui)
139 diffopts.git = True
139 diffopts.git = True
140 chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts)
140 chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts)
141 for chunk in chunks:
141 for chunk in chunks:
142 ui.write(chunk)
142 ui.write(chunk)
143 else:
143 else:
144 __difftree(repo, node1, node2, files=files)
144 __difftree(repo, node1, node2, files=files)
145 if not opts['stdin']:
145 if not opts['stdin']:
146 break
146 break
147
147
148
148
149 def catcommit(ui, repo, n, prefix, ctx=None):
149 def catcommit(ui, repo, n, prefix, ctx=None):
150 nlprefix = b'\n' + prefix
150 nlprefix = b'\n' + prefix
151 if ctx is None:
151 if ctx is None:
152 ctx = repo[n]
152 ctx = repo[n]
153 # use ctx.node() instead ??
153 # use ctx.node() instead ??
154 ui.write((b"tree %s\n" % short(ctx.changeset()[0])))
154 ui.write((b"tree %s\n" % short(ctx.changeset()[0])))
155 for p in ctx.parents():
155 for p in ctx.parents():
156 ui.write((b"parent %s\n" % p))
156 ui.write((b"parent %s\n" % p))
157
157
158 date = ctx.date()
158 date = ctx.date()
159 description = ctx.description().replace(b"\0", b"")
159 description = ctx.description().replace(b"\0", b"")
160 ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1])))
160 ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1])))
161
161
162 if b'committer' in ctx.extra():
162 if b'committer' in ctx.extra():
163 ui.write((b"committer %s\n" % ctx.extra()[b'committer']))
163 ui.write((b"committer %s\n" % ctx.extra()[b'committer']))
164
164
165 ui.write((b"revision %d\n" % ctx.rev()))
165 ui.write((b"revision %d\n" % ctx.rev()))
166 ui.write((b"branch %s\n" % ctx.branch()))
166 ui.write((b"branch %s\n" % ctx.branch()))
167 if obsolete.isenabled(repo, obsolete.createmarkersopt):
167 if obsolete.isenabled(repo, obsolete.createmarkersopt):
168 if ctx.obsolete():
168 if ctx.obsolete():
169 ui.writenoi18n(b"obsolete\n")
169 ui.writenoi18n(b"obsolete\n")
170 ui.write((b"phase %s\n\n" % ctx.phasestr()))
170 ui.write((b"phase %s\n\n" % ctx.phasestr()))
171
171
172 if prefix != b"":
172 if prefix != b"":
173 ui.write(
173 ui.write(
174 b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip())
174 b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip())
175 )
175 )
176 else:
176 else:
177 ui.write(description + b"\n")
177 ui.write(description + b"\n")
178 if prefix:
178 if prefix:
179 ui.write(b'\0')
179 ui.write(b'\0')
180
180
181
181
182 @command(b'debug-merge-base', [], _(b'REV REV'))
182 @command(b'debug-merge-base', [], _(b'REV REV'))
183 def base(ui, repo, node1, node2):
183 def base(ui, repo, node1, node2):
184 """output common ancestor information"""
184 """output common ancestor information"""
185 node1 = repo.lookup(node1)
185 node1 = repo.lookup(node1)
186 node2 = repo.lookup(node2)
186 node2 = repo.lookup(node2)
187 n = repo.changelog.ancestor(node1, node2)
187 n = repo.changelog.ancestor(node1, node2)
188 ui.write(short(n) + b"\n")
188 ui.write(short(n) + b"\n")
189
189
190
190
191 @command(
191 @command(
192 b'debug-cat-file',
192 b'debug-cat-file',
193 [(b's', b'stdin', None, _(b'stdin'))],
193 [(b's', b'stdin', None, _(b'stdin'))],
194 _(b'[OPTION]... TYPE FILE'),
194 _(b'[OPTION]... TYPE FILE'),
195 inferrepo=True,
195 inferrepo=True,
196 )
196 )
197 def catfile(ui, repo, type=None, r=None, **opts):
197 def catfile(ui, repo, type=None, r=None, **opts):
198 """cat a specific revision"""
198 """cat a specific revision"""
199 # in stdin mode, every line except the commit is prefixed with two
199 # in stdin mode, every line except the commit is prefixed with two
200 # spaces. This way the our caller can find the commit without magic
200 # spaces. This way the our caller can find the commit without magic
201 # strings
201 # strings
202 #
202 #
203 prefix = b""
203 prefix = b""
204 if opts['stdin']:
204 if opts['stdin']:
205 line = ui.fin.readline()
205 line = ui.fin.readline()
206 if not line:
206 if not line:
207 return
207 return
208 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
208 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
209 prefix = b" "
209 prefix = b" "
210 else:
210 else:
211 if not type or not r:
211 if not type or not r:
212 ui.warn(_(b"cat-file: type or revision not supplied\n"))
212 ui.warn(_(b"cat-file: type or revision not supplied\n"))
213 commands.help_(ui, b'cat-file')
213 commands.help_(ui, b'cat-file')
214
214
215 while r:
215 while r:
216 if type != b"commit":
216 if type != b"commit":
217 ui.warn(_(b"aborting hg cat-file only understands commits\n"))
217 ui.warn(_(b"aborting hg cat-file only understands commits\n"))
218 return 1
218 return 1
219 n = repo.lookup(r)
219 n = repo.lookup(r)
220 catcommit(ui, repo, n, prefix)
220 catcommit(ui, repo, n, prefix)
221 if opts['stdin']:
221 if opts['stdin']:
222 line = ui.fin.readline()
222 line = ui.fin.readline()
223 if not line:
223 if not line:
224 break
224 break
225 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
225 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
226 else:
226 else:
227 break
227 break
228
228
229
229
230 # git rev-tree is a confusing thing. You can supply a number of
230 # git rev-tree is a confusing thing. You can supply a number of
231 # commit sha1s on the command line, and it walks the commit history
231 # commit sha1s on the command line, and it walks the commit history
232 # telling you which commits are reachable from the supplied ones via
232 # telling you which commits are reachable from the supplied ones via
233 # a bitmask based on arg position.
233 # a bitmask based on arg position.
234 # you can specify a commit to stop at by starting the sha1 with ^
234 # you can specify a commit to stop at by starting the sha1 with ^
235 def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False):
235 def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False):
236 def chlogwalk():
236 def chlogwalk():
237 count = len(repo)
237 count = len(repo)
238 i = count
238 i = count
239 l = [0] * 100
239 l = [0] * 100
240 chunk = 100
240 chunk = 100
241 while True:
241 while True:
242 if chunk > i:
242 if chunk > i:
243 chunk = i
243 chunk = i
244 i = 0
244 i = 0
245 else:
245 else:
246 i -= chunk
246 i -= chunk
247
247
248 for x in pycompat.xrange(chunk):
248 for x in range(chunk):
249 if i + x >= count:
249 if i + x >= count:
250 l[chunk - x :] = [0] * (chunk - x)
250 l[chunk - x :] = [0] * (chunk - x)
251 break
251 break
252 if full is not None:
252 if full is not None:
253 if (i + x) in repo:
253 if (i + x) in repo:
254 l[x] = repo[i + x]
254 l[x] = repo[i + x]
255 l[x].changeset() # force reading
255 l[x].changeset() # force reading
256 else:
256 else:
257 if (i + x) in repo:
257 if (i + x) in repo:
258 l[x] = 1
258 l[x] = 1
259 for x in pycompat.xrange(chunk - 1, -1, -1):
259 for x in range(chunk - 1, -1, -1):
260 if l[x] != 0:
260 if l[x] != 0:
261 yield (i + x, full is not None and l[x] or None)
261 yield (i + x, full is not None and l[x] or None)
262 if i == 0:
262 if i == 0:
263 break
263 break
264
264
265 # calculate and return the reachability bitmask for sha
265 # calculate and return the reachability bitmask for sha
266 def is_reachable(ar, reachable, sha):
266 def is_reachable(ar, reachable, sha):
267 if len(ar) == 0:
267 if len(ar) == 0:
268 return 1
268 return 1
269 mask = 0
269 mask = 0
270 for i in pycompat.xrange(len(ar)):
270 for i in range(len(ar)):
271 if sha in reachable[i]:
271 if sha in reachable[i]:
272 mask |= 1 << i
272 mask |= 1 << i
273
273
274 return mask
274 return mask
275
275
276 reachable = []
276 reachable = []
277 stop_sha1 = []
277 stop_sha1 = []
278 want_sha1 = []
278 want_sha1 = []
279 count = 0
279 count = 0
280
280
281 # figure out which commits they are asking for and which ones they
281 # figure out which commits they are asking for and which ones they
282 # want us to stop on
282 # want us to stop on
283 for i, arg in enumerate(args):
283 for i, arg in enumerate(args):
284 if arg.startswith(b'^'):
284 if arg.startswith(b'^'):
285 s = repo.lookup(arg[1:])
285 s = repo.lookup(arg[1:])
286 stop_sha1.append(s)
286 stop_sha1.append(s)
287 want_sha1.append(s)
287 want_sha1.append(s)
288 elif arg != b'HEAD':
288 elif arg != b'HEAD':
289 want_sha1.append(repo.lookup(arg))
289 want_sha1.append(repo.lookup(arg))
290
290
291 # calculate the graph for the supplied commits
291 # calculate the graph for the supplied commits
292 for i, n in enumerate(want_sha1):
292 for i, n in enumerate(want_sha1):
293 reachable.append(set())
293 reachable.append(set())
294 visit = [n]
294 visit = [n]
295 reachable[i].add(n)
295 reachable[i].add(n)
296 while visit:
296 while visit:
297 n = visit.pop(0)
297 n = visit.pop(0)
298 if n in stop_sha1:
298 if n in stop_sha1:
299 continue
299 continue
300 for p in repo.changelog.parents(n):
300 for p in repo.changelog.parents(n):
301 if p not in reachable[i]:
301 if p not in reachable[i]:
302 reachable[i].add(p)
302 reachable[i].add(p)
303 visit.append(p)
303 visit.append(p)
304 if p in stop_sha1:
304 if p in stop_sha1:
305 continue
305 continue
306
306
307 # walk the repository looking for commits that are in our
307 # walk the repository looking for commits that are in our
308 # reachability graph
308 # reachability graph
309 for i, ctx in chlogwalk():
309 for i, ctx in chlogwalk():
310 if i not in repo:
310 if i not in repo:
311 continue
311 continue
312 n = repo.changelog.node(i)
312 n = repo.changelog.node(i)
313 mask = is_reachable(want_sha1, reachable, n)
313 mask = is_reachable(want_sha1, reachable, n)
314 if mask:
314 if mask:
315 parentstr = b""
315 parentstr = b""
316 if parents:
316 if parents:
317 pp = repo.changelog.parents(n)
317 pp = repo.changelog.parents(n)
318 if pp[0] != repo.nullid:
318 if pp[0] != repo.nullid:
319 parentstr += b" " + short(pp[0])
319 parentstr += b" " + short(pp[0])
320 if pp[1] != repo.nullid:
320 if pp[1] != repo.nullid:
321 parentstr += b" " + short(pp[1])
321 parentstr += b" " + short(pp[1])
322 if not full:
322 if not full:
323 ui.write(b"%s%s\n" % (short(n), parentstr))
323 ui.write(b"%s%s\n" % (short(n), parentstr))
324 elif full == b"commit":
324 elif full == b"commit":
325 ui.write(b"%s%s\n" % (short(n), parentstr))
325 ui.write(b"%s%s\n" % (short(n), parentstr))
326 catcommit(ui, repo, n, b' ', ctx)
326 catcommit(ui, repo, n, b' ', ctx)
327 else:
327 else:
328 (p1, p2) = repo.changelog.parents(n)
328 (p1, p2) = repo.changelog.parents(n)
329 (h, h1, h2) = map(short, (n, p1, p2))
329 (h, h1, h2) = map(short, (n, p1, p2))
330 (i1, i2) = map(repo.changelog.rev, (p1, p2))
330 (i1, i2) = map(repo.changelog.rev, (p1, p2))
331
331
332 date = ctx.date()[0]
332 date = ctx.date()[0]
333 ui.write(b"%s %s:%s" % (date, h, mask))
333 ui.write(b"%s %s:%s" % (date, h, mask))
334 mask = is_reachable(want_sha1, reachable, p1)
334 mask = is_reachable(want_sha1, reachable, p1)
335 if i1 != nullrev and mask > 0:
335 if i1 != nullrev and mask > 0:
336 ui.write(b"%s:%s " % (h1, mask)),
336 ui.write(b"%s:%s " % (h1, mask)),
337 mask = is_reachable(want_sha1, reachable, p2)
337 mask = is_reachable(want_sha1, reachable, p2)
338 if i2 != nullrev and mask > 0:
338 if i2 != nullrev and mask > 0:
339 ui.write(b"%s:%s " % (h2, mask))
339 ui.write(b"%s:%s " % (h2, mask))
340 ui.write(b"\n")
340 ui.write(b"\n")
341 if maxnr and count >= maxnr:
341 if maxnr and count >= maxnr:
342 break
342 break
343 count += 1
343 count += 1
344
344
345
345
346 # git rev-list tries to order things by date, and has the ability to stop
346 # git rev-list tries to order things by date, and has the ability to stop
347 # at a given commit without walking the whole repo. TODO add the stop
347 # at a given commit without walking the whole repo. TODO add the stop
348 # parameter
348 # parameter
349 @command(
349 @command(
350 b'debug-rev-list',
350 b'debug-rev-list',
351 [
351 [
352 (b'H', b'header', None, _(b'header')),
352 (b'H', b'header', None, _(b'header')),
353 (b't', b'topo-order', None, _(b'topo-order')),
353 (b't', b'topo-order', None, _(b'topo-order')),
354 (b'p', b'parents', None, _(b'parents')),
354 (b'p', b'parents', None, _(b'parents')),
355 (b'n', b'max-count', 0, _(b'max-count')),
355 (b'n', b'max-count', 0, _(b'max-count')),
356 ],
356 ],
357 b'[OPTION]... REV...',
357 b'[OPTION]... REV...',
358 )
358 )
359 def revlist(ui, repo, *revs, **opts):
359 def revlist(ui, repo, *revs, **opts):
360 """print revisions"""
360 """print revisions"""
361 if opts['header']:
361 if opts['header']:
362 full = b"commit"
362 full = b"commit"
363 else:
363 else:
364 full = None
364 full = None
365 copy = [x for x in revs]
365 copy = [x for x in revs]
366 revtree(ui, copy, repo, full, opts['max_count'], opts[r'parents'])
366 revtree(ui, copy, repo, full, opts['max_count'], opts[r'parents'])
367
367
368
368
369 @command(
369 @command(
370 b'view',
370 b'view',
371 [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))],
371 [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))],
372 _(b'[-l LIMIT] [REVRANGE]'),
372 _(b'[-l LIMIT] [REVRANGE]'),
373 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
373 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
374 )
374 )
375 def view(ui, repo, *etc, **opts):
375 def view(ui, repo, *etc, **opts):
376 """start interactive history viewer"""
376 """start interactive history viewer"""
377 opts = pycompat.byteskwargs(opts)
377 opts = pycompat.byteskwargs(opts)
378 os.chdir(repo.root)
378 os.chdir(repo.root)
379 optstr = b' '.join([b'--%s %s' % (k, v) for k, v in opts.items() if v])
379 optstr = b' '.join([b'--%s %s' % (k, v) for k, v in opts.items() if v])
380 if repo.filtername is None:
380 if repo.filtername is None:
381 optstr += b'--hidden'
381 optstr += b'--hidden'
382
382
383 cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc))
383 cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc))
384 ui.debug(b"running %s\n" % cmd)
384 ui.debug(b"running %s\n" % cmd)
385 ui.system(cmd, blockedtag=b'hgk_view')
385 ui.system(cmd, blockedtag=b'hgk_view')
@@ -1,2683 +1,2683 b''
1 # histedit.py - interactive history editing for mercurial
1 # histedit.py - interactive history editing for mercurial
2 #
2 #
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """interactive history editing
7 """interactive history editing
8
8
9 With this extension installed, Mercurial gains one new command: histedit. Usage
9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 is as follows, assuming the following history::
10 is as follows, assuming the following history::
11
11
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 | Add delta
13 | Add delta
14 |
14 |
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 | Add gamma
16 | Add gamma
17 |
17 |
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 | Add beta
19 | Add beta
20 |
20 |
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 Add alpha
22 Add alpha
23
23
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 file open in your editor::
25 file open in your editor::
26
26
27 pick c561b4e977df Add beta
27 pick c561b4e977df Add beta
28 pick 030b686bedc4 Add gamma
28 pick 030b686bedc4 Add gamma
29 pick 7c2fd3b9020c Add delta
29 pick 7c2fd3b9020c Add delta
30
30
31 # Edit history between c561b4e977df and 7c2fd3b9020c
31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 #
32 #
33 # Commits are listed from least to most recent
33 # Commits are listed from least to most recent
34 #
34 #
35 # Commands:
35 # Commands:
36 # p, pick = use commit
36 # p, pick = use commit
37 # e, edit = use commit, but allow edits before making new commit
37 # e, edit = use commit, but allow edits before making new commit
38 # f, fold = use commit, but combine it with the one above
38 # f, fold = use commit, but combine it with the one above
39 # r, roll = like fold, but discard this commit's description and date
39 # r, roll = like fold, but discard this commit's description and date
40 # d, drop = remove commit from history
40 # d, drop = remove commit from history
41 # m, mess = edit commit message without changing commit content
41 # m, mess = edit commit message without changing commit content
42 # b, base = checkout changeset and apply further changesets from there
42 # b, base = checkout changeset and apply further changesets from there
43 #
43 #
44
44
45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
46 for each revision in your history. For example, if you had meant to add gamma
46 for each revision in your history. For example, if you had meant to add gamma
47 before beta, and then wanted to add delta in the same revision as beta, you
47 before beta, and then wanted to add delta in the same revision as beta, you
48 would reorganize the file to look like this::
48 would reorganize the file to look like this::
49
49
50 pick 030b686bedc4 Add gamma
50 pick 030b686bedc4 Add gamma
51 pick c561b4e977df Add beta
51 pick c561b4e977df Add beta
52 fold 7c2fd3b9020c Add delta
52 fold 7c2fd3b9020c Add delta
53
53
54 # Edit history between c561b4e977df and 7c2fd3b9020c
54 # Edit history between c561b4e977df and 7c2fd3b9020c
55 #
55 #
56 # Commits are listed from least to most recent
56 # Commits are listed from least to most recent
57 #
57 #
58 # Commands:
58 # Commands:
59 # p, pick = use commit
59 # p, pick = use commit
60 # e, edit = use commit, but allow edits before making new commit
60 # e, edit = use commit, but allow edits before making new commit
61 # f, fold = use commit, but combine it with the one above
61 # f, fold = use commit, but combine it with the one above
62 # r, roll = like fold, but discard this commit's description and date
62 # r, roll = like fold, but discard this commit's description and date
63 # d, drop = remove commit from history
63 # d, drop = remove commit from history
64 # m, mess = edit commit message without changing commit content
64 # m, mess = edit commit message without changing commit content
65 # b, base = checkout changeset and apply further changesets from there
65 # b, base = checkout changeset and apply further changesets from there
66 #
66 #
67
67
68 At which point you close the editor and ``histedit`` starts working. When you
68 At which point you close the editor and ``histedit`` starts working. When you
69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
70 those revisions together, offering you a chance to clean up the commit message::
70 those revisions together, offering you a chance to clean up the commit message::
71
71
72 Add beta
72 Add beta
73 ***
73 ***
74 Add delta
74 Add delta
75
75
76 Edit the commit message to your liking, then close the editor. The date used
76 Edit the commit message to your liking, then close the editor. The date used
77 for the commit will be the later of the two commits' dates. For this example,
77 for the commit will be the later of the two commits' dates. For this example,
78 let's assume that the commit message was changed to ``Add beta and delta.``
78 let's assume that the commit message was changed to ``Add beta and delta.``
79 After histedit has run and had a chance to remove any old or temporary
79 After histedit has run and had a chance to remove any old or temporary
80 revisions it needed, the history looks like this::
80 revisions it needed, the history looks like this::
81
81
82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
83 | Add beta and delta.
83 | Add beta and delta.
84 |
84 |
85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
86 | Add gamma
86 | Add gamma
87 |
87 |
88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
89 Add alpha
89 Add alpha
90
90
91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
92 ones) until after it has completed all the editing operations, so it will
92 ones) until after it has completed all the editing operations, so it will
93 probably perform several strip operations when it's done. For the above example,
93 probably perform several strip operations when it's done. For the above example,
94 it had to run strip twice. Strip can be slow depending on a variety of factors,
94 it had to run strip twice. Strip can be slow depending on a variety of factors,
95 so you might need to be a little patient. You can choose to keep the original
95 so you might need to be a little patient. You can choose to keep the original
96 revisions by passing the ``--keep`` flag.
96 revisions by passing the ``--keep`` flag.
97
97
98 The ``edit`` operation will drop you back to a command prompt,
98 The ``edit`` operation will drop you back to a command prompt,
99 allowing you to edit files freely, or even use ``hg record`` to commit
99 allowing you to edit files freely, or even use ``hg record`` to commit
100 some changes as a separate commit. When you're done, any remaining
100 some changes as a separate commit. When you're done, any remaining
101 uncommitted changes will be committed as well. When done, run ``hg
101 uncommitted changes will be committed as well. When done, run ``hg
102 histedit --continue`` to finish this step. If there are uncommitted
102 histedit --continue`` to finish this step. If there are uncommitted
103 changes, you'll be prompted for a new commit message, but the default
103 changes, you'll be prompted for a new commit message, but the default
104 commit message will be the original message for the ``edit`` ed
104 commit message will be the original message for the ``edit`` ed
105 revision, and the date of the original commit will be preserved.
105 revision, and the date of the original commit will be preserved.
106
106
107 The ``message`` operation will give you a chance to revise a commit
107 The ``message`` operation will give you a chance to revise a commit
108 message without changing the contents. It's a shortcut for doing
108 message without changing the contents. It's a shortcut for doing
109 ``edit`` immediately followed by `hg histedit --continue``.
109 ``edit`` immediately followed by `hg histedit --continue``.
110
110
111 If ``histedit`` encounters a conflict when moving a revision (while
111 If ``histedit`` encounters a conflict when moving a revision (while
112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
113 ``edit`` with the difference that it won't prompt you for a commit
113 ``edit`` with the difference that it won't prompt you for a commit
114 message when done. If you decide at this point that you don't like how
114 message when done. If you decide at this point that you don't like how
115 much work it will be to rearrange history, or that you made a mistake,
115 much work it will be to rearrange history, or that you made a mistake,
116 you can use ``hg histedit --abort`` to abandon the new changes you
116 you can use ``hg histedit --abort`` to abandon the new changes you
117 have made and return to the state before you attempted to edit your
117 have made and return to the state before you attempted to edit your
118 history.
118 history.
119
119
120 If we clone the histedit-ed example repository above and add four more
120 If we clone the histedit-ed example repository above and add four more
121 changes, such that we have the following history::
121 changes, such that we have the following history::
122
122
123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
124 | Add theta
124 | Add theta
125 |
125 |
126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
127 | Add eta
127 | Add eta
128 |
128 |
129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
130 | Add zeta
130 | Add zeta
131 |
131 |
132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
133 | Add epsilon
133 | Add epsilon
134 |
134 |
135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
136 | Add beta and delta.
136 | Add beta and delta.
137 |
137 |
138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
139 | Add gamma
139 | Add gamma
140 |
140 |
141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
142 Add alpha
142 Add alpha
143
143
144 If you run ``hg histedit --outgoing`` on the clone then it is the same
144 If you run ``hg histedit --outgoing`` on the clone then it is the same
145 as running ``hg histedit 836302820282``. If you need plan to push to a
145 as running ``hg histedit 836302820282``. If you need plan to push to a
146 repository that Mercurial does not detect to be related to the source
146 repository that Mercurial does not detect to be related to the source
147 repo, you can add a ``--force`` option.
147 repo, you can add a ``--force`` option.
148
148
149 Config
149 Config
150 ------
150 ------
151
151
152 Histedit rule lines are truncated to 80 characters by default. You
152 Histedit rule lines are truncated to 80 characters by default. You
153 can customize this behavior by setting a different length in your
153 can customize this behavior by setting a different length in your
154 configuration file::
154 configuration file::
155
155
156 [histedit]
156 [histedit]
157 linelen = 120 # truncate rule lines at 120 characters
157 linelen = 120 # truncate rule lines at 120 characters
158
158
159 The summary of a change can be customized as well::
159 The summary of a change can be customized as well::
160
160
161 [histedit]
161 [histedit]
162 summary-template = '{rev} {bookmarks} {desc|firstline}'
162 summary-template = '{rev} {bookmarks} {desc|firstline}'
163
163
164 The customized summary should be kept short enough that rule lines
164 The customized summary should be kept short enough that rule lines
165 will fit in the configured line length. See above if that requires
165 will fit in the configured line length. See above if that requires
166 customization.
166 customization.
167
167
168 ``hg histedit`` attempts to automatically choose an appropriate base
168 ``hg histedit`` attempts to automatically choose an appropriate base
169 revision to use. To change which base revision is used, define a
169 revision to use. To change which base revision is used, define a
170 revset in your configuration file::
170 revset in your configuration file::
171
171
172 [histedit]
172 [histedit]
173 defaultrev = only(.) & draft()
173 defaultrev = only(.) & draft()
174
174
175 By default each edited revision needs to be present in histedit commands.
175 By default each edited revision needs to be present in histedit commands.
176 To remove revision you need to use ``drop`` operation. You can configure
176 To remove revision you need to use ``drop`` operation. You can configure
177 the drop to be implicit for missing commits by adding::
177 the drop to be implicit for missing commits by adding::
178
178
179 [histedit]
179 [histedit]
180 dropmissing = True
180 dropmissing = True
181
181
182 By default, histedit will close the transaction after each action. For
182 By default, histedit will close the transaction after each action. For
183 performance purposes, you can configure histedit to use a single transaction
183 performance purposes, you can configure histedit to use a single transaction
184 across the entire histedit. WARNING: This setting introduces a significant risk
184 across the entire histedit. WARNING: This setting introduces a significant risk
185 of losing the work you've done in a histedit if the histedit aborts
185 of losing the work you've done in a histedit if the histedit aborts
186 unexpectedly::
186 unexpectedly::
187
187
188 [histedit]
188 [histedit]
189 singletransaction = True
189 singletransaction = True
190
190
191 """
191 """
192
192
193
193
194 # chistedit dependencies that are not available everywhere
194 # chistedit dependencies that are not available everywhere
195 try:
195 try:
196 import fcntl
196 import fcntl
197 import termios
197 import termios
198 except ImportError:
198 except ImportError:
199 fcntl = None
199 fcntl = None
200 termios = None
200 termios = None
201
201
202 import binascii
202 import binascii
203 import functools
203 import functools
204 import os
204 import os
205 import pickle
205 import pickle
206 import struct
206 import struct
207
207
208 from mercurial.i18n import _
208 from mercurial.i18n import _
209 from mercurial.pycompat import (
209 from mercurial.pycompat import (
210 getattr,
210 getattr,
211 open,
211 open,
212 )
212 )
213 from mercurial.node import (
213 from mercurial.node import (
214 bin,
214 bin,
215 hex,
215 hex,
216 short,
216 short,
217 )
217 )
218 from mercurial import (
218 from mercurial import (
219 bundle2,
219 bundle2,
220 cmdutil,
220 cmdutil,
221 context,
221 context,
222 copies,
222 copies,
223 destutil,
223 destutil,
224 discovery,
224 discovery,
225 encoding,
225 encoding,
226 error,
226 error,
227 exchange,
227 exchange,
228 extensions,
228 extensions,
229 hg,
229 hg,
230 logcmdutil,
230 logcmdutil,
231 merge as mergemod,
231 merge as mergemod,
232 mergestate as mergestatemod,
232 mergestate as mergestatemod,
233 mergeutil,
233 mergeutil,
234 obsolete,
234 obsolete,
235 pycompat,
235 pycompat,
236 registrar,
236 registrar,
237 repair,
237 repair,
238 rewriteutil,
238 rewriteutil,
239 scmutil,
239 scmutil,
240 state as statemod,
240 state as statemod,
241 util,
241 util,
242 )
242 )
243 from mercurial.utils import (
243 from mercurial.utils import (
244 dateutil,
244 dateutil,
245 stringutil,
245 stringutil,
246 urlutil,
246 urlutil,
247 )
247 )
248
248
# Command/config registration tables, populated through the registrar
# decorators below and collected by Mercurial's extension loader.
cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)
configitem(
    b'experimental',
    b'histedit.autoverb',
    default=False,
)
configitem(
    b'histedit',
    b'defaultrev',
    default=None,
)
configitem(
    b'histedit',
    b'dropmissing',
    default=False,
)
configitem(
    b'histedit',
    b'linelen',
    default=80,
)
configitem(
    b'histedit',
    b'singletransaction',
    default=False,
)
configitem(
    b'ui',
    b'interface.histedit',
    default=None,
)
configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
# TODO: Teach the text-based histedit interface to respect this config option
# before we make it non-experimental.
configitem(
    b'histedit', b'later-commits-first', default=False, experimental=True
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# Maps each verb (and alias) to its histeditaction subclass; filled in by
# the @action decorator below.
actiontable = {}
# Buckets controlling the ordering of verbs in the editor help comment:
# primary verbs are listed first, then verbs with aliases, then the rest.
# Internal actions are registered but never shown to the user.
primaryactions = set()
secondaryactions = set()
tertiaryactions = set()
internalactions = set()
302
302
303
303
def geteditcomment(ui, first, last):
    """construct the editor comment
    The comment includes::
    - an intro
    - sorted primary commands
    - sorted short commands
    - sorted long commands
    - additional hints

    Commands are only included once.
    """
    intro = _(
        b"""Edit history between %s and %s

Commits are listed from least to most recent

You can reorder changesets by reordering the lines

Commands:
"""
    )
    helplines = []

    def describe(verb):
        # Render one "verb = summary" help entry for the given action.
        entry = actiontable[verb]
        msglines = entry.message.split(b"\n")
        label = verb
        if len(entry.verbs):
            label = b', '.join(sorted(entry.verbs, key=lambda v: len(v)))
        helplines.append(b" %s = %s" % (label, msglines[0]))
        # NOTE(review): these continuation templates are appended verbatim
        # (no %-substitution happens); in practice action messages are a
        # single line, so this branch adds nothing — confirm before relying
        # on multi-line messages.
        helplines.extend([b' %s'] * (len(msglines) - 1))

    ordered = (
        sorted(primaryactions)
        + sorted(secondaryactions)
        + sorted(tertiaryactions)
    )
    for verb in ordered:
        describe(verb)
    helplines.append(b'')

    hints = []
    if ui.configbool(b'histedit', b'dropmissing'):
        hints.append(
            b"Deleting a changeset from the list "
            b"will DISCARD it from the edited history!"
        )

    lines = (intro % (first, last)).split(b'\n') + helplines + hints

    # Prefix every line with "# " so the whole block is an editor comment;
    # empty lines become a bare "#".
    return b''.join(b'# %s\n' % l if l else b'#\n' for l in lines)
353
353
354
354
class histeditstate:
    """In-memory representation of an in-progress histedit session.

    The session is persisted in ``.hg/histedit-state`` so an interrupted
    histedit can later be continued or aborted.  Two on-disk formats are
    supported: the current line-based "v1" format and a legacy pickled
    format written by older Mercurial versions.
    """

    def __init__(self, repo):
        self.repo = repo
        self.actions = None
        self.keep = None
        self.topmost = None
        self.parentctxnode = None
        self.lock = None
        self.wlock = None
        self.backupfile = None
        self.stateobj = statemod.cmdstate(repo, b'histedit-state')
        self.replacements = []

    def read(self):
        """Load histedit state from disk and set fields appropriately."""
        if not self.stateobj.exists():
            # No histedit in progress; tell the user which command to use.
            cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))

        data = self._read()

        self.parentctxnode = data[b'parentctxnode']
        actions = parserules(data[b'rules'], self)
        self.actions = actions
        self.keep = data[b'keep']
        self.topmost = data[b'topmost']
        self.replacements = data[b'replacements']
        self.backupfile = data[b'backupfile']

    def _read(self):
        # Read the raw state file and normalize both supported on-disk
        # formats into a single dict.
        fp = self.repo.vfs.read(b'histedit-state')
        if fp.startswith(b'v1\n'):
            data = self._load()
            parentctxnode, rules, keep, topmost, replacements, backupfile = data
        else:
            # Legacy format: a pickled tuple (no backup-file field).  The
            # file lives inside .hg/ and is written by hg itself; still,
            # unpickling is only safe because the repo is trusted local
            # state.
            data = pickle.loads(fp)
            parentctxnode, rules, keep, topmost, replacements = data
            backupfile = None
        # parserules() expects one "<verb> <rest>" rule per line.
        rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules])

        return {
            b'parentctxnode': parentctxnode,
            b"rules": rules,
            b"keep": keep,
            b"topmost": topmost,
            b"replacements": replacements,
            b"backupfile": backupfile,
        }

    def write(self, tr=None):
        """Persist the state, via the transaction *tr* when one is open."""
        if tr:
            # Defer the write so it participates in the transaction.
            tr.addfilegenerator(
                b'histedit-state',
                (b'histedit-state',),
                self._write,
                location=b'plain',
            )
        else:
            with self.repo.vfs(b"histedit-state", b"w") as f:
                self._write(f)

    def _write(self, fp):
        # Serialize in the "v1" line-based format read back by _load().
        fp.write(b'v1\n')
        fp.write(b'%s\n' % hex(self.parentctxnode))
        fp.write(b'%s\n' % hex(self.topmost))
        fp.write(b'%s\n' % (b'True' if self.keep else b'False'))
        fp.write(b'%d\n' % len(self.actions))
        for action in self.actions:
            fp.write(b'%s\n' % action.tostate())
        fp.write(b'%d\n' % len(self.replacements))
        for replacement in self.replacements:
            # One line per replacement: original hash followed by the
            # concatenated hashes of its successors.
            fp.write(
                b'%s%s\n'
                % (
                    hex(replacement[0]),
                    b''.join(hex(r) for r in replacement[1]),
                )
            )
        backupfile = self.backupfile
        if not backupfile:
            backupfile = b''
        fp.write(b'%s\n' % backupfile)

    def _load(self):
        # Parse the "v1" format written by _write(); returns the raw tuple
        # consumed by _read().
        fp = self.repo.vfs(b'histedit-state', b'r')
        lines = [l[:-1] for l in fp.readlines()]

        index = 0
        lines[index]  # version number
        index += 1

        parentctxnode = bin(lines[index])
        index += 1

        topmost = bin(lines[index])
        index += 1

        keep = lines[index] == b'True'
        index += 1

        # Rules
        rules = []
        rulelen = int(lines[index])
        index += 1
        for i in range(rulelen):
            # Each rule occupies two lines: the verb, then the rest.
            ruleaction = lines[index]
            index += 1
            rule = lines[index]
            index += 1
            rules.append((ruleaction, rule))

        # Replacements
        replacements = []
        replacementlen = int(lines[index])
        index += 1
        for i in range(replacementlen):
            # 40 hex chars of original node, then 0..n successor nodes of
            # 40 hex chars each, all on one line.
            replacement = lines[index]
            original = bin(replacement[:40])
            succ = [
                bin(replacement[i : i + 40])
                for i in range(40, len(replacement), 40)
            ]
            replacements.append((original, succ))
            index += 1

        backupfile = lines[index]
        index += 1

        fp.close()

        return parentctxnode, rules, keep, topmost, replacements, backupfile

    def clear(self):
        """Remove the on-disk state file, if a histedit is in progress."""
        if self.inprogress():
            self.repo.vfs.unlink(b'histedit-state')

    def inprogress(self):
        """Report whether a histedit session is currently in progress."""
        return self.repo.vfs.exists(b'histedit-state')
492
492
493
493
class histeditaction:
    """Base class for histedit rule actions (pick, edit, fold, ...).

    An action couples a verb with the changeset node it operates on, and
    knows how to parse itself from a rule line, verify itself against the
    edited set, run, and serialize itself into the histedit state file.
    """

    def __init__(self, state, node):
        self.state = state
        self.repo = state.repo
        self.node = node

    @classmethod
    def fromrule(cls, state, rule):
        """Parses the given rule, returning an instance of the histeditaction."""
        ruleid = rule.strip().split(b' ', 1)[0]
        # ruleid can be anything from rev numbers, hashes, "bookmarks" etc
        # Check for validation of rule ids and get the rulehash
        try:
            rev = bin(ruleid)
        except binascii.Error:
            # Not a full hex hash: fall back to a general revision lookup
            # (rev number, bookmark, hash prefix, ...).
            try:
                _ctx = scmutil.revsingle(state.repo, ruleid)
                rulehash = _ctx.hex()
                rev = bin(rulehash)
            except error.RepoLookupError:
                raise error.ParseError(_(b"invalid changeset %s") % ruleid)
        return cls(state, rev)

    def verify(self, prev, expected, seen):
        """Verifies semantic correctness of the rule"""
        repo = self.repo
        ha = hex(self.node)
        # Resolve a possibly-abbreviated node id to the full node, or None.
        self.node = scmutil.resolvehexnodeidprefix(repo, ha)
        if self.node is None:
            raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])
        self._verifynodeconstraints(prev, expected, seen)

    def _verifynodeconstraints(self, prev, expected, seen):
        # by default command need a node in the edited list
        if self.node not in expected:
            raise error.ParseError(
                _(b'%s "%s" changeset was not a candidate')
                % (self.verb, short(self.node)),
                hint=_(b'only use listed changesets'),
            )
        # and only one command per node
        if self.node in seen:
            raise error.ParseError(
                _(b'duplicated command for changeset %s') % short(self.node)
            )

    def torule(self):
        """build a histedit rule line for an action

        by default lines are in the form:
        <hash> <rev> <summary>
        """
        ctx = self.repo[self.node]
        ui = self.repo.ui
        # We don't want color codes in the commit message template, so
        # disable the label() template function while we render it.
        with ui.configoverride(
            {(b'templatealias', b'label(l,x)'): b"x"}, b'histedit'
        ):
            summary = cmdutil.rendertemplate(
                ctx, ui.config(b'histedit', b'summary-template')
            )
        line = b'%s %s %s' % (self.verb, ctx, stringutil.firstline(summary))
        # trim to 75 columns by default so it's not stupidly wide in my editor
        # (the 5 more are left for verb)
        maxlen = self.repo.ui.configint(b'histedit', b'linelen')
        maxlen = max(maxlen, 22)  # avoid truncating hash
        return stringutil.ellipsis(line, maxlen)

    def tostate(self):
        """Print an action in format used by histedit state files
        (the first line is a verb, the remainder is the second)
        """
        return b"%s\n%s" % (self.verb, hex(self.node))

    def run(self):
        """Runs the action. The default behavior is simply apply the action's
        rulectx onto the current parentctx."""
        self.applychange()
        self.continuedirty()
        return self.continueclean()

    def applychange(self):
        """Applies the changes from this action's rulectx onto the current
        parentctx, but does not commit them."""
        repo = self.repo
        rulectx = repo[self.node]
        with repo.ui.silent():
            hg.update(repo, self.state.parentctxnode, quietempty=True)
        stats = applychanges(repo.ui, repo, rulectx, {})
        repo.dirstate.setbranch(rulectx.branch())
        if stats.unresolvedcount:
            # Merge conflicts: stop and let the user resolve, then resume
            # with `hg histedit --continue`.
            raise error.InterventionRequired(
                _(b'Fix up the change (%s %s)') % (self.verb, short(self.node)),
                hint=_(b'hg histedit --continue to resume'),
            )

    def continuedirty(self):
        """Continues the action when changes have been applied to the working
        copy. The default behavior is to commit the dirty changes."""
        repo = self.repo
        rulectx = repo[self.node]

        editor = self.commiteditor()
        commit = commitfuncfor(repo, rulectx)
        if repo.ui.configbool(b'rewrite', b'update-timestamp'):
            date = dateutil.makedate()
        else:
            date = rulectx.date()
        commit(
            text=rulectx.description(),
            user=rulectx.user(),
            date=date,
            extra=rulectx.extra(),
            editor=editor,
        )

    def commiteditor(self):
        """The editor to be used to edit the commit message."""
        # False means "no interactive editor"; subclasses override.
        return False

    def continueclean(self):
        """Continues the action when the working copy is clean. The default
        behavior is to accept the current commit as the new version of the
        rulectx."""
        ctx = self.repo[b'.']
        if ctx.node() == self.state.parentctxnode:
            # Nothing was committed on top of the parent: the changeset
            # became empty and is dropped (replaced by nothing).
            self.repo.ui.warn(
                _(b'%s: skipping changeset (no changes)\n') % short(self.node)
            )
            return ctx, [(self.node, tuple())]
        if ctx.node() == self.node:
            # Nothing changed
            return ctx, []
        return ctx, [(self.node, (ctx.node(),))]
629
629
630
630
def commitfuncfor(repo, src):
    """Build a commit function for the replacement of <src>

    This function ensure we apply the same treatment to all changesets.

    - Add a 'histedit_source' entry in extra.

    Note that fold has its own separated logic because its handling is a bit
    different and not easily factored out of the fold method.
    """
    floorphase = src.phase()

    def commitfunc(**kwargs):
        # Never create the replacement with a more public phase than the
        # changeset it replaces.
        with repo.ui.configoverride(
            {(b'phases', b'new-commit'): floorphase}, b'histedit'
        ):
            newextra = dict(kwargs.get('extra', {}))
            newextra[b'histedit_source'] = src.hex()
            kwargs['extra'] = newextra
            return repo.commit(**kwargs)

    return commitfunc
652
652
653
653
def applychanges(ui, repo, ctx, opts):
    """Merge changeset from ctx (only) in the current working directory"""
    if ctx.p1().node() == repo.dirstate.p1():
        # In-place edit: ctx's parent is already checked out, so a plain
        # revert to ctx is enough — no merge machinery needed.
        with ui.silent():
            cmdutil.revert(ui, repo, ctx, all=True)
        return mergemod.updateresult(0, 0, 0, 0)

    try:
        # ui.forcemerge is an internal variable, do not document
        repo.ui.setconfig(
            b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
        )
        return mergemod.graft(
            repo,
            ctx,
            labels=[
                b'already edited',
                b'current change',
                b'parent of current change',
            ],
        )
    finally:
        # Always clear the forced merge tool, even when graft raises.
        repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
680
680
681
681
def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):
    """collapse the set of revisions from first to last as new one.

    Expected commit options are:
    - message
    - date
    - username
    Commit message is edited in all cases.

    This function works in memory."""
    ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev()))
    if not ctxs:
        return None
    for c in ctxs:
        if not c.mutable():
            raise error.ParseError(
                _(b"cannot fold into public change %s") % short(c.node())
            )
    base = firstctx.p1()

    # commit a new version of the old changeset, including the update
    # collect all files which might be affected
    files = set()
    for ctx in ctxs:
        files.update(ctx.files())

    # Recompute copies (avoid recording a -> b -> a)
    copied = copies.pathcopies(base, lastctx)

    # prune files which were reverted by the updates
    files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]
    # commit version of these files as defined by head
    headmf = lastctx.manifest()

    def filectxfn(repo, ctx, path):
        # Provide the folded file content: take each surviving file from
        # the last changeset; a path absent from its manifest was removed.
        if path in headmf:
            fctx = lastctx[path]
            flags = fctx.flags()
            mctx = context.memfilectx(
                repo,
                ctx,
                fctx.path(),
                fctx.data(),
                islink=b'l' in flags,
                isexec=b'x' in flags,
                copysource=copied.get(path),
            )
            return mctx
        # Returning None signals deletion of the file.
        return None

    if commitopts.get(b'message'):
        message = commitopts[b'message']
    else:
        message = firstctx.description()
    user = commitopts.get(b'user')
    date = commitopts.get(b'date')
    extra = commitopts.get(b'extra')

    # The collapsed changeset keeps the first changeset's parents.
    parents = (firstctx.p1().node(), firstctx.p2().node())
    editor = None
    if not skipprompt:
        editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold')
    new = context.memctx(
        repo,
        parents=parents,
        text=message,
        files=files,
        filectxfn=filectxfn,
        user=user,
        date=date,
        extra=extra,
        editor=editor,
    )
    return repo.commitctx(new)
756
756
757
757
def _isdirtywc(repo):
    """Report whether the working copy has uncommitted changes."""
    wctx = repo[None]
    return wctx.dirty(missing=True)
760
760
761
761
def abortdirty():
    """Raise a StateError telling the user to deal with a dirty working copy."""
    hint = _(
        b'amend, commit, or revert them and run histedit '
        b'--continue, or abort with histedit --abort'
    )
    raise error.StateError(_(b'working copy has pending changes'), hint=hint)
770
770
771
771
def action(verbs, message, priority=False, internal=False):
    """Class decorator registering a histedit action.

    *verbs* is the list of accepted spellings (the first one is canonical),
    *message* the help text.  *priority* actions are listed first in the
    editor help; *internal* actions are registered but never shown.
    """

    def wrap(cls):
        # An action cannot be both user-prioritized and internal.
        assert not priority or not internal
        verb = verbs[0]
        if priority:
            bucket = primaryactions
        elif internal:
            bucket = internalactions
        elif len(verbs) > 1:
            bucket = secondaryactions
        else:
            bucket = tertiaryactions
        bucket.add(verb)

        cls.verb = verb
        cls.verbs = verbs
        cls.message = message
        for name in verbs:
            actiontable[name] = cls
        return cls

    return wrap
793
793
794
794
@action([b'pick', b'p'], _(b'use commit'), priority=True)
class pick(histeditaction):
    def run(self):
        """Apply the picked changeset, short-circuiting when it already
        sits directly on the current histedit parent."""
        rulectx = self.repo[self.node]
        if rulectx.p1().node() != self.state.parentctxnode:
            return super(pick, self).run()
        # Already in the right place: keep the changeset as-is.
        self.repo.ui.debug(b'node %s unchanged\n' % short(self.node))
        return rulectx, []
804
804
805
805
@action(
    [b'edit', b'e'],
    _(b'use commit, but allow edits before making new commit'),
    priority=True,
)
class edit(histeditaction):
    def run(self):
        """Check out the parent, apply the changeset, then interrupt so the
        user can amend before resuming with `hg histedit --continue`."""
        repo = self.repo
        target = repo[self.node]
        hg.update(repo, self.state.parentctxnode, quietempty=True)
        applychanges(repo.ui, repo, target, {})
        hint = _(b'to edit %s, `hg histedit --continue` after making changes')
        raise error.InterventionRequired(
            _(b'Editing (%s), commit as needed now to split the change')
            % short(self.node),
            hint=hint % short(self.node),
        )

    def commiteditor(self):
        # Always open an editor so the user can adjust the message.
        return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit')
826
826
827
827
@action([b'fold', b'f'], _(b'use commit, but combine it with the one above'))
class fold(histeditaction):
    """Combine a changeset with its predecessor in the rule list.

    Subclasses customize prompting/merging via skipprompt(),
    mergedescs() and firstdate() ('rollup' and '_multifold' do this).
    """

    def verify(self, prev, expected, seen):
        """Verifies semantic correctness of the fold rule"""
        super(fold, self).verify(prev, expected, seen)
        repo = self.repo
        if not prev:
            # first rule in the list: fold target is the edited node's parent
            c = repo[self.node].p1()
        elif not prev.verb in (b'pick', b'base'):
            # preceding rule is itself a rewrite; nothing to check statically
            return
        else:
            c = repo[prev.node]
        if not c.mutable():
            raise error.ParseError(
                _(b"cannot fold into public change %s") % short(c.node())
            )

    def continuedirty(self):
        """Commit the dirty working directory as a temporary revision so
        the fold can continue (it is collapsed away in finishfold())."""
        repo = self.repo
        rulectx = repo[self.node]

        commit = commitfuncfor(repo, rulectx)
        commit(
            text=b'fold-temp-revision %s' % short(self.node),
            user=rulectx.user(),
            date=rulectx.date(),
            extra=rulectx.extra(),
        )

    def continueclean(self):
        """Finish the fold once the working directory is clean.

        Returns the resulting context and a list of
        (oldnode, successors) replacement pairs.
        """
        repo = self.repo
        ctx = repo[b'.']
        rulectx = repo[self.node]
        parentctxnode = self.state.parentctxnode
        if ctx.node() == parentctxnode:
            # nothing was committed on top of the parent
            repo.ui.warn(_(b'%s: empty changeset\n') % short(self.node))
            return ctx, [(self.node, (parentctxnode,))]

        parentctx = repo[parentctxnode]
        # commits created between the parent and the working copy
        newcommits = {
            c.node()
            for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev())
        }
        if not newcommits:
            repo.ui.warn(
                _(
                    b'%s: cannot fold - working copy is not a '
                    b'descendant of previous commit %s\n'
                )
                % (short(self.node), short(parentctxnode))
            )
            return ctx, [(self.node, (ctx.node(),))]

        middlecommits = newcommits.copy()
        middlecommits.discard(ctx.node())

        return self.finishfold(
            repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits
        )

    def skipprompt(self):
        """Returns true if the rule should skip the message editor.

        For example, 'fold' wants to show an editor, but 'rollup'
        doesn't want to.
        """
        return False

    def mergedescs(self):
        """Returns true if the rule should merge messages of multiple changes.

        This exists mainly so that 'rollup' rules can be a subclass of
        'fold'.
        """
        return True

    def firstdate(self):
        """Returns true if the rule should preserve the date of the first
        change.

        This exists mainly so that 'rollup' rules can be a subclass of
        'fold'.
        """
        return False

    def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
        """Collapse ``ctx``..``newnode`` into one commit and update the
        working directory to it.

        Returns the new context plus the replacement pairs mapping every
        folded node to the collapsed commit.
        """
        mergemod.update(ctx.p1())
        ### prepare new commit data
        commitopts = {}
        commitopts[b'user'] = ctx.user()
        # commit message
        if not self.mergedescs():
            newmessage = ctx.description()
        else:
            newmessage = (
                b'\n***\n'.join(
                    [ctx.description()]
                    + [repo[r].description() for r in internalchanges]
                    + [oldctx.description()]
                )
                + b'\n'
            )
        commitopts[b'message'] = newmessage
        # date
        if self.firstdate():
            commitopts[b'date'] = ctx.date()
        else:
            commitopts[b'date'] = max(ctx.date(), oldctx.date())
        # if date is to be updated to current
        if ui.configbool(b'rewrite', b'update-timestamp'):
            commitopts[b'date'] = dateutil.makedate()

        extra = ctx.extra().copy()
        # histedit_source
        # note: ctx is likely a temporary commit but that the best we can do
        # here. This is sufficient to solve issue3681 anyway.
        extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex())
        commitopts[b'extra'] = extra
        # the collapsed commit must not become more visible than its parts
        phasemin = max(ctx.phase(), oldctx.phase())
        overrides = {(b'phases', b'new-commit'): phasemin}
        with repo.ui.configoverride(overrides, b'histedit'):
            n = collapse(
                repo,
                ctx,
                repo[newnode],
                commitopts,
                skipprompt=self.skipprompt(),
            )
        if n is None:
            return ctx, []
        mergemod.update(repo[n])
        replacements = [
            (oldctx.node(), (newnode,)),
            (ctx.node(), (n,)),
            (newnode, (n,)),
        ]
        for ich in internalchanges:
            replacements.append((ich, (n,)))
        return repo[n], replacements
967
967
968
968
@action(
    [b'base', b'b'],
    _(b'checkout changeset and apply further changesets from there'),
)
class base(histeditaction):
    def run(self):
        """Check out the base changeset (if not already there)."""
        if self.repo[b'.'].node() != self.node:
            mergemod.clean_update(self.repo[self.node])
        return self.continueclean()

    def continuedirty(self):
        # a dirty working directory is never valid for 'base'
        abortdirty()

    def continueclean(self):
        # no replacements: 'base' only positions the working directory
        basectx = self.repo[b'.']
        return basectx, []

    def _verifynodeconstraints(self, prev, expected, seen):
        # base can only be used with a node not in the edited set
        if self.node in expected:
            msg = _(b'%s "%s" changeset was an edited list candidate')
            raise error.ParseError(
                msg % (self.verb, short(self.node)),
                hint=_(b'base must only use unlisted changesets'),
            )
994
994
995
995
@action(
    [b'_multifold'],
    _(
        """fold subclass used for when multiple folds happen in a row

    We only want to fire the editor for the folded message once when
    (say) four changes are folded down into a single change. This is
    similar to rollup, but we should preserve both messages so that
    when the last fold operation runs we can show the user all the
    commit messages in their editor.
    """
    ),
    internal=True,
)
class _multifold(fold):
    def skipprompt(self):
        # suppress the editor for every fold but the final one in a run;
        # the last plain 'fold' shows all accumulated messages at once
        return True
1013
1013
1014
1014
@action(
    [b"roll", b"r"],
    _(b"like fold, but discard this commit's description and date"),
)
class rollup(fold):
    def mergedescs(self):
        # keep only the first commit's message
        return False

    def skipprompt(self):
        # nothing to edit, so never open the editor
        return True

    def firstdate(self):
        # keep the first commit's date as well
        return True
1028
1028
1029
1029
@action([b"drop", b"d"], _(b'remove commit from history'))
class drop(histeditaction):
    def run(self):
        """Drop the changeset: keep the parent state and record the node
        as replaced by nothing (an empty successor tuple)."""
        parentctx = self.repo[self.state.parentctxnode]
        return parentctx, [(self.node, tuple())]
1035
1035
1036
1036
@action(
    [b"mess", b"m"],
    _(b'edit commit message without changing commit content'),
    priority=True,
)
class message(histeditaction):
    def commiteditor(self):
        """Always open the editor (editform 'histedit.mess') so the user
        can rewrite the description."""
        return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess')
1045
1045
1046
1046
def findoutgoing(ui, repo, remote=None, force=False, opts=None):
    """utility function to find the first outgoing changeset

    Used by initialization code

    Raises StateError when there is nothing outgoing, or when the
    outgoing set has more than one root (histedit needs a single
    linear stack to edit).
    """
    if opts is None:
        opts = {}
    path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
    dest = path.pushloc or path.loc

    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))

    revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    # changesets present locally but missing from the destination
    outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
    if not outgoing.missing:
        raise error.StateError(_(b'no outgoing ancestors'))
    roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
    if len(roots) > 1:
        msg = _(b'there are ambiguous outgoing revisions')
        hint = _(b"see 'hg help histedit' for more detail")
        raise error.StateError(msg, hint=hint)
    return repo[roots[0]].node()
1073
1073
1074
1074
# Curses Support
try:
    import curses
except ImportError:
    # curses unavailable (e.g. bare Windows build); callers must check
    # for None before entering the curses interface
    curses = None

# actions the curses UI can assign to a rule
KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll']
# display labels for actions that combine a patch with the previous one
ACTION_LABELS = {
    b'fold': b'^fold',
    b'roll': b'^roll',
}

# curses color-pair identifiers
COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5
COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8
COLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11

# event codes produced by the key handler
E_QUIT, E_HISTEDIT = 1, 2
E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7
# UI modes
MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3

# key -> command tables, one per mode; b'global' applies in every mode
KEYTABLE = {
    b'global': {
        b'h': b'next-action',
        b'KEY_RIGHT': b'next-action',
        b'l': b'prev-action',
        b'KEY_LEFT': b'prev-action',
        b'q': b'quit',
        b'c': b'histedit',
        b'C': b'histedit',
        b'v': b'showpatch',
        b'?': b'help',
    },
    MODE_RULES: {
        b'd': b'action-drop',
        b'e': b'action-edit',
        b'f': b'action-fold',
        b'm': b'action-mess',
        b'p': b'action-pick',
        b'r': b'action-roll',
        b' ': b'select',
        b'j': b'down',
        b'k': b'up',
        b'KEY_DOWN': b'down',
        b'KEY_UP': b'up',
        b'J': b'move-down',
        b'K': b'move-up',
        b'KEY_NPAGE': b'move-down',
        b'KEY_PPAGE': b'move-up',
        b'0': b'goto',  # Used for 0..9
    },
    MODE_PATCH: {
        b' ': b'page-down',
        b'KEY_NPAGE': b'page-down',
        b'KEY_PPAGE': b'page-up',
        b'j': b'line-down',
        b'k': b'line-up',
        b'KEY_DOWN': b'line-down',
        b'KEY_UP': b'line-up',
        b'J': b'down',
        b'K': b'up',
    },
    MODE_HELP: {},
}
1138
1138
1139
1139
def screen_size():
    """Return the terminal size as (rows, columns), read from the
    controlling tty via the TIOCGWINSZ ioctl on stdout (fd 1)."""
    return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b'    '))
1142
1142
1143
1143
class histeditrule:
    """One line of the curses rule list: an action applied to a changeset.

    Tracks the rule's original and current position in the list, and
    which other rules it conflicts with (overlapping file changes).
    """

    def __init__(self, ui, ctx, pos, action=b'pick'):
        self.ui = ui
        self.ctx = ctx
        self.action = action
        # position this changeset originally had in the list
        self.origpos = pos
        # current position (updated when the user moves the rule)
        self.pos = pos
        # rules whose touched files overlap with ours (see checkconflicts)
        self.conflicts = []

    def __bytes__(self):
        # Example display of several histeditrules:
        #
        # #10 pick 316392:06a16c25c053 add option to skip tests
        # #11 ^roll 316393:71313c964cc5 <RED>oops a fixup commit</RED>
        # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h
        # #13 ^fold 316395:14ce5803f4c3 fix warnings
        #
        # The carets point to the changeset being folded into ("roll this
        # changeset into the changeset above").
        return b'%s%s' % (self.prefix, self.desc)

    __str__ = encoding.strmethod(__bytes__)

    @property
    def prefix(self):
        # Some actions ('fold' and 'roll') combine a patch with a
        # previous one. Add a marker showing which patch they apply
        # to.
        action = ACTION_LABELS.get(self.action, self.action)

        h = self.ctx.hex()[0:12]
        r = self.ctx.rev()

        return b"#%s %s %d:%s " % (
            (b'%d' % self.origpos).ljust(2),
            action.ljust(6),
            r,
            h,
        )

    @util.propertycache
    def desc(self):
        # a user-configured summary template takes precedence
        summary = cmdutil.rendertemplate(
            self.ctx, self.ui.config(b'histedit', b'summary-template')
        )
        if summary:
            return summary
        # This is split off from the prefix property so that we can
        # separately make the description for 'roll' red (since it
        # will get discarded).
        return stringutil.firstline(self.ctx.description())

    def checkconflicts(self, other):
        """Update and return our conflict list with respect to ``other``.

        A conflict is recorded when ``other`` now sorts after us although
        it originally did not, and both changesets touch at least one
        common file; otherwise any stale conflict entry is dropped.
        """
        if other.pos > self.pos and other.origpos <= self.origpos:
            if set(other.ctx.files()) & set(self.ctx.files()) != set():
                self.conflicts.append(other)
            return self.conflicts

        if other in self.conflicts:
            self.conflicts.remove(other)
        return self.conflicts
1205
1205
1206
1206
def makecommands(rules):
    """Returns a list of commands consumable by histedit --commands based on
    our list of rules

    Each entry is a ``b'<action> <ctx>\\n'`` line built from a rule's
    action and changeset.
    """
    # note: the original looped with ``for rules in rules``, shadowing the
    # parameter with the loop variable; use a distinct name instead
    return [b'%s %s\n' % (rule.action, rule.ctx) for rule in rules]
1214
1214
1215
1215
def addln(win, y, x, line, color=None):
    """Draw *line* at (y, x), space-padded to the window edge so that a
    color attribute covers the entire row.  Negative y/x count back from
    the bottom/right edge of the window."""
    maxy, maxx = win.getmaxyx()
    # pad/clip using the coordinate as given (before any negative wrap),
    # matching how much room remains right of column x
    width = maxx - 1 - x
    padded = bytes(line).ljust(width)[:width]
    row = maxy + y if y < 0 else y
    col = maxx + x if x < 0 else x
    if color:
        win.addstr(row, col, padded, color)
    else:
        win.addstr(row, col, padded)
1230
1230
1231
1231
1232 def _trunc_head(line, n):
1232 def _trunc_head(line, n):
1233 if len(line) <= n:
1233 if len(line) <= n:
1234 return line
1234 return line
1235 return b'> ' + line[-(n - 2) :]
1235 return b'> ' + line[-(n - 2) :]
1236
1236
1237
1237
1238 def _trunc_tail(line, n):
1238 def _trunc_tail(line, n):
1239 if len(line) <= n:
1239 if len(line) <= n:
1240 return line
1240 return line
1241 return line[: n - 2] + b' >'
1241 return line[: n - 2] + b' >'
1242
1242
1243
1243
1244 class _chistedit_state:
1244 class _chistedit_state:
    def __init__(
        self,
        repo,
        rules,
        stdscr,
    ):
        """Hold all mutable state for one curses histedit session."""
        self.repo = repo
        # list of histeditrule objects, in rule (chronological) order
        self.rules = rules
        self.stdscr = stdscr
        # display orientation: newest commit at the top when True
        self.later_on_top = repo.ui.configbool(
            b'histedit', b'later-commits-first'
        )
        # The current item in display order, initialized to point to the top
        # of the screen.
        self.pos = 0
        # display index of the rule being moved, or None
        self.selected = None
        # (current, previous) UI mode
        self.mode = (MODE_INIT, MODE_INIT)
        # height of the main pane; set once the layout is known
        self.page_height = None
        # per-mode scroll offsets
        self.modes = {
            MODE_RULES: {
                b'line_offset': 0,
            },
            MODE_PATCH: {
                b'line_offset': 0,
            },
        }
1271
1271
    def render_commit(self, win):
        """Renders the commit window that shows the log of the current selected
        commit"""
        rule = self.rules[self.display_pos_to_rule_pos(self.pos)]

        ctx = rule.ctx
        win.box()

        maxy, maxx = win.getmaxyx()
        # leave room for the box border on both sides
        length = maxx - 3

        line = b"changeset: %d:%s" % (ctx.rev(), ctx.hex()[:12])
        win.addstr(1, 1, line[:length])

        line = b"user:      %s" % ctx.user()
        win.addstr(2, 1, line[:length])

        bms = self.repo.nodebookmarks(ctx.node())
        line = b"bookmark:  %s" % b' '.join(bms)
        win.addstr(3, 1, line[:length])

        line = b"summary:   %s" % stringutil.firstline(ctx.description())
        win.addstr(4, 1, line[:length])

        line = b"files:     "
        win.addstr(5, 1, line)
        # column where file names start, and the width left for them
        fnx = 1 + len(line)
        fnmaxx = length - fnx + 1
        y = 5
        # number of file lines that fit in the pane
        fnmaxn = maxy - (1 + y) - 1
        files = ctx.files()
        for i, line1 in enumerate(files):
            if len(files) > fnmaxn and i == fnmaxn - 1:
                # too many files: squeeze the remainder onto one line
                win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))
                y = y + 1
                break
            win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
            y = y + 1

        conflicts = rule.conflicts
        if len(conflicts) > 0:
            conflictstr = b','.join(map(lambda r: r.ctx.hex()[:12], conflicts))
            conflictstr = b"changed files overlap with %s" % conflictstr
        else:
            conflictstr = b'no overlap'

        win.addstr(y, 1, conflictstr[:length])
        win.noutrefresh()
1320
1320
    def helplines(self):
        """Return the help text for the current mode as a list of byte
        strings, one per screen line."""
        if self.mode[0] == MODE_PATCH:
            help = b"""\
?: help, k/up: line up, j/down: line down, v: stop viewing patch
pgup: prev page, space/pgdn: next page, c: commit, q: abort
"""
        else:
            help = b"""\
?: help, k/up: move up, j/down: move down, space: select, v: view patch
d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
"""
        # remind the user which way the list is oriented
        if self.later_on_top:
            help += b"Newer commits are shown above older commits.\n"
        else:
            help += b"Older commits are shown above newer commits.\n"
        return help.splitlines()
1338
1338
    def render_help(self, win):
        """Paint the help pane, one full-width line per helplines() entry,
        clipped to the window height."""
        maxy, maxx = win.getmaxyx()
        for y, line in enumerate(self.helplines()):
            if y >= maxy:
                break
            addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
        win.noutrefresh()
1346
1346
    def layout(self):
        """Compute pane sizes for the screen.

        Returns a dict mapping pane name (b'commit', b'help', b'main')
        to an (height, width) pair.  Raises Abort when the terminal is
        too small to fit all three panes.
        """
        maxy, maxx = self.stdscr.getmaxyx()
        helplen = len(self.helplines())
        # the commit pane is fixed at 12 rows; the main pane gets the rest
        mainlen = maxy - helplen - 12
        if mainlen < 1:
            raise error.Abort(
                _(b"terminal dimensions %d by %d too small for curses histedit")
                % (maxy, maxx),
                hint=_(
                    b"enlarge your terminal or use --config ui.interface=text"
                ),
            )
        return {
            b'commit': (12, maxx),
            b'help': (helplen, maxx),
            b'main': (mainlen, maxx),
        }
1364
1364
    def display_pos_to_rule_pos(self, display_pos):
        """Converts a position in display order to rule order.

        The `display_pos` is the order from the top in display order, not
        considering which items are currently visible on the screen. Thus,
        `display_pos=0` is the item at the top (possibly after scrolling to
        the top)

        Returns the corresponding index into ``self.rules``.
        """
        if self.later_on_top:
            # newest-first display: mirror the index
            return len(self.rules) - 1 - display_pos
        else:
            return display_pos
1377
1377
    def render_rules(self, rulesscr):
        """Paint the rule list pane, highlighting the current and selected
        rules and warning about file-overlap conflicts."""
        start = self.modes[MODE_RULES][b'line_offset']

        conflicts = [r.ctx for r in self.rules if r.conflicts]
        if len(conflicts) > 0:
            # bottom-of-pane warning listing the conflicting changesets
            line = b"potential conflict in %s" % b','.join(
                map(pycompat.bytestr, conflicts)
            )
            addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))

        for display_pos in range(start, len(self.rules)):
            y = display_pos - start
            if y < 0 or y >= self.page_height:
                # off-screen after scrolling
                continue
            rule_pos = self.display_pos_to_rule_pos(display_pos)
            rule = self.rules[rule_pos]
            if len(rule.conflicts) > 0:
                rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
            else:
                rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)

            if display_pos == self.selected:
                rollcolor = COLOR_ROLL_SELECTED
                addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
            elif display_pos == self.pos:
                rollcolor = COLOR_ROLL_CURRENT
                addln(
                    rulesscr,
                    y,
                    2,
                    rule,
                    curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,
                )
            else:
                rollcolor = COLOR_ROLL
                addln(rulesscr, y, 2, rule)

            if rule.action == b'roll':
                # repaint the description in the roll color: it will be
                # discarded by the roll, so make that visually obvious
                rulesscr.addstr(
                    y,
                    2 + len(rule.prefix),
                    rule.desc,
                    curses.color_pair(rollcolor),
                )

        rulesscr.noutrefresh()
1424
1424
def render_string(self, win, output, diffcolors=False):
    """Write the lines of *output* into the curses window *win*.

    Only as many lines as fit (window height minus one) are drawn.
    When *diffcolors* is true the lines are assumed to be bytes from a
    patch, and added/removed/hunk-header lines are colorized.
    """
    maxy, maxx = win.getmaxyx()
    length = min(maxy - 1, len(output))
    for y in range(0, length):
        line = output[y]
        if diffcolors:
            # Use startswith() rather than the old ``line[0] == b'+'``
            # comparison: indexing bytes on Python 3 yields an int, so
            # the equality test never matched and added/removed lines
            # were drawn without color.
            if line.startswith(b'+'):
                win.addstr(
                    y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE)
                )
            elif line.startswith(b'-'):
                win.addstr(
                    y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE)
                )
            elif line.startswith(b'@@ '):
                win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))
            else:
                win.addstr(y, 0, line)
        else:
            win.addstr(y, 0, line)
    win.noutrefresh()
1446
1446
def render_patch(self, win):
    """Render the currently scrolled region of the patch into *win*."""
    patchmode = self.modes[MODE_PATCH]
    offset = patchmode[b'line_offset']
    self.render_string(win, patchmode[b'patchcontents'][offset:], diffcolors=True)
1451
1451
def event(self, ch):
    """Dispatch one keypress *ch* and update the state accordingly.

    Returns one of the E_* codes when the caller (the main loop) must
    act on the event (quit, run histedit, resize, scroll), and None
    otherwise.
    """
    oldpos = self.pos

    if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
        return E_RESIZE

    # All digit keys share the single 'goto' keytable entry.
    lookup_ch = ch
    if ch is not None and b'0' <= ch <= b'9':
        lookup_ch = b'0'

    curmode, prevmode = self.mode
    action = KEYTABLE[curmode].get(
        lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
    )
    if action is None:
        return

    # Actions that simply bubble an event code up to the main loop.
    passthrough = {
        b'quit': E_QUIT,
        b'histedit': E_HISTEDIT,
        b'page-down': E_PAGEDOWN,
        b'page-up': E_PAGEUP,
        b'line-down': E_LINEDOWN,
        b'line-up': E_LINEUP,
    }
    if action in passthrough:
        return passthrough[action]

    if action in (b'down', b'move-down'):
        newpos = min(oldpos + 1, len(self.rules) - 1)
        self.move_cursor(oldpos, newpos)
        if self.selected is not None or action == b'move-down':
            self.swap(oldpos, newpos)
    elif action in (b'up', b'move-up'):
        newpos = max(0, oldpos - 1)
        self.move_cursor(oldpos, newpos)
        if self.selected is not None or action == b'move-up':
            self.swap(oldpos, newpos)
    elif action == b'next-action':
        self.cycle_action(oldpos, next=True)
    elif action == b'prev-action':
        self.cycle_action(oldpos, next=False)
    elif action == b'select':
        # Toggle selection of the rule under the cursor.
        self.selected = oldpos if self.selected is None else None
        self.make_selection(self.selected)
    elif action == b'goto' and int(ch) < len(self.rules) <= 10:
        newrule = next(r for r in self.rules if r.origpos == int(ch))
        self.move_cursor(oldpos, newrule.pos)
        if self.selected is not None:
            self.swap(oldpos, newrule.pos)
    elif action.startswith(b'action-'):
        self.change_action(oldpos, action[7:])
    elif action == b'showpatch':
        self.change_mode(MODE_PATCH if curmode != MODE_PATCH else prevmode)
    elif action == b'help':
        self.change_mode(MODE_HELP if curmode != MODE_HELP else prevmode)
1513
1513
def patch_contents(self):
    """Return the patch for the changeset under the cursor.

    The changeset is rendered with the 'status' template, patch output
    enabled and verbosity forced on; the result is returned as a list
    of byte lines.
    """
    repo = self.repo
    rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
    spec = {b"patch": True, b"template": b"status"}
    displayer = logcmdutil.changesetdisplayer(
        repo.ui, repo, spec, buffered=True
    )
    with repo.ui.configoverride(
        {(b'ui', b'verbose'): True}, source=b'histedit'
    ):
        displayer.show(rule.ctx)
        displayer.close()
    return displayer.hunk[rule.ctx.rev()].splitlines()
1528
1528
def move_cursor(self, oldpos, newpos):
    """Point the cursor at *newpos*, regardless of the current mode.

    This lets the user switch between changesets while the patch view
    is displayed.
    """
    self.pos = newpos

    mode, _ = self.mode
    if mode == MODE_RULES:
        # Scroll the rules view so the cursor's rule stays visible.
        # Updating the stored offset even while another view is shown
        # means switching back will land on the cursor.
        modestate = self.modes[MODE_RULES]
        offset = modestate[b'line_offset']
        if newpos < offset:
            modestate[b'line_offset'] = newpos
        elif newpos > offset + self.page_height - 1:
            modestate[b'line_offset'] = newpos - self.page_height + 1

    # A different rule means a different patch; show it from the top.
    self.modes[MODE_PATCH][b'line_offset'] = 0
1547
1547
def change_mode(self, mode):
    """Switch the UI to *mode*, remembering where we came from."""
    previous, _ = self.mode
    self.mode = (mode, previous)
    if mode == MODE_PATCH:
        # Entering the patch view: (re)compute the patch text now.
        self.modes[MODE_PATCH][b'patchcontents'] = self.patch_contents()
1553
1553
def make_selection(self, pos):
    """Record *pos* as the selected rule (None clears the selection)."""
    self.selected = pos
1556
1556
def swap(self, oldpos, newpos):
    """Swap the rules at display positions *oldpos* and *newpos*.

    Conflicts are rechecked only for the affected span, so this runs
    in O(|newpos - oldpos|) time.
    """
    old_rule_pos = self.display_pos_to_rule_pos(oldpos)
    new_rule_pos = self.display_pos_to_rule_pos(newpos)

    rules = self.rules
    assert 0 <= old_rule_pos < len(rules) and 0 <= new_rule_pos < len(rules)

    rules[old_rule_pos], rules[new_rule_pos] = (
        rules[new_rule_pos],
        rules[old_rule_pos],
    )

    # TODO: swap should not know about histeditrule's internals
    rules[new_rule_pos].pos = new_rule_pos
    rules[old_rule_pos].pos = old_rule_pos

    # Only rules between the two positions can have gained or lost a
    # conflict; recheck just that span.
    start = min(old_rule_pos, new_rule_pos)
    end = max(old_rule_pos, new_rule_pos)
    for r in range(start, end + 1):
        rules[new_rule_pos].checkconflicts(rules[r])
        rules[old_rule_pos].checkconflicts(rules[r])

    # Keep the selection attached to the rule that moved.  Compare
    # against None explicitly: display position 0 is falsy but is a
    # perfectly valid selection, and the old truthiness test dropped it.
    if self.selected is not None:
        self.make_selection(newpos)
1583
1583
def change_action(self, pos, action):
    """Set the action of the rule at position *pos* to *action*."""
    assert 0 <= pos < len(self.rules)
    self.rules[pos].action = action
1588
1588
def cycle_action(self, pos, next=False):
    """Step the action at *pos* forward (or backward) through KEY_LIST.

    The action list wraps around at either end.
    """
    assert 0 <= pos < len(self.rules)
    current = self.rules[pos].action

    assert current in KEY_LIST

    step = 1 if next else -1
    index = (KEY_LIST.index(current) + step) % len(KEY_LIST)
    self.change_action(pos, KEY_LIST[index])
1603
1603
def change_view(self, delta, unit):
    """Scroll the current view by *delta* units ('page' or 'line').

    Only the patch view scrolls; in every other mode this is a no-op.
    """
    mode, _ = self.mode
    if mode != MODE_PATCH:
        return
    mode_state = self.modes[mode]
    num_lines = len(mode_state[b'patchcontents'])
    page_height = self.page_height
    step = page_height if unit == b'page' else 1
    num_pages = 1 + (num_lines - 1) // page_height
    max_offset = (num_pages - 1) * page_height
    newline = mode_state[b'line_offset'] + delta * step
    # Clamp the new offset to [0, max_offset].
    mode_state[b'line_offset'] = max(0, min(max_offset, newline))
1618
1618
1619
1619
def _chisteditmain(repo, rules, stdscr):
    """Curses main loop: render the chistedit UI and process keystrokes.

    Returns False when the user quits, or the (possibly reordered and
    re-actioned) rule list when they confirm with the histedit action.
    """
    try:
        curses.use_default_colors()
    except curses.error:
        pass

    # initialize color pattern
    for pair, fg, bg in (
        (COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE),
        (COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE),
        (COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW),
        (COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN),
        (COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA),
        (COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1),
        (COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1),
        (COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1),
        (COLOR_ROLL, curses.COLOR_RED, -1),
        (COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA),
        (COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE),
    ):
        curses.init_pair(pair, fg, bg)

    # don't display the cursor
    try:
        curses.curs_set(0)
    except curses.error:
        pass

    def drawvertwin(size, y, x):
        # Create a window of the given size at (y, x) and advance y
        # past it so panes stack vertically.
        win = curses.newwin(size[0], size[1], y, x)
        return win, y + size[0], x

    state = _chistedit_state(repo, rules, stdscr)

    # event loop
    ch = None
    stdscr.clear()
    stdscr.refresh()
    while True:
        oldmode, unused = state.mode
        if oldmode == MODE_INIT:
            state.change_mode(MODE_RULES)
        e = state.event(ch)

        if e == E_QUIT:
            return False
        if e == E_HISTEDIT:
            return state.rules

        if e == E_RESIZE:
            size = screen_size()
            if size != stdscr.getmaxyx():
                curses.resizeterm(*size)

        sizes = state.layout()
        curmode, unused = state.mode
        if curmode != oldmode:
            state.page_height = sizes[b'main'][0]
            # Adjust the view to fit the current screen size.
            state.move_cursor(state.pos, state.pos)

        # Pack the windows against the top, each pane spread across the
        # full width of the screen.
        y, x = (0, 0)
        helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
        mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
        commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)

        # Scroll events map directly onto change_view() arguments.
        scroll = {
            E_PAGEDOWN: (+1, b'page'),
            E_PAGEUP: (-1, b'page'),
            E_LINEDOWN: (+1, b'line'),
            E_LINEUP: (-1, b'line'),
        }
        if e in scroll:
            state.change_view(*scroll[e])

        # start rendering
        commitwin.erase()
        helpwin.erase()
        mainwin.erase()
        if curmode == MODE_PATCH:
            state.render_patch(mainwin)
        elif curmode == MODE_HELP:
            state.render_string(mainwin, __doc__.strip().splitlines())
        else:
            state.render_rules(mainwin)
        state.render_commit(commitwin)
        state.render_help(helpwin)
        curses.doupdate()
        # done rendering
        ch = encoding.strtolocal(stdscr.getkey())
1713
1713
1714
1714
def _chistedit(ui, repo, freeargs, opts):
    """interactively edit changeset history via a curses interface

    Provides a ncurses interface to histedit. Press ? in chistedit mode
    to see an extensive help. Requires python-curses to be installed."""

    if curses is None:
        raise error.Abort(_(b"Python curses library required"))

    # disable color
    ui._colormode = None

    try:
        keep = opts.get(b'keep')
        revs = opts.get(b'rev', [])[:]
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        revs.extend(freeargs)
        if not revs:
            defaultrev = destutil.desthistedit(ui, repo)
            if defaultrev is not None:
                revs.append(defaultrev)
        if len(revs) != 1:
            raise error.InputError(
                _(b'histedit requires exactly one ancestor revision')
            )

        roots = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
        if len(roots) != 1:
            raise error.InputError(
                _(
                    b'The specified revisions must have '
                    b'exactly one common root'
                )
            )
        root = roots[0].node()

        topmost = repo.dirstate.p1()
        revs = between(repo, root, topmost, keep)
        if not revs:
            raise error.InputError(
                _(b'%s is not an ancestor of working directory') % short(root)
            )

        rules = [histeditrule(ui, repo[r], i) for i, r in enumerate(revs)]
        with util.with_lc_ctype():
            rc = curses.wrapper(functools.partial(_chisteditmain, repo, rules))
        curses.echo()
        curses.endwin()
        if rc is False:
            ui.write(_(b"histedit aborted\n"))
            return 0
        if type(rc) is list:
            # The user confirmed: serialize the edited plan and hand it
            # to the regular text histedit implementation.
            ui.status(_(b"performing changes\n"))
            rules = makecommands(rc)
            with repo.vfs(b'chistedit', b'w+') as fp:
                for r in rules:
                    fp.write(r)
                opts[b'commands'] = fp.name
            return _texthistedit(ui, repo, freeargs, opts)
    except KeyboardInterrupt:
        pass
    return -1
1781
1781
1782
1782
@command(
    b'histedit',
    [
        (
            b'',
            b'commands',
            b'',
            _(b'read history edits from the specified file'),
            _(b'FILE'),
        ),
        (b'c', b'continue', False, _(b'continue an edit already in progress')),
        (b'', b'edit-plan', False, _(b'edit remaining actions list')),
        (
            b'k',
            b'keep',
            False,
            _(b"don't strip old nodes after edit is complete"),
        ),
        (b'', b'abort', False, _(b'abort an edit in progress')),
        (b'o', b'outgoing', False, _(b'changesets not found in destination')),
        (
            b'f',
            b'force',
            False,
            _(b'force outgoing even for unrelated repositories'),
        ),
        (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')),
    ]
    + cmdutil.formatteropts,
    _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def histedit(ui, repo, *freeargs, **opts):
    """interactively edit changeset history

    This command lets you edit a linear series of changesets (up to
    and including the working directory, which should be clean).
    You can:

    - `pick` to [re]order a changeset

    - `drop` to omit changeset

    - `mess` to reword the changeset commit message

    - `fold` to combine it with the preceding changeset (using the later date)

    - `roll` like fold, but discarding this commit's description and date

    - `edit` to edit this changeset (preserving date)

    - `base` to checkout changeset and apply further changesets from there

    There are a number of ways to select the root changeset:

    - Specify ANCESTOR directly

    - Use --outgoing -- it will be the first linear changeset not
      included in destination. (See :hg:`help config.paths.default-push`)

    - Otherwise, the value from the "histedit.defaultrev" config option
      is used as a revset to select the base revision when ANCESTOR is not
      specified. The first revision returned by the revset is used. By
      default, this selects the editable history that is unique to the
      ancestry of the working directory.

    .. container:: verbose

       If you use --outgoing, this command will abort if there are ambiguous
       outgoing revisions. For example, if there are multiple branches
       containing outgoing revisions.

       Use "min(outgoing() and ::.)" or similar revset specification
       instead of --outgoing to specify edit target revision exactly in
       such ambiguous situation. See :hg:`help revsets` for detail about
       selecting revisions.

    .. container:: verbose

       Examples:

         - A number of changes have been made.
           Revision 3 is no longer needed.

           Start history editing from revision 3::

             hg histedit -r 3

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

           Additional information about the possible actions
           to take appears below the list of revisions.

           To remove revision 3 from the history,
           its action (at the beginning of the relevant line)
           is changed to 'drop'::

             drop 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

         - A number of changes have been made.
           Revision 2 and 4 need to be swapped.

           Start history editing from revision 2::

             hg histedit -r 2

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 252a1af424ad 2 Blorb a morgwazzle
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog

           To swap revision 2 and 4, its lines are swapped
           in the editor::

             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 252a1af424ad 2 Blorb a morgwazzle

    Returns 0 on success, 1 if user intervention is required (not only
    for intentional "edit" command, but also for resolving unexpected
    conflicts).
    """
    opts = pycompat.byteskwargs(opts)

    # kludge: _chistedit only works for starting an edit, not aborting
    # or continuing, so fall back to regular _texthistedit for those
    # operations.
    use_curses = ui.interface(b'histedit') == b'curses'
    if use_curses and _getgoal(opts) == goalnew:
        return _chistedit(ui, repo, freeargs, opts)
    return _texthistedit(ui, repo, freeargs, opts)
1922
1922
1923
1923
def _texthistedit(ui, repo, freeargs, opts):
    """Run a histedit session using the text-based (editor) interface.

    Acquires both the working-directory lock and the store lock for the
    whole session and stashes them on the state object so that actions
    can serialize progress while the locks are held.
    """
    state = histeditstate(repo)
    with repo.wlock() as wlock, repo.lock() as lock:
        state.wlock = wlock
        state.lock = lock
        _histedit(ui, repo, state, freeargs, opts)
1930
1930
1931
1931
# Symbolic names for the four top-level goals a histedit invocation can
# have; _getgoal() maps the command-line flags to one of these.
goalcontinue = b'continue'
goalabort = b'abort'
goaleditplan = b'edit-plan'
goalnew = b'new'
1936
1936
1937
1937
def _getgoal(opts):
    """Map parsed command-line options to one of the goal constants.

    ``--continue``, ``--abort`` and ``--edit-plan`` are checked in that
    order; when none of them is set, a new histedit session is started.
    """
    if opts.get(b'continue'):
        return goalcontinue
    if opts.get(b'abort'):
        return goalabort
    if opts.get(b'edit_plan'):
        return goaleditplan
    return goalnew
1946
1946
1947
1947
def _readfile(ui, path):
    """Return the contents of ``path`` as bytes.

    A path of ``-`` reads from the ui's stdin instead, accounting the
    wait as blocked time for the histedit command.
    """
    if path == b'-':
        with ui.timeblockedsection(b'histedit'):
            return ui.fin.read()
    else:
        with open(path, b'rb') as f:
            return f.read()
1955
1955
1956
1956
def _validateargs(ui, repo, freeargs, opts, goal, rules, revs):
    """Check option/argument combinations for the given histedit goal.

    Raises InputError on incompatible combinations and StateError when
    the repository has mq patches applied.  For the 'new' goal,
    ``revs`` is mutated in place: free arguments are folded in and a
    default destination is appended when no revision was given.
    """
    # TODO only abort if we try to histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise error.StateError(_(b'source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get(b'outgoing')
    editplan = opts.get(b'edit_plan')
    abort = opts.get(b'abort')
    force = opts.get(b'force')
    if force and not outg:
        raise error.InputError(_(b'--force only allowed with --outgoing'))
    if goal == b'continue':
        if any((outg, abort, revs, freeargs, rules, editplan)):
            raise error.InputError(_(b'no arguments allowed with --continue'))
    elif goal == b'abort':
        if any((outg, revs, freeargs, rules, editplan)):
            raise error.InputError(_(b'no arguments allowed with --abort'))
    elif goal == b'edit-plan':
        if any((outg, revs, freeargs)):
            raise error.InputError(
                _(b'only --commands argument allowed with --edit-plan')
            )
    else:
        if outg:
            if revs:
                raise error.InputError(
                    _(b'no revisions allowed with --outgoing')
                )
            if len(freeargs) > 1:
                raise error.InputError(
                    _(b'only one repo argument allowed with --outgoing')
                )
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                defaultrev = destutil.desthistedit(ui, repo)
                if defaultrev is not None:
                    revs.append(defaultrev)

            if len(revs) != 1:
                raise error.InputError(
                    _(b'histedit requires exactly one ancestor revision')
                )
2003
2003
2004
2004
def _histedit(ui, repo, state, freeargs, opts):
    """Drive a complete histedit session (caller must hold both locks).

    Dispatches on the goal derived from ``opts``: continue or abort an
    in-progress session, re-edit the remaining plan, or bootstrap a new
    session; then runs the remaining actions and finalizes the result.
    """
    fm = ui.formatter(b'histedit', opts)
    fm.startitem()
    goal = _getgoal(opts)
    revs = opts.get(b'rev', [])
    nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
    rules = opts.get(b'commands', b'')
    state.keep = opts.get(b'keep', False)

    _validateargs(ui, repo, freeargs, opts, goal, rules, revs)

    hastags = False
    if revs:
        revs = logcmdutil.revrange(repo, revs)
        ctxs = [repo[rev] for rev in revs]
        for ctx in ctxs:
            tags = [tag for tag in ctx.tags() if tag != b'tip']
            if not hastags:
                hastags = len(tags)
    if hastags:
        # rewriting tagged changesets silently loses the tags, so make
        # sure the user really wants that before going ahead
        if ui.promptchoice(
            _(
                b'warning: tags associated with the given'
                b' changeset will be lost after histedit.\n'
                b'do you want to continue (yN)? $$ &Yes $$ &No'
            ),
            default=1,
        ):
            raise error.CanceledError(_(b'histedit cancelled\n'))
    # rebuild state
    if goal == goalcontinue:
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == goaleditplan:
        _edithisteditplan(ui, repo, state, rules)
        return
    elif goal == goalabort:
        _aborthistedit(ui, repo, state, nobackup=nobackup)
        return
    else:
        # goal == goalnew
        _newhistedit(ui, repo, state, revs, freeargs, opts)

    _continuehistedit(ui, repo, state)
    _finishhistedit(ui, repo, state, fm)
    fm.end()
2051
2051
2052
2052
def _continuehistedit(ui, repo, state):
    """Execute the remaining actions of the plan.

    This function runs after either:
    - bootstrapcontinue (if the goal is 'continue')
    - _newhistedit (if the goal is 'new')
    """
    # preprocess rules so that we can hide inner folds from the user
    # and only show one editor
    actions = state.actions[:]
    for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])):
        if action.verb == b'fold' and nextact and nextact.verb == b'fold':
            state.actions[idx].__class__ = _multifold

    # Force an initial state file write, so the user can run --abort/continue
    # even if there's an exception before the first transaction serialize.
    state.write()

    tr = None
    # Don't use singletransaction by default since it rolls the entire
    # transaction back if an unexpected exception happens (like a
    # pretxncommit hook throws, or the user aborts the commit msg editor).
    if ui.configbool(b"histedit", b"singletransaction"):
        # Don't use a 'with' for the transaction, since actions may close
        # and reopen a transaction. For example, if the action executes an
        # external process it may choose to commit the transaction first.
        tr = repo.transaction(b'histedit')
    progress = ui.makeprogress(
        _(b"editing"), unit=_(b'changes'), total=len(state.actions)
    )
    with progress, util.acceptintervention(tr):
        while state.actions:
            # persist before running each action so an interruption can
            # be resumed from exactly this point
            state.write(tr=tr)
            actobj = state.actions[0]
            progress.increment(item=actobj.torule())
            ui.debug(
                b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule())
            )
            parentctx, replacement_ = actobj.run()
            state.parentctxnode = parentctx.node()
            state.replacements.extend(replacement_)
            state.actions.pop(0)

    state.write()
2095
2095
2096
2096
def _finishhistedit(ui, repo, state, fm):
    """This action runs when histedit is finishing its session.

    Updates the working copy to the new parent, records the
    old-node -> new-nodes mapping (moving bookmarks and stripping or
    obsoleting replaced/temporary nodes), emits the node changes via the
    formatter and clears the session state.
    """
    mergemod.update(repo[state.parentctxnode])

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.items():
            if not succs:
                ui.debug(b'histedit: %s is dropped\n' % short(prec))
            else:
                ui.debug(
                    b'histedit: %s is replaced by %s\n'
                    % (short(prec), short(succs[0]))
                )
                if len(succs) > 1:
                    m = b'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % short(n))

    if not state.keep:
        if mapping:
            movetopmostbookmarks(repo, state.topmost, ntm)
            # TODO update mq state
    else:
        # --keep: the original changesets survive, so nothing is replaced
        mapping = {}

    # temporary nodes that still exist map to nothing (i.e. get stripped)
    for n in tmpnodes:
        if n in repo:
            mapping[n] = ()

    # remove entries about unknown nodes
    has_node = repo.unfiltered().changelog.index.has_node
    mapping = {
        k: v
        for k, v in mapping.items()
        if has_node(k) and all(has_node(n) for n in v)
    }
    scmutil.cleanupnodes(repo, mapping, b'histedit')
    hf = fm.hexfunc
    fl = fm.formatlist
    fd = fm.formatdict
    nodechanges = fd(
        {
            hf(oldn): fl([hf(n) for n in newn], name=b'node')
            for oldn, newn in mapping.items()
        },
        key=b"oldnode",
        value=b"newnodes",
    )
    fm.data(nodechanges=nodechanges)

    state.clear()
    if os.path.exists(repo.sjoin(b'undo')):
        os.unlink(repo.sjoin(b'undo'))
    if repo.vfs.exists(b'histedit-last-edit.txt'):
        repo.vfs.unlink(b'histedit-last-edit.txt')
2153
2153
2154
2154
2155 def _aborthistedit(ui, repo, state, nobackup=False):
2155 def _aborthistedit(ui, repo, state, nobackup=False):
2156 try:
2156 try:
2157 state.read()
2157 state.read()
2158 __, leafs, tmpnodes, __ = processreplacement(state)
2158 __, leafs, tmpnodes, __ = processreplacement(state)
2159 ui.debug(b'restore wc to old parent %s\n' % short(state.topmost))
2159 ui.debug(b'restore wc to old parent %s\n' % short(state.topmost))
2160
2160
2161 # Recover our old commits if necessary
2161 # Recover our old commits if necessary
2162 if not state.topmost in repo and state.backupfile:
2162 if not state.topmost in repo and state.backupfile:
2163 backupfile = repo.vfs.join(state.backupfile)
2163 backupfile = repo.vfs.join(state.backupfile)
2164 f = hg.openpath(ui, backupfile)
2164 f = hg.openpath(ui, backupfile)
2165 gen = exchange.readbundle(ui, f, backupfile)
2165 gen = exchange.readbundle(ui, f, backupfile)
2166 with repo.transaction(b'histedit.abort') as tr:
2166 with repo.transaction(b'histedit.abort') as tr:
2167 bundle2.applybundle(
2167 bundle2.applybundle(
2168 repo,
2168 repo,
2169 gen,
2169 gen,
2170 tr,
2170 tr,
2171 source=b'histedit',
2171 source=b'histedit',
2172 url=b'bundle:' + backupfile,
2172 url=b'bundle:' + backupfile,
2173 )
2173 )
2174
2174
2175 os.remove(backupfile)
2175 os.remove(backupfile)
2176
2176
2177 # check whether we should update away
2177 # check whether we should update away
2178 if repo.unfiltered().revs(
2178 if repo.unfiltered().revs(
2179 b'parents() and (%n or %ln::)',
2179 b'parents() and (%n or %ln::)',
2180 state.parentctxnode,
2180 state.parentctxnode,
2181 leafs | tmpnodes,
2181 leafs | tmpnodes,
2182 ):
2182 ):
2183 hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
2183 hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
2184 cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
2184 cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
2185 cleanupnode(ui, repo, leafs, nobackup=nobackup)
2185 cleanupnode(ui, repo, leafs, nobackup=nobackup)
2186 except Exception:
2186 except Exception:
2187 if state.inprogress():
2187 if state.inprogress():
2188 ui.warn(
2188 ui.warn(
2189 _(
2189 _(
2190 b'warning: encountered an exception during histedit '
2190 b'warning: encountered an exception during histedit '
2191 b'--abort; the repository may not have been completely '
2191 b'--abort; the repository may not have been completely '
2192 b'cleaned up\n'
2192 b'cleaned up\n'
2193 )
2193 )
2194 )
2194 )
2195 raise
2195 raise
2196 finally:
2196 finally:
2197 state.clear()
2197 state.clear()
2198
2198
2199
2199
def hgaborthistedit(ui, repo):
    """Entry point for ``hg abort`` of a histedit session.

    Takes both locks itself (unlike _aborthistedit, which expects them
    to be held already).
    """
    state = histeditstate(repo)
    nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
    with repo.wlock() as wlock, repo.lock() as lock:
        state.wlock = wlock
        state.lock = lock
        _aborthistedit(ui, repo, state, nobackup=nobackup)
2207
2207
2208
2208
def _edithisteditplan(ui, repo, state, rules):
    """Replace the remaining plan of an in-progress session.

    When ``rules`` is empty an editor is opened on the current plan;
    otherwise the rules are read from the given file (``-`` for stdin).
    The parsed actions are verified and written back to the state file.
    """
    state.read()
    if not rules:
        comment = geteditcomment(
            ui, short(state.parentctxnode), short(state.topmost)
        )
        rules = ruleeditor(repo, ui, state.actions, comment)
    else:
        rules = _readfile(ui, rules)
    actions = parserules(rules, state)
    ctxs = [repo[act.node] for act in state.actions if act.node]
    warnverifyactions(ui, repo, actions, state, ctxs)
    state.actions = actions
    state.write()
2223
2223
2224
2224
def _newhistedit(ui, repo, state, revs, freeargs, opts):
    """Bootstrap a brand-new histedit session.

    Determines the root of the span to edit (either from --outgoing or
    from the requested revisions), sniff-tests for untracked-file
    collisions, obtains and verifies the action plan, initializes the
    state object and creates a backup bundle when obsolescence markers
    are not available.
    """
    outg = opts.get(b'outgoing')
    rules = opts.get(b'commands', b'')
    force = opts.get(b'force')

    cmdutil.checkunfinished(repo)
    cmdutil.bailifchanged(repo)

    topmost = repo.dirstate.p1()
    if outg:
        if freeargs:
            remote = freeargs[0]
        else:
            remote = None
        root = findoutgoing(ui, repo, remote, force, opts)
    else:
        rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
        if len(rr) != 1:
            raise error.InputError(
                _(
                    b'The specified revisions must have '
                    b'exactly one common root'
                )
            )
        root = rr[0].node()

    revs = between(repo, root, topmost, state.keep)
    if not revs:
        raise error.InputError(
            _(b'%s is not an ancestor of working directory') % short(root)
        )

    ctxs = [repo[r] for r in revs]

    wctx = repo[None]
    # Please don't ask me why `ancestors` is this value. I figured it
    # out with print-debugging, not by actually understanding what the
    # merge code is doing. :(
    ancs = [repo[b'.']]
    # Sniff-test to make sure we won't collide with untracked files in
    # the working directory. If we don't do this, we can get a
    # collision after we've started histedit and backing out gets ugly
    # for everyone, especially the user.
    for c in [ctxs[0].p1()] + ctxs:
        try:
            mergemod.calculateupdates(
                repo,
                wctx,
                c,
                ancs,
                # These parameters were determined by print-debugging
                # what happens later on inside histedit.
                branchmerge=False,
                force=False,
                acceptremote=False,
                followcopies=False,
            )
        except error.Abort:
            raise error.StateError(
                _(
                    b"untracked files in working directory conflict with files in %s"
                )
                % c
            )

    if not rules:
        comment = geteditcomment(ui, short(root), short(topmost))
        actions = [pick(state, r) for r in revs]
        rules = ruleeditor(repo, ui, actions, comment)
    else:
        rules = _readfile(ui, rules)
    actions = parserules(rules, state)
    warnverifyactions(ui, repo, actions, state, ctxs)

    parentctxnode = repo[root].p1().node()

    state.parentctxnode = parentctxnode
    state.actions = actions
    state.topmost = topmost
    state.replacements = []

    ui.log(
        b"histedit",
        b"%d actions to histedit\n",
        len(actions),
        histedit_num_actions=len(actions),
    )

    # Create a backup so we can always abort completely.
    backupfile = None
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        backupfile = repair.backupbundle(
            repo, [parentctxnode], [topmost], root, b'histedit'
        )
    state.backupfile = backupfile
2320
2320
2321
2321
def _getsummary(ctx):
    """Return the first line of the changeset description of ``ctx``."""
    return stringutil.firstline(ctx.description())
2324
2324
2325
2325
def bootstrapcontinue(ui, state, opts):
    """Resume an interrupted session by finishing its current action.

    Refuses to continue while merge conflicts are unresolved.  The
    pending action is completed via continuedirty()/continueclean() and
    its replacements are folded into the state, which is returned.
    """
    repo = state.repo

    ms = mergestatemod.mergestate.read(repo)
    mergeutil.checkunresolved(ms)

    if state.actions:
        actobj = state.actions.pop(0)

        if _isdirtywc(repo):
            actobj.continuedirty()
            # the action is expected to have committed the dirty
            # working copy; bail out if it did not
            if _isdirtywc(repo):
                abortdirty()

        parentctx, replacements = actobj.continueclean()

        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacements)

    return state
2346
2346
2347
2347
def between(repo, old, new, keep):
    """select and validate the set of revision to edit

    When keep is false, the specified set can't have children."""
    revs = repo.revs(b'%n::%n', old, new)
    if revs and not keep:
        rewriteutil.precheck(repo, revs, b'edit')
        if repo.revs(b'(%ld) and merge()', revs):
            raise error.StateError(
                _(b'cannot edit history that contains merges')
            )
    return pycompat.maplist(repo.changelog.node, revs)
2360
2360
2361
2361
def ruleeditor(repo, ui, actions, editcomment=b""):
    """open an editor to edit rules

    rules are in the format [ [act, ctx], ...] like in state.rules
    """
    if repo.ui.configbool(b"experimental", b"histedit.autoverb"):
        # autoverb: a commit summary starting with "<verb>! " pre-selects
        # that verb and attaches the action to the commit whose summary
        # matches the remainder
        newact = util.sortdict()
        for act in actions:
            ctx = repo[act.node]
            summary = _getsummary(ctx)
            fword = summary.split(b' ', 1)[0].lower()
            added = False

            # if it doesn't end with the special character '!' just skip this
            if fword.endswith(b'!'):
                fword = fword[:-1]
                if fword in primaryactions | secondaryactions | tertiaryactions:
                    act.verb = fword
                    # get the target summary
                    tsum = summary[len(fword) + 1 :].lstrip()
                    # safe but slow: reverse iterate over the actions so we
                    # don't clash on two commits having the same summary
                    for na, l in reversed(list(newact.items())):
                        actx = repo[na.node]
                        asum = _getsummary(actx)
                        if asum == tsum:
                            added = True
                            l.append(act)
                            break

            if not added:
                newact[act] = []

        # copy over and flatten the new list
        actions = []
        for na, l in newact.items():
            actions.append(na)
            actions += l

    rules = b'\n'.join([act.torule() for act in actions])
    rules += b'\n\n'
    rules += editcomment
    rules = ui.edit(
        rules,
        ui.username(),
        {b'prefix': b'histedit'},
        repopath=repo.path,
        action=b'histedit',
    )

    # Save edit rules in .hg/histedit-last-edit.txt in case
    # the user needs to ask for help after something
    # surprising happens.
    with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:
        f.write(rules)

    return rules
2419
2419
2420
2420
def parserules(rules, state):
    """Read the histedit rules string and return list of action objects.

    Blank lines and '#' comments are ignored; every remaining line must
    be '<verb> <rest>' with a verb known to the action table, otherwise
    ParseError is raised.
    """
    rules = [
        l
        for l in (r.strip() for r in rules.splitlines())
        if l and not l.startswith(b'#')
    ]
    actions = []
    for r in rules:
        if b' ' not in r:
            raise error.ParseError(_(b'malformed line "%s"') % r)
        verb, rest = r.split(b' ', 1)

        if verb not in actiontable:
            raise error.ParseError(_(b'unknown action "%s"') % verb)

        action = actiontable[verb].fromrule(state, rest)
        actions.append(action)
    return actions
2440
2440
2441
2441
2442 def warnverifyactions(ui, repo, actions, state, ctxs):
2442 def warnverifyactions(ui, repo, actions, state, ctxs):
2443 try:
2443 try:
2444 verifyactions(actions, state, ctxs)
2444 verifyactions(actions, state, ctxs)
2445 except error.ParseError:
2445 except error.ParseError:
2446 if repo.vfs.exists(b'histedit-last-edit.txt'):
2446 if repo.vfs.exists(b'histedit-last-edit.txt'):
2447 ui.warn(
2447 ui.warn(
2448 _(
2448 _(
2449 b'warning: histedit rules saved '
2449 b'warning: histedit rules saved '
2450 b'to: .hg/histedit-last-edit.txt\n'
2450 b'to: .hg/histedit-last-edit.txt\n'
2451 )
2451 )
2452 )
2452 )
2453 raise
2453 raise
2454
2454
2455
2455
2456 def verifyactions(actions, state, ctxs):
2456 def verifyactions(actions, state, ctxs):
2457 """Verify that there exists exactly one action per given changeset and
2457 """Verify that there exists exactly one action per given changeset and
2458 other constraints.
2458 other constraints.
2459
2459
2460 Will abort if there are to many or too few rules, a malformed rule,
2460 Will abort if there are to many or too few rules, a malformed rule,
2461 or a rule on a changeset outside of the user-given range.
2461 or a rule on a changeset outside of the user-given range.
2462 """
2462 """
2463 expected = {c.node() for c in ctxs}
2463 expected = {c.node() for c in ctxs}
2464 seen = set()
2464 seen = set()
2465 prev = None
2465 prev = None
2466
2466
2467 if actions and actions[0].verb in [b'roll', b'fold']:
2467 if actions and actions[0].verb in [b'roll', b'fold']:
2468 raise error.ParseError(
2468 raise error.ParseError(
2469 _(b'first changeset cannot use verb "%s"') % actions[0].verb
2469 _(b'first changeset cannot use verb "%s"') % actions[0].verb
2470 )
2470 )
2471
2471
2472 for action in actions:
2472 for action in actions:
2473 action.verify(prev, expected, seen)
2473 action.verify(prev, expected, seen)
2474 prev = action
2474 prev = action
2475 if action.node is not None:
2475 if action.node is not None:
2476 seen.add(action.node)
2476 seen.add(action.node)
2477 missing = sorted(expected - seen) # sort to stabilize output
2477 missing = sorted(expected - seen) # sort to stabilize output
2478
2478
2479 if state.repo.ui.configbool(b'histedit', b'dropmissing'):
2479 if state.repo.ui.configbool(b'histedit', b'dropmissing'):
2480 if len(actions) == 0:
2480 if len(actions) == 0:
2481 raise error.ParseError(
2481 raise error.ParseError(
2482 _(b'no rules provided'),
2482 _(b'no rules provided'),
2483 hint=_(b'use strip extension to remove commits'),
2483 hint=_(b'use strip extension to remove commits'),
2484 )
2484 )
2485
2485
2486 drops = [drop(state, n) for n in missing]
2486 drops = [drop(state, n) for n in missing]
2487 # put the in the beginning so they execute immediately and
2487 # put the in the beginning so they execute immediately and
2488 # don't show in the edit-plan in the future
2488 # don't show in the edit-plan in the future
2489 actions[:0] = drops
2489 actions[:0] = drops
2490 elif missing:
2490 elif missing:
2491 raise error.ParseError(
2491 raise error.ParseError(
2492 _(b'missing rules for changeset %s') % short(missing[0]),
2492 _(b'missing rules for changeset %s') % short(missing[0]),
2493 hint=_(
2493 hint=_(
2494 b'use "drop %s" to discard, see also: '
2494 b'use "drop %s" to discard, see also: '
2495 b"'hg help -e histedit.config'"
2495 b"'hg help -e histedit.config'"
2496 )
2496 )
2497 % short(missing[0]),
2497 % short(missing[0]),
2498 )
2498 )
2499
2499
2500
2500
2501 def adjustreplacementsfrommarkers(repo, oldreplacements):
2501 def adjustreplacementsfrommarkers(repo, oldreplacements):
2502 """Adjust replacements from obsolescence markers
2502 """Adjust replacements from obsolescence markers
2503
2503
2504 Replacements structure is originally generated based on
2504 Replacements structure is originally generated based on
2505 histedit's state and does not account for changes that are
2505 histedit's state and does not account for changes that are
2506 not recorded there. This function fixes that by adding
2506 not recorded there. This function fixes that by adding
2507 data read from obsolescence markers"""
2507 data read from obsolescence markers"""
2508 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2508 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2509 return oldreplacements
2509 return oldreplacements
2510
2510
2511 unfi = repo.unfiltered()
2511 unfi = repo.unfiltered()
2512 get_rev = unfi.changelog.index.get_rev
2512 get_rev = unfi.changelog.index.get_rev
2513 obsstore = repo.obsstore
2513 obsstore = repo.obsstore
2514 newreplacements = list(oldreplacements)
2514 newreplacements = list(oldreplacements)
2515 oldsuccs = [r[1] for r in oldreplacements]
2515 oldsuccs = [r[1] for r in oldreplacements]
2516 # successors that have already been added to succstocheck once
2516 # successors that have already been added to succstocheck once
2517 seensuccs = set().union(
2517 seensuccs = set().union(
2518 *oldsuccs
2518 *oldsuccs
2519 ) # create a set from an iterable of tuples
2519 ) # create a set from an iterable of tuples
2520 succstocheck = list(seensuccs)
2520 succstocheck = list(seensuccs)
2521 while succstocheck:
2521 while succstocheck:
2522 n = succstocheck.pop()
2522 n = succstocheck.pop()
2523 missing = get_rev(n) is None
2523 missing = get_rev(n) is None
2524 markers = obsstore.successors.get(n, ())
2524 markers = obsstore.successors.get(n, ())
2525 if missing and not markers:
2525 if missing and not markers:
2526 # dead end, mark it as such
2526 # dead end, mark it as such
2527 newreplacements.append((n, ()))
2527 newreplacements.append((n, ()))
2528 for marker in markers:
2528 for marker in markers:
2529 nsuccs = marker[1]
2529 nsuccs = marker[1]
2530 newreplacements.append((n, nsuccs))
2530 newreplacements.append((n, nsuccs))
2531 for nsucc in nsuccs:
2531 for nsucc in nsuccs:
2532 if nsucc not in seensuccs:
2532 if nsucc not in seensuccs:
2533 seensuccs.add(nsucc)
2533 seensuccs.add(nsucc)
2534 succstocheck.append(nsucc)
2534 succstocheck.append(nsucc)
2535
2535
2536 return newreplacements
2536 return newreplacements
2537
2537
2538
2538
2539 def processreplacement(state):
2539 def processreplacement(state):
2540 """process the list of replacements to return
2540 """process the list of replacements to return
2541
2541
2542 1) the final mapping between original and created nodes
2542 1) the final mapping between original and created nodes
2543 2) the list of temporary node created by histedit
2543 2) the list of temporary node created by histedit
2544 3) the list of new commit created by histedit"""
2544 3) the list of new commit created by histedit"""
2545 replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
2545 replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
2546 allsuccs = set()
2546 allsuccs = set()
2547 replaced = set()
2547 replaced = set()
2548 fullmapping = {}
2548 fullmapping = {}
2549 # initialize basic set
2549 # initialize basic set
2550 # fullmapping records all operations recorded in replacement
2550 # fullmapping records all operations recorded in replacement
2551 for rep in replacements:
2551 for rep in replacements:
2552 allsuccs.update(rep[1])
2552 allsuccs.update(rep[1])
2553 replaced.add(rep[0])
2553 replaced.add(rep[0])
2554 fullmapping.setdefault(rep[0], set()).update(rep[1])
2554 fullmapping.setdefault(rep[0], set()).update(rep[1])
2555 new = allsuccs - replaced
2555 new = allsuccs - replaced
2556 tmpnodes = allsuccs & replaced
2556 tmpnodes = allsuccs & replaced
2557 # Reduce content fullmapping into direct relation between original nodes
2557 # Reduce content fullmapping into direct relation between original nodes
2558 # and final node created during history edition
2558 # and final node created during history edition
2559 # Dropped changeset are replaced by an empty list
2559 # Dropped changeset are replaced by an empty list
2560 toproceed = set(fullmapping)
2560 toproceed = set(fullmapping)
2561 final = {}
2561 final = {}
2562 while toproceed:
2562 while toproceed:
2563 for x in list(toproceed):
2563 for x in list(toproceed):
2564 succs = fullmapping[x]
2564 succs = fullmapping[x]
2565 for s in list(succs):
2565 for s in list(succs):
2566 if s in toproceed:
2566 if s in toproceed:
2567 # non final node with unknown closure
2567 # non final node with unknown closure
2568 # We can't process this now
2568 # We can't process this now
2569 break
2569 break
2570 elif s in final:
2570 elif s in final:
2571 # non final node, replace with closure
2571 # non final node, replace with closure
2572 succs.remove(s)
2572 succs.remove(s)
2573 succs.update(final[s])
2573 succs.update(final[s])
2574 else:
2574 else:
2575 final[x] = succs
2575 final[x] = succs
2576 toproceed.remove(x)
2576 toproceed.remove(x)
2577 # remove tmpnodes from final mapping
2577 # remove tmpnodes from final mapping
2578 for n in tmpnodes:
2578 for n in tmpnodes:
2579 del final[n]
2579 del final[n]
2580 # we expect all changes involved in final to exist in the repo
2580 # we expect all changes involved in final to exist in the repo
2581 # turn `final` into list (topologically sorted)
2581 # turn `final` into list (topologically sorted)
2582 get_rev = state.repo.changelog.index.get_rev
2582 get_rev = state.repo.changelog.index.get_rev
2583 for prec, succs in final.items():
2583 for prec, succs in final.items():
2584 final[prec] = sorted(succs, key=get_rev)
2584 final[prec] = sorted(succs, key=get_rev)
2585
2585
2586 # computed topmost element (necessary for bookmark)
2586 # computed topmost element (necessary for bookmark)
2587 if new:
2587 if new:
2588 newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
2588 newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
2589 elif not final:
2589 elif not final:
2590 # Nothing rewritten at all. we won't need `newtopmost`
2590 # Nothing rewritten at all. we won't need `newtopmost`
2591 # It is the same as `oldtopmost` and `processreplacement` know it
2591 # It is the same as `oldtopmost` and `processreplacement` know it
2592 newtopmost = None
2592 newtopmost = None
2593 else:
2593 else:
2594 # every body died. The newtopmost is the parent of the root.
2594 # every body died. The newtopmost is the parent of the root.
2595 r = state.repo.changelog.rev
2595 r = state.repo.changelog.rev
2596 newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()
2596 newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()
2597
2597
2598 return final, tmpnodes, new, newtopmost
2598 return final, tmpnodes, new, newtopmost
2599
2599
2600
2600
2601 def movetopmostbookmarks(repo, oldtopmost, newtopmost):
2601 def movetopmostbookmarks(repo, oldtopmost, newtopmost):
2602 """Move bookmark from oldtopmost to newly created topmost
2602 """Move bookmark from oldtopmost to newly created topmost
2603
2603
2604 This is arguably a feature and we may only want that for the active
2604 This is arguably a feature and we may only want that for the active
2605 bookmark. But the behavior is kept compatible with the old version for now.
2605 bookmark. But the behavior is kept compatible with the old version for now.
2606 """
2606 """
2607 if not oldtopmost or not newtopmost:
2607 if not oldtopmost or not newtopmost:
2608 return
2608 return
2609 oldbmarks = repo.nodebookmarks(oldtopmost)
2609 oldbmarks = repo.nodebookmarks(oldtopmost)
2610 if oldbmarks:
2610 if oldbmarks:
2611 with repo.lock(), repo.transaction(b'histedit') as tr:
2611 with repo.lock(), repo.transaction(b'histedit') as tr:
2612 marks = repo._bookmarks
2612 marks = repo._bookmarks
2613 changes = []
2613 changes = []
2614 for name in oldbmarks:
2614 for name in oldbmarks:
2615 changes.append((name, newtopmost))
2615 changes.append((name, newtopmost))
2616 marks.applychanges(repo, tr, changes)
2616 marks.applychanges(repo, tr, changes)
2617
2617
2618
2618
2619 def cleanupnode(ui, repo, nodes, nobackup=False):
2619 def cleanupnode(ui, repo, nodes, nobackup=False):
2620 """strip a group of nodes from the repository
2620 """strip a group of nodes from the repository
2621
2621
2622 The set of node to strip may contains unknown nodes."""
2622 The set of node to strip may contains unknown nodes."""
2623 with repo.lock():
2623 with repo.lock():
2624 # do not let filtering get in the way of the cleanse
2624 # do not let filtering get in the way of the cleanse
2625 # we should probably get rid of obsolescence marker created during the
2625 # we should probably get rid of obsolescence marker created during the
2626 # histedit, but we currently do not have such information.
2626 # histedit, but we currently do not have such information.
2627 repo = repo.unfiltered()
2627 repo = repo.unfiltered()
2628 # Find all nodes that need to be stripped
2628 # Find all nodes that need to be stripped
2629 # (we use %lr instead of %ln to silently ignore unknown items)
2629 # (we use %lr instead of %ln to silently ignore unknown items)
2630 has_node = repo.changelog.index.has_node
2630 has_node = repo.changelog.index.has_node
2631 nodes = sorted(n for n in nodes if has_node(n))
2631 nodes = sorted(n for n in nodes if has_node(n))
2632 roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
2632 roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
2633 if roots:
2633 if roots:
2634 backup = not nobackup
2634 backup = not nobackup
2635 repair.strip(ui, repo, roots, backup=backup)
2635 repair.strip(ui, repo, roots, backup=backup)
2636
2636
2637
2637
2638 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
2638 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
2639 if isinstance(nodelist, bytes):
2639 if isinstance(nodelist, bytes):
2640 nodelist = [nodelist]
2640 nodelist = [nodelist]
2641 state = histeditstate(repo)
2641 state = histeditstate(repo)
2642 if state.inprogress():
2642 if state.inprogress():
2643 state.read()
2643 state.read()
2644 histedit_nodes = {
2644 histedit_nodes = {
2645 action.node for action in state.actions if action.node
2645 action.node for action in state.actions if action.node
2646 }
2646 }
2647 common_nodes = histedit_nodes & set(nodelist)
2647 common_nodes = histedit_nodes & set(nodelist)
2648 if common_nodes:
2648 if common_nodes:
2649 raise error.Abort(
2649 raise error.Abort(
2650 _(b"histedit in progress, can't strip %s")
2650 _(b"histedit in progress, can't strip %s")
2651 % b', '.join(short(x) for x in common_nodes)
2651 % b', '.join(short(x) for x in common_nodes)
2652 )
2652 )
2653 return orig(ui, repo, nodelist, *args, **kwargs)
2653 return orig(ui, repo, nodelist, *args, **kwargs)
2654
2654
2655
2655
2656 extensions.wrapfunction(repair, b'strip', stripwrapper)
2656 extensions.wrapfunction(repair, b'strip', stripwrapper)
2657
2657
2658
2658
2659 def summaryhook(ui, repo):
2659 def summaryhook(ui, repo):
2660 state = histeditstate(repo)
2660 state = histeditstate(repo)
2661 if not state.inprogress():
2661 if not state.inprogress():
2662 return
2662 return
2663 state.read()
2663 state.read()
2664 if state.actions:
2664 if state.actions:
2665 # i18n: column positioning for "hg summary"
2665 # i18n: column positioning for "hg summary"
2666 ui.write(
2666 ui.write(
2667 _(b'hist: %s (histedit --continue)\n')
2667 _(b'hist: %s (histedit --continue)\n')
2668 % (
2668 % (
2669 ui.label(_(b'%d remaining'), b'histedit.remaining')
2669 ui.label(_(b'%d remaining'), b'histedit.remaining')
2670 % len(state.actions)
2670 % len(state.actions)
2671 )
2671 )
2672 )
2672 )
2673
2673
2674
2674
2675 def extsetup(ui):
2675 def extsetup(ui):
2676 cmdutil.summaryhooks.add(b'histedit', summaryhook)
2676 cmdutil.summaryhooks.add(b'histedit', summaryhook)
2677 statemod.addunfinished(
2677 statemod.addunfinished(
2678 b'histedit',
2678 b'histedit',
2679 fname=b'histedit-state',
2679 fname=b'histedit-state',
2680 allowcommit=True,
2680 allowcommit=True,
2681 continueflag=True,
2681 continueflag=True,
2682 abortfunc=hgaborthistedit,
2682 abortfunc=hgaborthistedit,
2683 )
2683 )
@@ -1,4314 +1,4310 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use :hg:`help COMMAND` for more details)::
17 Common tasks (use :hg:`help COMMAND` for more details)::
18
18
19 create new patch qnew
19 create new patch qnew
20 import existing patch qimport
20 import existing patch qimport
21
21
22 print patch series qseries
22 print patch series qseries
23 print applied patches qapplied
23 print applied patches qapplied
24
24
25 add known patch to applied stack qpush
25 add known patch to applied stack qpush
26 remove patch from applied stack qpop
26 remove patch from applied stack qpop
27 refresh contents of top applied patch qrefresh
27 refresh contents of top applied patch qrefresh
28
28
29 By default, mq will automatically use git patches when required to
29 By default, mq will automatically use git patches when required to
30 avoid losing file mode changes, copy records, binary files or empty
30 avoid losing file mode changes, copy records, binary files or empty
31 files creations or deletions. This behavior can be configured with::
31 files creations or deletions. This behavior can be configured with::
32
32
33 [mq]
33 [mq]
34 git = auto/keep/yes/no
34 git = auto/keep/yes/no
35
35
36 If set to 'keep', mq will obey the [diff] section configuration while
36 If set to 'keep', mq will obey the [diff] section configuration while
37 preserving existing git patches upon qrefresh. If set to 'yes' or
37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 'no', mq will override the [diff] section and always generate git or
38 'no', mq will override the [diff] section and always generate git or
39 regular patches, possibly losing data in the second case.
39 regular patches, possibly losing data in the second case.
40
40
41 It may be desirable for mq changesets to be kept in the secret phase (see
41 It may be desirable for mq changesets to be kept in the secret phase (see
42 :hg:`help phases`), which can be enabled with the following setting::
42 :hg:`help phases`), which can be enabled with the following setting::
43
43
44 [mq]
44 [mq]
45 secret = True
45 secret = True
46
46
47 You will by default be managing a patch queue named "patches". You can
47 You will by default be managing a patch queue named "patches". You can
48 create other, independent patch queues with the :hg:`qqueue` command.
48 create other, independent patch queues with the :hg:`qqueue` command.
49
49
50 If the working directory contains uncommitted files, qpush, qpop and
50 If the working directory contains uncommitted files, qpush, qpop and
51 qgoto abort immediately. If -f/--force is used, the changes are
51 qgoto abort immediately. If -f/--force is used, the changes are
52 discarded. Setting::
52 discarded. Setting::
53
53
54 [mq]
54 [mq]
55 keepchanges = True
55 keepchanges = True
56
56
57 make them behave as if --keep-changes were passed, and non-conflicting
57 make them behave as if --keep-changes were passed, and non-conflicting
58 local changes will be tolerated and preserved. If incompatible options
58 local changes will be tolerated and preserved. If incompatible options
59 such as -f/--force or --exact are passed, this setting is ignored.
59 such as -f/--force or --exact are passed, this setting is ignored.
60
60
61 This extension used to provide a strip command. This command now lives
61 This extension used to provide a strip command. This command now lives
62 in the strip extension.
62 in the strip extension.
63 '''
63 '''
64
64
65
65
66 import errno
66 import errno
67 import os
67 import os
68 import re
68 import re
69 import shutil
69 import shutil
70 import sys
70 import sys
71 from mercurial.i18n import _
71 from mercurial.i18n import _
72 from mercurial.node import (
72 from mercurial.node import (
73 bin,
73 bin,
74 hex,
74 hex,
75 nullrev,
75 nullrev,
76 short,
76 short,
77 )
77 )
78 from mercurial.pycompat import (
78 from mercurial.pycompat import (
79 delattr,
79 delattr,
80 getattr,
80 getattr,
81 open,
81 open,
82 )
82 )
83 from mercurial import (
83 from mercurial import (
84 cmdutil,
84 cmdutil,
85 commands,
85 commands,
86 dirstateguard,
86 dirstateguard,
87 encoding,
87 encoding,
88 error,
88 error,
89 extensions,
89 extensions,
90 hg,
90 hg,
91 localrepo,
91 localrepo,
92 lock as lockmod,
92 lock as lockmod,
93 logcmdutil,
93 logcmdutil,
94 patch as patchmod,
94 patch as patchmod,
95 phases,
95 phases,
96 pycompat,
96 pycompat,
97 registrar,
97 registrar,
98 revsetlang,
98 revsetlang,
99 scmutil,
99 scmutil,
100 smartset,
100 smartset,
101 strip,
101 strip,
102 subrepoutil,
102 subrepoutil,
103 util,
103 util,
104 vfs as vfsmod,
104 vfs as vfsmod,
105 )
105 )
106 from mercurial.utils import (
106 from mercurial.utils import (
107 dateutil,
107 dateutil,
108 stringutil,
108 stringutil,
109 urlutil,
109 urlutil,
110 )
110 )
111
111
112 release = lockmod.release
112 release = lockmod.release
113 seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))]
113 seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))]
114
114
115 cmdtable = {}
115 cmdtable = {}
116 command = registrar.command(cmdtable)
116 command = registrar.command(cmdtable)
117 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
117 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
118 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
118 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
119 # be specifying the version(s) of Mercurial they are tested with, or
119 # be specifying the version(s) of Mercurial they are tested with, or
120 # leave the attribute unspecified.
120 # leave the attribute unspecified.
121 testedwith = b'ships-with-hg-core'
121 testedwith = b'ships-with-hg-core'
122
122
123 configtable = {}
123 configtable = {}
124 configitem = registrar.configitem(configtable)
124 configitem = registrar.configitem(configtable)
125
125
126 configitem(
126 configitem(
127 b'mq',
127 b'mq',
128 b'git',
128 b'git',
129 default=b'auto',
129 default=b'auto',
130 )
130 )
131 configitem(
131 configitem(
132 b'mq',
132 b'mq',
133 b'keepchanges',
133 b'keepchanges',
134 default=False,
134 default=False,
135 )
135 )
136 configitem(
136 configitem(
137 b'mq',
137 b'mq',
138 b'plain',
138 b'plain',
139 default=False,
139 default=False,
140 )
140 )
141 configitem(
141 configitem(
142 b'mq',
142 b'mq',
143 b'secret',
143 b'secret',
144 default=False,
144 default=False,
145 )
145 )
146
146
147 # force load strip extension formerly included in mq and import some utility
147 # force load strip extension formerly included in mq and import some utility
148 try:
148 try:
149 extensions.find(b'strip')
149 extensions.find(b'strip')
150 except KeyError:
150 except KeyError:
151 # note: load is lazy so we could avoid the try-except,
151 # note: load is lazy so we could avoid the try-except,
152 # but I (marmoute) prefer this explicit code.
152 # but I (marmoute) prefer this explicit code.
153 class dummyui:
153 class dummyui:
154 def debug(self, msg):
154 def debug(self, msg):
155 pass
155 pass
156
156
157 def log(self, event, msgfmt, *msgargs, **opts):
157 def log(self, event, msgfmt, *msgargs, **opts):
158 pass
158 pass
159
159
160 extensions.load(dummyui(), b'strip', b'')
160 extensions.load(dummyui(), b'strip', b'')
161
161
162 strip = strip.strip
162 strip = strip.strip
163
163
164
164
165 def checksubstate(repo, baserev=None):
165 def checksubstate(repo, baserev=None):
166 """return list of subrepos at a different revision than substate.
166 """return list of subrepos at a different revision than substate.
167 Abort if any subrepos have uncommitted changes."""
167 Abort if any subrepos have uncommitted changes."""
168 inclsubs = []
168 inclsubs = []
169 wctx = repo[None]
169 wctx = repo[None]
170 if baserev:
170 if baserev:
171 bctx = repo[baserev]
171 bctx = repo[baserev]
172 else:
172 else:
173 bctx = wctx.p1()
173 bctx = wctx.p1()
174 for s in sorted(wctx.substate):
174 for s in sorted(wctx.substate):
175 wctx.sub(s).bailifchanged(True)
175 wctx.sub(s).bailifchanged(True)
176 if s not in bctx.substate or bctx.sub(s).dirty():
176 if s not in bctx.substate or bctx.sub(s).dirty():
177 inclsubs.append(s)
177 inclsubs.append(s)
178 return inclsubs
178 return inclsubs
179
179
180
180
181 # Patch names looks like unix-file names.
181 # Patch names looks like unix-file names.
182 # They must be joinable with queue directory and result in the patch path.
182 # They must be joinable with queue directory and result in the patch path.
183 normname = util.normpath
183 normname = util.normpath
184
184
185
185
186 class statusentry:
186 class statusentry:
187 def __init__(self, node, name):
187 def __init__(self, node, name):
188 self.node, self.name = node, name
188 self.node, self.name = node, name
189
189
190 def __bytes__(self):
190 def __bytes__(self):
191 return hex(self.node) + b':' + self.name
191 return hex(self.node) + b':' + self.name
192
192
193 __str__ = encoding.strmethod(__bytes__)
193 __str__ = encoding.strmethod(__bytes__)
194 __repr__ = encoding.strmethod(__bytes__)
194 __repr__ = encoding.strmethod(__bytes__)
195
195
196
196
197 # The order of the headers in 'hg export' HG patches:
197 # The order of the headers in 'hg export' HG patches:
198 HGHEADERS = [
198 HGHEADERS = [
199 # '# HG changeset patch',
199 # '# HG changeset patch',
200 b'# User ',
200 b'# User ',
201 b'# Date ',
201 b'# Date ',
202 b'# ',
202 b'# ',
203 b'# Branch ',
203 b'# Branch ',
204 b'# Node ID ',
204 b'# Node ID ',
205 b'# Parent ', # can occur twice for merges - but that is not relevant for mq
205 b'# Parent ', # can occur twice for merges - but that is not relevant for mq
206 ]
206 ]
207 # The order of headers in plain 'mail style' patches:
207 # The order of headers in plain 'mail style' patches:
208 PLAINHEADERS = {
208 PLAINHEADERS = {
209 b'from': 0,
209 b'from': 0,
210 b'date': 1,
210 b'date': 1,
211 b'subject': 2,
211 b'subject': 2,
212 }
212 }
213
213
214
214
215 def inserthgheader(lines, header, value):
215 def inserthgheader(lines, header, value):
216 """Assuming lines contains a HG patch header, add a header line with value.
216 """Assuming lines contains a HG patch header, add a header line with value.
217 >>> try: inserthgheader([], b'# Date ', b'z')
217 >>> try: inserthgheader([], b'# Date ', b'z')
218 ... except ValueError as inst: print("oops")
218 ... except ValueError as inst: print("oops")
219 oops
219 oops
220 >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
220 >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
221 ['# HG changeset patch', '# Date z']
221 ['# HG changeset patch', '# Date z']
222 >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
222 >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
223 ['# HG changeset patch', '# Date z', '']
223 ['# HG changeset patch', '# Date z', '']
224 >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
224 >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
225 ['# HG changeset patch', '# User y', '# Date z']
225 ['# HG changeset patch', '# User y', '# Date z']
226 >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
226 >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
227 ... b'# User ', b'z')
227 ... b'# User ', b'z')
228 ['# HG changeset patch', '# Date x', '# User z']
228 ['# HG changeset patch', '# Date x', '# User z']
229 >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
229 >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
230 ['# HG changeset patch', '# Date z']
230 ['# HG changeset patch', '# Date z']
231 >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
231 >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
232 ... b'# Date ', b'z')
232 ... b'# Date ', b'z')
233 ['# HG changeset patch', '# Date z', '', '# Date y']
233 ['# HG changeset patch', '# Date z', '', '# Date y']
234 >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
234 >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
235 ... b'# Date ', b'z')
235 ... b'# Date ', b'z')
236 ['# HG changeset patch', '# Date z', '# Parent y']
236 ['# HG changeset patch', '# Date z', '# Parent y']
237 """
237 """
238 start = lines.index(b'# HG changeset patch') + 1
238 start = lines.index(b'# HG changeset patch') + 1
239 newindex = HGHEADERS.index(header)
239 newindex = HGHEADERS.index(header)
240 bestpos = len(lines)
240 bestpos = len(lines)
241 for i in range(start, len(lines)):
241 for i in range(start, len(lines)):
242 line = lines[i]
242 line = lines[i]
243 if not line.startswith(b'# '):
243 if not line.startswith(b'# '):
244 bestpos = min(bestpos, i)
244 bestpos = min(bestpos, i)
245 break
245 break
246 for lineindex, h in enumerate(HGHEADERS):
246 for lineindex, h in enumerate(HGHEADERS):
247 if line.startswith(h):
247 if line.startswith(h):
248 if lineindex == newindex:
248 if lineindex == newindex:
249 lines[i] = header + value
249 lines[i] = header + value
250 return lines
250 return lines
251 if lineindex > newindex:
251 if lineindex > newindex:
252 bestpos = min(bestpos, i)
252 bestpos = min(bestpos, i)
253 break # next line
253 break # next line
254 lines.insert(bestpos, header + value)
254 lines.insert(bestpos, header + value)
255 return lines
255 return lines
256
256
257
257
258 def insertplainheader(lines, header, value):
258 def insertplainheader(lines, header, value):
259 """For lines containing a plain patch header, add a header line with value.
259 """For lines containing a plain patch header, add a header line with value.
260 >>> insertplainheader([], b'Date', b'z')
260 >>> insertplainheader([], b'Date', b'z')
261 ['Date: z']
261 ['Date: z']
262 >>> insertplainheader([b''], b'Date', b'z')
262 >>> insertplainheader([b''], b'Date', b'z')
263 ['Date: z', '']
263 ['Date: z', '']
264 >>> insertplainheader([b'x'], b'Date', b'z')
264 >>> insertplainheader([b'x'], b'Date', b'z')
265 ['Date: z', '', 'x']
265 ['Date: z', '', 'x']
266 >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
266 >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
267 ['From: y', 'Date: z', '', 'x']
267 ['From: y', 'Date: z', '', 'x']
268 >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
268 >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
269 [' date : x', 'From: z', '']
269 [' date : x', 'From: z', '']
270 >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
270 >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
271 ['Date: z', '', 'Date: y']
271 ['Date: z', '', 'Date: y']
272 >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
272 >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
273 ['From: y', 'foo: bar', 'DATE: z', '', 'x']
273 ['From: y', 'foo: bar', 'DATE: z', '', 'x']
274 """
274 """
275 newprio = PLAINHEADERS[header.lower()]
275 newprio = PLAINHEADERS[header.lower()]
276 bestpos = len(lines)
276 bestpos = len(lines)
277 for i, line in enumerate(lines):
277 for i, line in enumerate(lines):
278 if b':' in line:
278 if b':' in line:
279 lheader = line.split(b':', 1)[0].strip().lower()
279 lheader = line.split(b':', 1)[0].strip().lower()
280 lprio = PLAINHEADERS.get(lheader, newprio + 1)
280 lprio = PLAINHEADERS.get(lheader, newprio + 1)
281 if lprio == newprio:
281 if lprio == newprio:
282 lines[i] = b'%s: %s' % (header, value)
282 lines[i] = b'%s: %s' % (header, value)
283 return lines
283 return lines
284 if lprio > newprio and i < bestpos:
284 if lprio > newprio and i < bestpos:
285 bestpos = i
285 bestpos = i
286 else:
286 else:
287 if line:
287 if line:
288 lines.insert(i, b'')
288 lines.insert(i, b'')
289 if i < bestpos:
289 if i < bestpos:
290 bestpos = i
290 bestpos = i
291 break
291 break
292 lines.insert(bestpos, b'%s: %s' % (header, value))
292 lines.insert(bestpos, b'%s: %s' % (header, value))
293 return lines
293 return lines
294
294
295
295
296 class patchheader:
296 class patchheader:
297 def __init__(self, pf, plainmode=False):
297 def __init__(self, pf, plainmode=False):
298 def eatdiff(lines):
298 def eatdiff(lines):
299 while lines:
299 while lines:
300 l = lines[-1]
300 l = lines[-1]
301 if (
301 if (
302 l.startswith(b"diff -")
302 l.startswith(b"diff -")
303 or l.startswith(b"Index:")
303 or l.startswith(b"Index:")
304 or l.startswith(b"===========")
304 or l.startswith(b"===========")
305 ):
305 ):
306 del lines[-1]
306 del lines[-1]
307 else:
307 else:
308 break
308 break
309
309
310 def eatempty(lines):
310 def eatempty(lines):
311 while lines:
311 while lines:
312 if not lines[-1].strip():
312 if not lines[-1].strip():
313 del lines[-1]
313 del lines[-1]
314 else:
314 else:
315 break
315 break
316
316
317 message = []
317 message = []
318 comments = []
318 comments = []
319 user = None
319 user = None
320 date = None
320 date = None
321 parent = None
321 parent = None
322 format = None
322 format = None
323 subject = None
323 subject = None
324 branch = None
324 branch = None
325 nodeid = None
325 nodeid = None
326 diffstart = 0
326 diffstart = 0
327
327
328 for line in open(pf, b'rb'):
328 for line in open(pf, b'rb'):
329 line = line.rstrip()
329 line = line.rstrip()
330 if line.startswith(b'diff --git') or (
330 if line.startswith(b'diff --git') or (
331 diffstart and line.startswith(b'+++ ')
331 diffstart and line.startswith(b'+++ ')
332 ):
332 ):
333 diffstart = 2
333 diffstart = 2
334 break
334 break
335 diffstart = 0 # reset
335 diffstart = 0 # reset
336 if line.startswith(b"--- "):
336 if line.startswith(b"--- "):
337 diffstart = 1
337 diffstart = 1
338 continue
338 continue
339 elif format == b"hgpatch":
339 elif format == b"hgpatch":
340 # parse values when importing the result of an hg export
340 # parse values when importing the result of an hg export
341 if line.startswith(b"# User "):
341 if line.startswith(b"# User "):
342 user = line[7:]
342 user = line[7:]
343 elif line.startswith(b"# Date "):
343 elif line.startswith(b"# Date "):
344 date = line[7:]
344 date = line[7:]
345 elif line.startswith(b"# Parent "):
345 elif line.startswith(b"# Parent "):
346 parent = line[9:].lstrip() # handle double trailing space
346 parent = line[9:].lstrip() # handle double trailing space
347 elif line.startswith(b"# Branch "):
347 elif line.startswith(b"# Branch "):
348 branch = line[9:]
348 branch = line[9:]
349 elif line.startswith(b"# Node ID "):
349 elif line.startswith(b"# Node ID "):
350 nodeid = line[10:]
350 nodeid = line[10:]
351 elif not line.startswith(b"# ") and line:
351 elif not line.startswith(b"# ") and line:
352 message.append(line)
352 message.append(line)
353 format = None
353 format = None
354 elif line == b'# HG changeset patch':
354 elif line == b'# HG changeset patch':
355 message = []
355 message = []
356 format = b"hgpatch"
356 format = b"hgpatch"
357 elif format != b"tagdone" and (
357 elif format != b"tagdone" and (
358 line.startswith(b"Subject: ") or line.startswith(b"subject: ")
358 line.startswith(b"Subject: ") or line.startswith(b"subject: ")
359 ):
359 ):
360 subject = line[9:]
360 subject = line[9:]
361 format = b"tag"
361 format = b"tag"
362 elif format != b"tagdone" and (
362 elif format != b"tagdone" and (
363 line.startswith(b"From: ") or line.startswith(b"from: ")
363 line.startswith(b"From: ") or line.startswith(b"from: ")
364 ):
364 ):
365 user = line[6:]
365 user = line[6:]
366 format = b"tag"
366 format = b"tag"
367 elif format != b"tagdone" and (
367 elif format != b"tagdone" and (
368 line.startswith(b"Date: ") or line.startswith(b"date: ")
368 line.startswith(b"Date: ") or line.startswith(b"date: ")
369 ):
369 ):
370 date = line[6:]
370 date = line[6:]
371 format = b"tag"
371 format = b"tag"
372 elif format == b"tag" and line == b"":
372 elif format == b"tag" and line == b"":
373 # when looking for tags (subject: from: etc) they
373 # when looking for tags (subject: from: etc) they
374 # end once you find a blank line in the source
374 # end once you find a blank line in the source
375 format = b"tagdone"
375 format = b"tagdone"
376 elif message or line:
376 elif message or line:
377 message.append(line)
377 message.append(line)
378 comments.append(line)
378 comments.append(line)
379
379
380 eatdiff(message)
380 eatdiff(message)
381 eatdiff(comments)
381 eatdiff(comments)
382 # Remember the exact starting line of the patch diffs before consuming
382 # Remember the exact starting line of the patch diffs before consuming
383 # empty lines, for external use by TortoiseHg and others
383 # empty lines, for external use by TortoiseHg and others
384 self.diffstartline = len(comments)
384 self.diffstartline = len(comments)
385 eatempty(message)
385 eatempty(message)
386 eatempty(comments)
386 eatempty(comments)
387
387
388 # make sure message isn't empty
388 # make sure message isn't empty
389 if format and format.startswith(b"tag") and subject:
389 if format and format.startswith(b"tag") and subject:
390 message.insert(0, subject)
390 message.insert(0, subject)
391
391
392 self.message = message
392 self.message = message
393 self.comments = comments
393 self.comments = comments
394 self.user = user
394 self.user = user
395 self.date = date
395 self.date = date
396 self.parent = parent
396 self.parent = parent
397 # nodeid and branch are for external use by TortoiseHg and others
397 # nodeid and branch are for external use by TortoiseHg and others
398 self.nodeid = nodeid
398 self.nodeid = nodeid
399 self.branch = branch
399 self.branch = branch
400 self.haspatch = diffstart > 1
400 self.haspatch = diffstart > 1
401 self.plainmode = (
401 self.plainmode = (
402 plainmode
402 plainmode
403 or b'# HG changeset patch' not in self.comments
403 or b'# HG changeset patch' not in self.comments
404 and any(
404 and any(
405 c.startswith(b'Date: ') or c.startswith(b'From: ')
405 c.startswith(b'Date: ') or c.startswith(b'From: ')
406 for c in self.comments
406 for c in self.comments
407 )
407 )
408 )
408 )
409
409
410 def setuser(self, user):
410 def setuser(self, user):
411 try:
411 try:
412 inserthgheader(self.comments, b'# User ', user)
412 inserthgheader(self.comments, b'# User ', user)
413 except ValueError:
413 except ValueError:
414 if self.plainmode:
414 if self.plainmode:
415 insertplainheader(self.comments, b'From', user)
415 insertplainheader(self.comments, b'From', user)
416 else:
416 else:
417 tmp = [b'# HG changeset patch', b'# User ' + user]
417 tmp = [b'# HG changeset patch', b'# User ' + user]
418 self.comments = tmp + self.comments
418 self.comments = tmp + self.comments
419 self.user = user
419 self.user = user
420
420
421 def setdate(self, date):
421 def setdate(self, date):
422 try:
422 try:
423 inserthgheader(self.comments, b'# Date ', date)
423 inserthgheader(self.comments, b'# Date ', date)
424 except ValueError:
424 except ValueError:
425 if self.plainmode:
425 if self.plainmode:
426 insertplainheader(self.comments, b'Date', date)
426 insertplainheader(self.comments, b'Date', date)
427 else:
427 else:
428 tmp = [b'# HG changeset patch', b'# Date ' + date]
428 tmp = [b'# HG changeset patch', b'# Date ' + date]
429 self.comments = tmp + self.comments
429 self.comments = tmp + self.comments
430 self.date = date
430 self.date = date
431
431
432 def setparent(self, parent):
432 def setparent(self, parent):
433 try:
433 try:
434 inserthgheader(self.comments, b'# Parent ', parent)
434 inserthgheader(self.comments, b'# Parent ', parent)
435 except ValueError:
435 except ValueError:
436 if not self.plainmode:
436 if not self.plainmode:
437 tmp = [b'# HG changeset patch', b'# Parent ' + parent]
437 tmp = [b'# HG changeset patch', b'# Parent ' + parent]
438 self.comments = tmp + self.comments
438 self.comments = tmp + self.comments
439 self.parent = parent
439 self.parent = parent
440
440
441 def setmessage(self, message):
441 def setmessage(self, message):
442 if self.comments:
442 if self.comments:
443 self._delmsg()
443 self._delmsg()
444 self.message = [message]
444 self.message = [message]
445 if message:
445 if message:
446 if self.plainmode and self.comments and self.comments[-1]:
446 if self.plainmode and self.comments and self.comments[-1]:
447 self.comments.append(b'')
447 self.comments.append(b'')
448 self.comments.append(message)
448 self.comments.append(message)
449
449
450 def __bytes__(self):
450 def __bytes__(self):
451 s = b'\n'.join(self.comments).rstrip()
451 s = b'\n'.join(self.comments).rstrip()
452 if not s:
452 if not s:
453 return b''
453 return b''
454 return s + b'\n\n'
454 return s + b'\n\n'
455
455
456 __str__ = encoding.strmethod(__bytes__)
456 __str__ = encoding.strmethod(__bytes__)
457
457
458 def _delmsg(self):
458 def _delmsg(self):
459 """Remove existing message, keeping the rest of the comments fields.
459 """Remove existing message, keeping the rest of the comments fields.
460 If comments contains 'subject: ', message will prepend
460 If comments contains 'subject: ', message will prepend
461 the field and a blank line."""
461 the field and a blank line."""
462 if self.message:
462 if self.message:
463 subj = b'subject: ' + self.message[0].lower()
463 subj = b'subject: ' + self.message[0].lower()
464 for i in pycompat.xrange(len(self.comments)):
464 for i in range(len(self.comments)):
465 if subj == self.comments[i].lower():
465 if subj == self.comments[i].lower():
466 del self.comments[i]
466 del self.comments[i]
467 self.message = self.message[2:]
467 self.message = self.message[2:]
468 break
468 break
469 ci = 0
469 ci = 0
470 for mi in self.message:
470 for mi in self.message:
471 while mi != self.comments[ci]:
471 while mi != self.comments[ci]:
472 ci += 1
472 ci += 1
473 del self.comments[ci]
473 del self.comments[ci]
474
474
475
475
476 def newcommit(repo, phase, *args, **kwargs):
476 def newcommit(repo, phase, *args, **kwargs):
477 """helper dedicated to ensure a commit respect mq.secret setting
477 """helper dedicated to ensure a commit respect mq.secret setting
478
478
479 It should be used instead of repo.commit inside the mq source for operation
479 It should be used instead of repo.commit inside the mq source for operation
480 creating new changeset.
480 creating new changeset.
481 """
481 """
482 repo = repo.unfiltered()
482 repo = repo.unfiltered()
483 if phase is None:
483 if phase is None:
484 if repo.ui.configbool(b'mq', b'secret'):
484 if repo.ui.configbool(b'mq', b'secret'):
485 phase = phases.secret
485 phase = phases.secret
486 overrides = {(b'ui', b'allowemptycommit'): True}
486 overrides = {(b'ui', b'allowemptycommit'): True}
487 if phase is not None:
487 if phase is not None:
488 overrides[(b'phases', b'new-commit')] = phase
488 overrides[(b'phases', b'new-commit')] = phase
489 with repo.ui.configoverride(overrides, b'mq'):
489 with repo.ui.configoverride(overrides, b'mq'):
490 repo.ui.setconfig(b'ui', b'allowemptycommit', True)
490 repo.ui.setconfig(b'ui', b'allowemptycommit', True)
491 return repo.commit(*args, **kwargs)
491 return repo.commit(*args, **kwargs)
492
492
493
493
494 class AbortNoCleanup(error.Abort):
494 class AbortNoCleanup(error.Abort):
495 pass
495 pass
496
496
497
497
498 class queue:
498 class queue:
499 def __init__(self, ui, baseui, path, patchdir=None):
499 def __init__(self, ui, baseui, path, patchdir=None):
500 self.basepath = path
500 self.basepath = path
501 try:
501 try:
502 with open(os.path.join(path, b'patches.queue'), 'rb') as fh:
502 with open(os.path.join(path, b'patches.queue'), 'rb') as fh:
503 cur = fh.read().rstrip()
503 cur = fh.read().rstrip()
504
504
505 if not cur:
505 if not cur:
506 curpath = os.path.join(path, b'patches')
506 curpath = os.path.join(path, b'patches')
507 else:
507 else:
508 curpath = os.path.join(path, b'patches-' + cur)
508 curpath = os.path.join(path, b'patches-' + cur)
509 except IOError:
509 except IOError:
510 curpath = os.path.join(path, b'patches')
510 curpath = os.path.join(path, b'patches')
511 self.path = patchdir or curpath
511 self.path = patchdir or curpath
512 self.opener = vfsmod.vfs(self.path)
512 self.opener = vfsmod.vfs(self.path)
513 self.ui = ui
513 self.ui = ui
514 self.baseui = baseui
514 self.baseui = baseui
515 self.applieddirty = False
515 self.applieddirty = False
516 self.seriesdirty = False
516 self.seriesdirty = False
517 self.added = []
517 self.added = []
518 self.seriespath = b"series"
518 self.seriespath = b"series"
519 self.statuspath = b"status"
519 self.statuspath = b"status"
520 self.guardspath = b"guards"
520 self.guardspath = b"guards"
521 self.activeguards = None
521 self.activeguards = None
522 self.guardsdirty = False
522 self.guardsdirty = False
523 # Handle mq.git as a bool with extended values
523 # Handle mq.git as a bool with extended values
524 gitmode = ui.config(b'mq', b'git').lower()
524 gitmode = ui.config(b'mq', b'git').lower()
525 boolmode = stringutil.parsebool(gitmode)
525 boolmode = stringutil.parsebool(gitmode)
526 if boolmode is not None:
526 if boolmode is not None:
527 if boolmode:
527 if boolmode:
528 gitmode = b'yes'
528 gitmode = b'yes'
529 else:
529 else:
530 gitmode = b'no'
530 gitmode = b'no'
531 self.gitmode = gitmode
531 self.gitmode = gitmode
532 # deprecated config: mq.plain
532 # deprecated config: mq.plain
533 self.plainmode = ui.configbool(b'mq', b'plain')
533 self.plainmode = ui.configbool(b'mq', b'plain')
534 self.checkapplied = True
534 self.checkapplied = True
535
535
536 @util.propertycache
536 @util.propertycache
537 def applied(self):
537 def applied(self):
538 def parselines(lines):
538 def parselines(lines):
539 for l in lines:
539 for l in lines:
540 entry = l.split(b':', 1)
540 entry = l.split(b':', 1)
541 if len(entry) > 1:
541 if len(entry) > 1:
542 n, name = entry
542 n, name = entry
543 yield statusentry(bin(n), name)
543 yield statusentry(bin(n), name)
544 elif l.strip():
544 elif l.strip():
545 self.ui.warn(
545 self.ui.warn(
546 _(b'malformated mq status line: %s\n')
546 _(b'malformated mq status line: %s\n')
547 % stringutil.pprint(entry)
547 % stringutil.pprint(entry)
548 )
548 )
549 # else we ignore empty lines
549 # else we ignore empty lines
550
550
551 try:
551 try:
552 lines = self.opener.read(self.statuspath).splitlines()
552 lines = self.opener.read(self.statuspath).splitlines()
553 return list(parselines(lines))
553 return list(parselines(lines))
554 except IOError as e:
554 except IOError as e:
555 if e.errno == errno.ENOENT:
555 if e.errno == errno.ENOENT:
556 return []
556 return []
557 raise
557 raise
558
558
559 @util.propertycache
559 @util.propertycache
560 def fullseries(self):
560 def fullseries(self):
561 try:
561 try:
562 return self.opener.read(self.seriespath).splitlines()
562 return self.opener.read(self.seriespath).splitlines()
563 except IOError as e:
563 except IOError as e:
564 if e.errno == errno.ENOENT:
564 if e.errno == errno.ENOENT:
565 return []
565 return []
566 raise
566 raise
567
567
568 @util.propertycache
568 @util.propertycache
569 def series(self):
569 def series(self):
570 self.parseseries()
570 self.parseseries()
571 return self.series
571 return self.series
572
572
573 @util.propertycache
573 @util.propertycache
574 def seriesguards(self):
574 def seriesguards(self):
575 self.parseseries()
575 self.parseseries()
576 return self.seriesguards
576 return self.seriesguards
577
577
578 def invalidate(self):
578 def invalidate(self):
579 for a in 'applied fullseries series seriesguards'.split():
579 for a in 'applied fullseries series seriesguards'.split():
580 if a in self.__dict__:
580 if a in self.__dict__:
581 delattr(self, a)
581 delattr(self, a)
582 self.applieddirty = False
582 self.applieddirty = False
583 self.seriesdirty = False
583 self.seriesdirty = False
584 self.guardsdirty = False
584 self.guardsdirty = False
585 self.activeguards = None
585 self.activeguards = None
586
586
587 def diffopts(self, opts=None, patchfn=None, plain=False):
587 def diffopts(self, opts=None, patchfn=None, plain=False):
588 """Return diff options tweaked for this mq use, possibly upgrading to
588 """Return diff options tweaked for this mq use, possibly upgrading to
589 git format, and possibly plain and without lossy options."""
589 git format, and possibly plain and without lossy options."""
590 diffopts = patchmod.difffeatureopts(
590 diffopts = patchmod.difffeatureopts(
591 self.ui,
591 self.ui,
592 opts,
592 opts,
593 git=True,
593 git=True,
594 whitespace=not plain,
594 whitespace=not plain,
595 formatchanging=not plain,
595 formatchanging=not plain,
596 )
596 )
597 if self.gitmode == b'auto':
597 if self.gitmode == b'auto':
598 diffopts.upgrade = True
598 diffopts.upgrade = True
599 elif self.gitmode == b'keep':
599 elif self.gitmode == b'keep':
600 pass
600 pass
601 elif self.gitmode in (b'yes', b'no'):
601 elif self.gitmode in (b'yes', b'no'):
602 diffopts.git = self.gitmode == b'yes'
602 diffopts.git = self.gitmode == b'yes'
603 else:
603 else:
604 raise error.Abort(
604 raise error.Abort(
605 _(b'mq.git option can be auto/keep/yes/no got %s')
605 _(b'mq.git option can be auto/keep/yes/no got %s')
606 % self.gitmode
606 % self.gitmode
607 )
607 )
608 if patchfn:
608 if patchfn:
609 diffopts = self.patchopts(diffopts, patchfn)
609 diffopts = self.patchopts(diffopts, patchfn)
610 return diffopts
610 return diffopts
611
611
612 def patchopts(self, diffopts, *patches):
612 def patchopts(self, diffopts, *patches):
613 """Return a copy of input diff options with git set to true if
613 """Return a copy of input diff options with git set to true if
614 referenced patch is a git patch and should be preserved as such.
614 referenced patch is a git patch and should be preserved as such.
615 """
615 """
616 diffopts = diffopts.copy()
616 diffopts = diffopts.copy()
617 if not diffopts.git and self.gitmode == b'keep':
617 if not diffopts.git and self.gitmode == b'keep':
618 for patchfn in patches:
618 for patchfn in patches:
619 patchf = self.opener(patchfn, b'r')
619 patchf = self.opener(patchfn, b'r')
620 # if the patch was a git patch, refresh it as a git patch
620 # if the patch was a git patch, refresh it as a git patch
621 diffopts.git = any(
621 diffopts.git = any(
622 line.startswith(b'diff --git') for line in patchf
622 line.startswith(b'diff --git') for line in patchf
623 )
623 )
624 patchf.close()
624 patchf.close()
625 return diffopts
625 return diffopts
626
626
627 def join(self, *p):
627 def join(self, *p):
628 return os.path.join(self.path, *p)
628 return os.path.join(self.path, *p)
629
629
630 def findseries(self, patch):
630 def findseries(self, patch):
631 def matchpatch(l):
631 def matchpatch(l):
632 l = l.split(b'#', 1)[0]
632 l = l.split(b'#', 1)[0]
633 return l.strip() == patch
633 return l.strip() == patch
634
634
635 for index, l in enumerate(self.fullseries):
635 for index, l in enumerate(self.fullseries):
636 if matchpatch(l):
636 if matchpatch(l):
637 return index
637 return index
638 return None
638 return None
639
639
640 guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
640 guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
641
641
642 def parseseries(self):
642 def parseseries(self):
643 self.series = []
643 self.series = []
644 self.seriesguards = []
644 self.seriesguards = []
645 for l in self.fullseries:
645 for l in self.fullseries:
646 h = l.find(b'#')
646 h = l.find(b'#')
647 if h == -1:
647 if h == -1:
648 patch = l
648 patch = l
649 comment = b''
649 comment = b''
650 elif h == 0:
650 elif h == 0:
651 continue
651 continue
652 else:
652 else:
653 patch = l[:h]
653 patch = l[:h]
654 comment = l[h:]
654 comment = l[h:]
655 patch = patch.strip()
655 patch = patch.strip()
656 if patch:
656 if patch:
657 if patch in self.series:
657 if patch in self.series:
658 raise error.Abort(
658 raise error.Abort(
659 _(b'%s appears more than once in %s')
659 _(b'%s appears more than once in %s')
660 % (patch, self.join(self.seriespath))
660 % (patch, self.join(self.seriespath))
661 )
661 )
662 self.series.append(patch)
662 self.series.append(patch)
663 self.seriesguards.append(self.guard_re.findall(comment))
663 self.seriesguards.append(self.guard_re.findall(comment))
664
664
665 def checkguard(self, guard):
665 def checkguard(self, guard):
666 if not guard:
666 if not guard:
667 return _(b'guard cannot be an empty string')
667 return _(b'guard cannot be an empty string')
668 bad_chars = b'# \t\r\n\f'
668 bad_chars = b'# \t\r\n\f'
669 first = guard[0]
669 first = guard[0]
670 if first in b'-+':
670 if first in b'-+':
671 return _(b'guard %r starts with invalid character: %r') % (
671 return _(b'guard %r starts with invalid character: %r') % (
672 guard,
672 guard,
673 first,
673 first,
674 )
674 )
675 for c in bad_chars:
675 for c in bad_chars:
676 if c in guard:
676 if c in guard:
677 return _(b'invalid character in guard %r: %r') % (guard, c)
677 return _(b'invalid character in guard %r: %r') % (guard, c)
678
678
679 def setactive(self, guards):
679 def setactive(self, guards):
680 for guard in guards:
680 for guard in guards:
681 bad = self.checkguard(guard)
681 bad = self.checkguard(guard)
682 if bad:
682 if bad:
683 raise error.Abort(bad)
683 raise error.Abort(bad)
684 guards = sorted(set(guards))
684 guards = sorted(set(guards))
685 self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
685 self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
686 self.activeguards = guards
686 self.activeguards = guards
687 self.guardsdirty = True
687 self.guardsdirty = True
688
688
689 def active(self):
689 def active(self):
690 if self.activeguards is None:
690 if self.activeguards is None:
691 self.activeguards = []
691 self.activeguards = []
692 try:
692 try:
693 guards = self.opener.read(self.guardspath).split()
693 guards = self.opener.read(self.guardspath).split()
694 except IOError as err:
694 except IOError as err:
695 if err.errno != errno.ENOENT:
695 if err.errno != errno.ENOENT:
696 raise
696 raise
697 guards = []
697 guards = []
698 for i, guard in enumerate(guards):
698 for i, guard in enumerate(guards):
699 bad = self.checkguard(guard)
699 bad = self.checkguard(guard)
700 if bad:
700 if bad:
701 self.ui.warn(
701 self.ui.warn(
702 b'%s:%d: %s\n'
702 b'%s:%d: %s\n'
703 % (self.join(self.guardspath), i + 1, bad)
703 % (self.join(self.guardspath), i + 1, bad)
704 )
704 )
705 else:
705 else:
706 self.activeguards.append(guard)
706 self.activeguards.append(guard)
707 return self.activeguards
707 return self.activeguards
708
708
709 def setguards(self, idx, guards):
709 def setguards(self, idx, guards):
710 for g in guards:
710 for g in guards:
711 if len(g) < 2:
711 if len(g) < 2:
712 raise error.Abort(_(b'guard %r too short') % g)
712 raise error.Abort(_(b'guard %r too short') % g)
713 if g[0] not in b'-+':
713 if g[0] not in b'-+':
714 raise error.Abort(_(b'guard %r starts with invalid char') % g)
714 raise error.Abort(_(b'guard %r starts with invalid char') % g)
715 bad = self.checkguard(g[1:])
715 bad = self.checkguard(g[1:])
716 if bad:
716 if bad:
717 raise error.Abort(bad)
717 raise error.Abort(bad)
718 drop = self.guard_re.sub(b'', self.fullseries[idx])
718 drop = self.guard_re.sub(b'', self.fullseries[idx])
719 self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
719 self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
720 self.parseseries()
720 self.parseseries()
721 self.seriesdirty = True
721 self.seriesdirty = True
722
722
723 def pushable(self, idx):
723 def pushable(self, idx):
724 if isinstance(idx, bytes):
724 if isinstance(idx, bytes):
725 idx = self.series.index(idx)
725 idx = self.series.index(idx)
726 patchguards = self.seriesguards[idx]
726 patchguards = self.seriesguards[idx]
727 if not patchguards:
727 if not patchguards:
728 return True, None
728 return True, None
729 guards = self.active()
729 guards = self.active()
730 exactneg = [
730 exactneg = [
731 g for g in patchguards if g.startswith(b'-') and g[1:] in guards
731 g for g in patchguards if g.startswith(b'-') and g[1:] in guards
732 ]
732 ]
733 if exactneg:
733 if exactneg:
734 return False, stringutil.pprint(exactneg[0])
734 return False, stringutil.pprint(exactneg[0])
735 pos = [g for g in patchguards if g.startswith(b'+')]
735 pos = [g for g in patchguards if g.startswith(b'+')]
736 exactpos = [g for g in pos if g[1:] in guards]
736 exactpos = [g for g in pos if g[1:] in guards]
737 if pos:
737 if pos:
738 if exactpos:
738 if exactpos:
739 return True, stringutil.pprint(exactpos[0])
739 return True, stringutil.pprint(exactpos[0])
740 return False, b' '.join([stringutil.pprint(p) for p in pos])
740 return False, b' '.join([stringutil.pprint(p) for p in pos])
741 return True, b''
741 return True, b''
742
742
743 def explainpushable(self, idx, all_patches=False):
743 def explainpushable(self, idx, all_patches=False):
744 if all_patches:
744 if all_patches:
745 write = self.ui.write
745 write = self.ui.write
746 else:
746 else:
747 write = self.ui.warn
747 write = self.ui.warn
748
748
749 if all_patches or self.ui.verbose:
749 if all_patches or self.ui.verbose:
750 if isinstance(idx, bytes):
750 if isinstance(idx, bytes):
751 idx = self.series.index(idx)
751 idx = self.series.index(idx)
752 pushable, why = self.pushable(idx)
752 pushable, why = self.pushable(idx)
753 if all_patches and pushable:
753 if all_patches and pushable:
754 if why is None:
754 if why is None:
755 write(
755 write(
756 _(b'allowing %s - no guards in effect\n')
756 _(b'allowing %s - no guards in effect\n')
757 % self.series[idx]
757 % self.series[idx]
758 )
758 )
759 else:
759 else:
760 if not why:
760 if not why:
761 write(
761 write(
762 _(b'allowing %s - no matching negative guards\n')
762 _(b'allowing %s - no matching negative guards\n')
763 % self.series[idx]
763 % self.series[idx]
764 )
764 )
765 else:
765 else:
766 write(
766 write(
767 _(b'allowing %s - guarded by %s\n')
767 _(b'allowing %s - guarded by %s\n')
768 % (self.series[idx], why)
768 % (self.series[idx], why)
769 )
769 )
770 if not pushable:
770 if not pushable:
771 if why:
771 if why:
772 write(
772 write(
773 _(b'skipping %s - guarded by %s\n')
773 _(b'skipping %s - guarded by %s\n')
774 % (self.series[idx], why)
774 % (self.series[idx], why)
775 )
775 )
776 else:
776 else:
777 write(
777 write(
778 _(b'skipping %s - no matching guards\n')
778 _(b'skipping %s - no matching guards\n')
779 % self.series[idx]
779 % self.series[idx]
780 )
780 )
781
781
782 def savedirty(self):
782 def savedirty(self):
783 def writelist(items, path):
783 def writelist(items, path):
784 fp = self.opener(path, b'wb')
784 fp = self.opener(path, b'wb')
785 for i in items:
785 for i in items:
786 fp.write(b"%s\n" % i)
786 fp.write(b"%s\n" % i)
787 fp.close()
787 fp.close()
788
788
789 if self.applieddirty:
789 if self.applieddirty:
790 writelist(map(bytes, self.applied), self.statuspath)
790 writelist(map(bytes, self.applied), self.statuspath)
791 self.applieddirty = False
791 self.applieddirty = False
792 if self.seriesdirty:
792 if self.seriesdirty:
793 writelist(self.fullseries, self.seriespath)
793 writelist(self.fullseries, self.seriespath)
794 self.seriesdirty = False
794 self.seriesdirty = False
795 if self.guardsdirty:
795 if self.guardsdirty:
796 writelist(self.activeguards, self.guardspath)
796 writelist(self.activeguards, self.guardspath)
797 self.guardsdirty = False
797 self.guardsdirty = False
798 if self.added:
798 if self.added:
799 qrepo = self.qrepo()
799 qrepo = self.qrepo()
800 if qrepo:
800 if qrepo:
801 qrepo[None].add(f for f in self.added if f not in qrepo[None])
801 qrepo[None].add(f for f in self.added if f not in qrepo[None])
802 self.added = []
802 self.added = []
803
803
804 def removeundo(self, repo):
804 def removeundo(self, repo):
805 undo = repo.sjoin(b'undo')
805 undo = repo.sjoin(b'undo')
806 if not os.path.exists(undo):
806 if not os.path.exists(undo):
807 return
807 return
808 try:
808 try:
809 os.unlink(undo)
809 os.unlink(undo)
810 except OSError as inst:
810 except OSError as inst:
811 self.ui.warn(
811 self.ui.warn(
812 _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
812 _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
813 )
813 )
814
814
815 def backup(self, repo, files, copy=False):
815 def backup(self, repo, files, copy=False):
816 # backup local changes in --force case
816 # backup local changes in --force case
817 for f in sorted(files):
817 for f in sorted(files):
818 absf = repo.wjoin(f)
818 absf = repo.wjoin(f)
819 if os.path.lexists(absf):
819 if os.path.lexists(absf):
820 absorig = scmutil.backuppath(self.ui, repo, f)
820 absorig = scmutil.backuppath(self.ui, repo, f)
821 self.ui.note(
821 self.ui.note(
822 _(b'saving current version of %s as %s\n')
822 _(b'saving current version of %s as %s\n')
823 % (f, os.path.relpath(absorig))
823 % (f, os.path.relpath(absorig))
824 )
824 )
825
825
826 if copy:
826 if copy:
827 util.copyfile(absf, absorig)
827 util.copyfile(absf, absorig)
828 else:
828 else:
829 util.rename(absf, absorig)
829 util.rename(absf, absorig)
830
830
def printdiff(
    self,
    repo,
    diffopts,
    node1,
    node2=None,
    files=None,
    fp=None,
    changes=None,
    opts=None,
):
    """Write the diff (or diffstat, with opts['stat']) between node1
    and node2 for *files* to *fp* (default: the ui)."""
    if opts is None:
        opts = {}
    matcher = scmutil.match(repo[node1], files, opts)
    logcmdutil.diffordiffstat(
        self.ui,
        repo,
        diffopts,
        repo[node1],
        repo[node2],
        matcher,
        changes,
        opts.get(b'stat'),
        fp,
    )
857
857
def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
    """Push one patch from mergeq, merging with *rev* if a plain
    apply fails.

    Returns (error, node) with the same convention as apply():
    error == 0 on success.  The patch file is rewritten with the
    diff of the merged result so the queue stays consistent.
    """
    # first try just applying the patch
    (err, n) = self.apply(
        repo, [patch], update_status=False, strict=True, merge=rev
    )

    if err == 0:
        return (err, n)

    if n is None:
        raise error.Abort(_(b"apply failed for patch %s") % patch)

    self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch)

    # apply failed, strip away that rev and merge.
    hg.clean(repo, head)
    strip(self.ui, repo, [n], update=False, backup=False)

    ctx = repo[rev]
    ret = hg.merge(ctx, remind=False)
    if ret:
        raise error.Abort(_(b"update returned %d") % ret)
    n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
    if n is None:
        raise error.Abort(_(b"repo commit failed"))
    try:
        ph = patchheader(mergeq.join(patch), self.plainmode)
    except Exception:
        raise error.Abort(_(b"unable to read %s") % patch)

    # regenerate the patch file from the merged commit
    diffopts = self.patchopts(diffopts, patch)
    patchf = self.opener(patch, b"w")
    comments = bytes(ph)
    if comments:
        patchf.write(comments)
    self.printdiff(repo, diffopts, head, n, fp=patchf)
    patchf.close()
    self.removeundo(repo)
    return (0, n)
897
897
def qparents(self, repo, rev=None):
    """return the mq handled parent or p1

    In some case where mq get himself in being the parent of a merge the
    appropriate parent may be p2.
    (eg: an in progress merge started with mq disabled)

    If no parent are managed by mq, p1 is returned.
    """
    if rev is None:
        # working-directory query
        p1, p2 = repo.dirstate.parents()
        if p2 == repo.nullid:
            return p1
        if not self.applied:
            return None
        return self.applied[-1].node
    # explicit revision: prefer the parent that is on the queue
    p1, p2 = repo.changelog.parents(rev)
    if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
        return p2
    return p1
918
918
def mergepatch(self, repo, mergeq, series, diffopts):
    """Push every patch in *series* by merging it in from *mergeq*.

    Returns (error, head); head is the resulting queue tip node.
    """
    if not self.applied:
        # each of the patches merged in will have two parents. This
        # can confuse the qrefresh, qdiff, and strip code because it
        # needs to know which parent is actually in the patch queue.
        # so, we insert a merge marker with only one parent. This way
        # the first patch in the queue is never a merge patch
        #
        pname = b".hg.patches.merge.marker"
        n = newcommit(repo, None, b'[mq]: merge marker', force=True)
        self.removeundo(repo)
        self.applied.append(statusentry(n, pname))
        self.applieddirty = True

    head = self.qparents(repo)

    for patch in series:
        patch = mergeq.lookup(patch, strict=True)
        if not patch:
            self.ui.warn(_(b"patch %s does not exist\n") % patch)
            return (1, None)
        pushable, reason = self.pushable(patch)
        if not pushable:
            # guarded patch: explain and move on
            self.explainpushable(patch, all_patches=True)
            continue
        info = mergeq.isapplied(patch)
        if not info:
            self.ui.warn(_(b"patch %s is not applied\n") % patch)
            return (1, None)
        rev = info[1]
        err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
        if head:
            self.applied.append(statusentry(head, patch))
            self.applieddirty = True
        if err:
            return (err, head)
    self.savedirty()
    return (0, head)
957
957
def patch(self, repo, patchfile):
    """Apply patchfile to the working directory.
    patchfile: name of patch file"""
    files = set()
    try:
        fuzz = patchmod.patch(
            self.ui, repo, patchfile, strip=1, files=files, eolmode=None
        )
    except Exception as inst:
        self.ui.note(stringutil.forcebytestr(inst) + b'\n')
        if not self.ui.verbose:
            self.ui.warn(_(b"patch failed, unable to continue (try -v)\n"))
        self.ui.traceback()
        return (False, list(files), False)
    return (True, list(files), fuzz)
973
973
def apply(
    self,
    repo,
    series,
    list=False,
    update_status=True,
    strict=False,
    patchdir=None,
    merge=None,
    all_files=None,
    tobackup=None,
    keepchanges=False,
):
    """Apply *series* under wlock/lock inside a 'qpush' transaction.

    Thin locking/transaction wrapper around _apply(): commits the
    transaction and persists queue state on success, aborts the
    transaction and invalidates cached state on any other failure.
    """
    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction(b"qpush")
        try:
            ret = self._apply(
                repo,
                series,
                list,
                update_status,
                strict,
                patchdir,
                merge,
                all_files=all_files,
                tobackup=tobackup,
                keepchanges=keepchanges,
            )
            tr.close()
            self.savedirty()
            return ret
        except AbortNoCleanup:
            # deliberate: keep whatever was applied so far and persist
            # the queue state before propagating the abort
            tr.close()
            self.savedirty()
            raise
        except:  # re-raises
            try:
                tr.abort()
            finally:
                self.invalidate()
            raise
    finally:
        release(tr, lock, wlock)
        self.removeundo(repo)
1021
1021
def _apply(
    self,
    repo,
    series,
    list=False,
    update_status=True,
    strict=False,
    patchdir=None,
    merge=None,
    all_files=None,
    tobackup=None,
    keepchanges=False,
):
    """returns (error, hash)

    error = 1 for unable to read, 2 for patch failed, 3 for patch
    fuzz. tobackup is None or a set of files to backup before they
    are modified by a patch.
    """
    # TODO unify with commands.py
    if not patchdir:
        patchdir = self.path
    err = 0
    n = None
    for patchname in series:
        pushable, reason = self.pushable(patchname)
        if not pushable:
            self.explainpushable(patchname, all_patches=True)
            continue
        self.ui.status(_(b"applying %s\n") % patchname)
        pf = os.path.join(patchdir, patchname)

        try:
            ph = patchheader(self.join(patchname), self.plainmode)
        except IOError:
            self.ui.warn(_(b"unable to read %s\n") % patchname)
            err = 1
            break

        message = ph.message
        if not message:
            # The commit message should not be translated
            message = b"imported patch %s\n" % patchname
        else:
            if list:
                # The commit message should not be translated
                message.append(b"\nimported patch %s" % patchname)
            message = b'\n'.join(message)

        if ph.haspatch:
            if tobackup:
                # preserve local versions of the files this patch touches
                touched = patchmod.changedfiles(self.ui, repo, pf)
                touched = set(touched) & tobackup
                if touched and keepchanges:
                    raise AbortNoCleanup(
                        _(b"conflicting local changes found"),
                        hint=_(b"did you forget to qrefresh?"),
                    )
                self.backup(repo, touched, copy=True)
                tobackup = tobackup - touched
            (patcherr, files, fuzz) = self.patch(repo, pf)
            if all_files is not None:
                all_files.update(files)
            patcherr = not patcherr
        else:
            self.ui.warn(_(b"patch %s is empty\n") % patchname)
            patcherr, files, fuzz = 0, [], 0

        if merge and files:
            # Mark as removed/merged and update dirstate parent info
            with repo.dirstate.parentchange():
                for f in files:
                    repo.dirstate.update_file_p1(f, p1_tracked=True)
                p1 = repo.dirstate.p1()
                repo.setparents(p1, merge)

        if all_files and b'.hgsubstate' in all_files:
            wctx = repo[None]
            pctx = repo[b'.']
            overwrite = False
            mergedsubstate = subrepoutil.submerge(
                repo, pctx, wctx, wctx, overwrite
            )
            files += mergedsubstate.keys()

        match = scmutil.matchfiles(repo, files or [])
        oldtip = repo.changelog.tip()
        n = newcommit(
            repo, None, message, ph.user, ph.date, match=match, force=True
        )
        if repo.changelog.tip() == oldtip:
            raise error.Abort(
                _(b"qpush exactly duplicates child changeset")
            )
        if n is None:
            raise error.Abort(_(b"repository commit failed"))

        if update_status:
            self.applied.append(statusentry(n, patchname))

        if patcherr:
            self.ui.warn(
                _(b"patch failed, rejects left in working directory\n")
            )
            err = 2
            break

        if fuzz and strict:
            self.ui.warn(_(b"fuzz found when applying patch, stopping\n"))
            err = 3
            break
    return (err, n)
1134
1134
def _cleanup(self, patches, numrevs, keep=False):
    """Drop *patches* from the series (and their files unless *keep*),
    retiring the first *numrevs* applied status entries.

    Returns the nodes of the retired applied entries.
    """
    if not keep:
        r = self.qrepo()
        if r:
            r[None].forget(patches)
        for p in patches:
            try:
                os.unlink(self.join(p))
            except OSError as inst:
                # an already-missing patch file is fine
                if inst.errno != errno.ENOENT:
                    raise

    qfinished = []
    if numrevs:
        qfinished = self.applied[:numrevs]
        del self.applied[:numrevs]
        self.applieddirty = True

    unknown = []

    # pair each patch with its series index (-1 if absent) and delete
    # from the end so earlier indexes stay valid
    indexed = []
    for p in patches:
        idx = self.findseries(p)
        indexed.append((-1 if idx is None else idx, p))

    indexed.sort(reverse=True)
    for (i, p) in indexed:
        if i != -1:
            del self.fullseries[i]
        else:
            unknown.append(p)

    if unknown:
        if numrevs:
            rev = {entry.name: entry.node for entry in qfinished}
            for p in unknown:
                msg = _(b'revision %s refers to unknown patches: %s\n')
                self.ui.warn(msg % (short(rev[p]), p))
        else:
            msg = _(b'unknown patches: %s\n')
            raise error.Abort(b''.join(msg % p for p in unknown))

    self.parseseries()
    self.seriesdirty = True
    return [entry.node for entry in qfinished]
1183
1183
def _revpatches(self, repo, revs):
    """Map each revision in *revs* to its applied patch name.

    Aborts when a revision is below the applied stack or does not
    line up exactly with the corresponding applied entry.
    """
    firstrev = repo[self.applied[0].node].rev()
    patches = []
    for i, rev in enumerate(revs):
        if rev < firstrev:
            raise error.Abort(_(b'revision %d is not managed') % rev)

        ctx = repo[rev]
        base = self.applied[i].node
        if ctx.node() != base:
            msg = _(b'cannot delete revision %d above applied patches')
            raise error.Abort(msg % rev)

        patch = self.applied[i].name
        # warn when the changeset still carries an auto-generated message
        for fmt in (b'[mq]: %s', b'imported patch %s'):
            if ctx.description() == fmt % patch:
                msg = _(b'patch %s finalized without changeset message\n')
                repo.ui.status(msg % patch)
                break

        patches.append(patch)
    return patches
1207
1207
def finish(self, repo, revs):
    """Move the applied revisions *revs* out of mq control."""
    # Manually trigger phase computation to ensure phasedefaults is
    # executed before we remove the patches.
    repo._phasecache
    patches = self._revpatches(repo, sorted(revs))
    qfinished = self._cleanup(patches, len(patches))
    if qfinished and repo.ui.configbool(b'mq', b'secret'):
        # only use this logic when the secret option is added
        oldqbase = repo[qfinished[0]]
        tphase = phases.newcommitphase(repo.ui)
        if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
            with repo.transaction(b'qfinish') as tr:
                phases.advanceboundary(repo, tr, tphase, qfinished)
1221
1221
def delete(self, repo, patches, opts):
    """qdelete: remove patches given by name and/or by --rev."""
    if not patches and not opts.get(b'rev'):
        raise error.Abort(
            _(b'qdelete requires at least one revision or patch name')
        )

    # resolve names, rejecting applied patches and duplicates
    realpatches = []
    for patch in patches:
        patch = self.lookup(patch, strict=True)
        info = self.isapplied(patch)
        if info:
            raise error.Abort(_(b"cannot delete applied patch %s") % patch)
        if patch not in self.series:
            raise error.Abort(_(b"patch %s not in series file") % patch)
        if patch not in realpatches:
            realpatches.append(patch)

    numrevs = 0
    if opts.get(b'rev'):
        if not self.applied:
            raise error.Abort(_(b'no patches applied'))
        revs = logcmdutil.revrange(repo, opts.get(b'rev'))
        revs.sort()
        revpatches = self._revpatches(repo, revs)
        realpatches += revpatches
        numrevs = len(revpatches)

    self._cleanup(realpatches, numrevs, opts.get(b'keep'))
1250
1250
def checktoppatch(self, repo):
    '''check that working directory is at qtip'''
    if not self.applied:
        return None, None
    top = self.applied[-1].node
    patch = self.applied[-1].name
    if repo.dirstate.p1() != top:
        raise error.Abort(_(b"working directory revision is not qtip"))
    return top, patch
1260
1260
def putsubstate2changes(self, substatestate, changes):
    """File b'.hgsubstate' under modified/added/removed in *changes*.

    *changes* is either a status-style list (first three entries are
    modified, added, removed) or an object with those attributes.
    Does nothing when the file is already listed somewhere.
    """
    if isinstance(changes, list):
        mar = changes[:3]
    else:
        mar = (changes.modified, changes.added, changes.removed)
    if any(b'.hgsubstate' in files for files in mar):
        return  # already listed up
    # not yet listed up
    if substatestate.added or not substatestate.any_tracked:
        mar[1].append(b'.hgsubstate')
    elif substatestate.removed:
        mar[2].append(b'.hgsubstate')
    else:  # modified
        mar[0].append(b'.hgsubstate')
1275
1275
def checklocalchanges(self, repo, force=False, refresh=True):
    """Abort on uncommitted local/subrepo changes unless *force*.

    Returns the repo status.  *refresh* only tweaks the abort
    message suffix.
    """
    excsuffix = b''
    if refresh:
        excsuffix = b', qrefresh first'
        # plain versions for i18n tool to detect them
        _(b"local changes found, qrefresh first")
        _(b"local changed subrepos found, qrefresh first")

    s = repo.status()
    if not force:
        cmdutil.checkunfinished(repo)
        if s.modified or s.added or s.removed or s.deleted:
            _(b"local changes found")  # i18n tool detection
            raise error.Abort(_(b"local changes found" + excsuffix))
        if checksubstate(repo):
            _(b"local changed subrepos found")  # i18n tool detection
            raise error.Abort(
                _(b"local changed subrepos found" + excsuffix)
            )
    else:
        cmdutil.checkunfinished(repo, skipmerge=True)
    return s
1298
1298
# names that would collide with mq's own control files
_reserved = (b'series', b'status', b'guards', b'.', b'..')

def checkreservedname(self, name):
    """Abort when *name* is not acceptable as a patch name."""
    if name in self._reserved:
        raise error.Abort(
            _(b'"%s" cannot be used as the name of a patch') % name
        )
    if name != name.strip():
        # whitespace is stripped by parseseries()
        raise error.Abort(
            _(b'patch name cannot begin or end with whitespace')
        )
    for prefix in (b'.hg', b'.mq'):
        if name.startswith(prefix):
            raise error.Abort(
                _(b'patch name cannot begin with "%s"') % prefix
            )
    for c in (b'#', b':', b'\r', b'\n'):
        if c in name:
            raise error.Abort(
                _(b'%r cannot be used in the name of a patch')
                % pycompat.bytestr(c)
            )
1322
1322
def checkpatchname(self, name, force=False):
    """Validate *name* and, unless *force*, require it to be unused."""
    self.checkreservedname(name)
    if force or not os.path.exists(self.join(name)):
        return
    if os.path.isdir(self.join(name)):
        raise error.Abort(
            _(b'"%s" already exists as a directory') % name
        )
    raise error.Abort(_(b'patch "%s" already exists') % name)
1332
1332
def makepatchname(self, title, fallbackname):
    """Return a suitable filename for title, adding a suffix to make
    it unique in the existing list"""
    # slugify the title; fall back when empty or reserved
    namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
    namebase = namebase[:75]  # avoid too long name (issue5117)
    if namebase:
        try:
            self.checkreservedname(namebase)
        except error.Abort:
            namebase = fallbackname
    else:
        namebase = fallbackname
    # append __N until the name is both unused and valid
    name = namebase
    i = 0
    while True:
        if name not in self.fullseries:
            try:
                self.checkpatchname(name)
                break
            except error.Abort:
                pass
        i += 1
        name = b'%s__%d' % (namebase, i)
    return name
1357
1357
def checkkeepchanges(self, keepchanges, force):
    """Reject the mutually exclusive --force/--keep-changes pair."""
    if force and keepchanges:
        raise error.Abort(_(b'cannot use both --force and --keep-changes'))
1361
1361
1362 def new(self, repo, patchfn, *pats, **opts):
1362 def new(self, repo, patchfn, *pats, **opts):
1363 """options:
1363 """options:
1364 msg: a string or a no-argument function returning a string
1364 msg: a string or a no-argument function returning a string
1365 """
1365 """
1366 opts = pycompat.byteskwargs(opts)
1366 opts = pycompat.byteskwargs(opts)
1367 msg = opts.get(b'msg')
1367 msg = opts.get(b'msg')
1368 edit = opts.get(b'edit')
1368 edit = opts.get(b'edit')
1369 editform = opts.get(b'editform', b'mq.qnew')
1369 editform = opts.get(b'editform', b'mq.qnew')
1370 user = opts.get(b'user')
1370 user = opts.get(b'user')
1371 date = opts.get(b'date')
1371 date = opts.get(b'date')
1372 if date:
1372 if date:
1373 date = dateutil.parsedate(date)
1373 date = dateutil.parsedate(date)
1374 diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True)
1374 diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True)
1375 if opts.get(b'checkname', True):
1375 if opts.get(b'checkname', True):
1376 self.checkpatchname(patchfn)
1376 self.checkpatchname(patchfn)
1377 inclsubs = checksubstate(repo)
1377 inclsubs = checksubstate(repo)
1378 if inclsubs:
1378 if inclsubs:
1379 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1379 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1380 if opts.get(b'include') or opts.get(b'exclude') or pats:
1380 if opts.get(b'include') or opts.get(b'exclude') or pats:
1381 # detect missing files in pats
1381 # detect missing files in pats
1382 def badfn(f, msg):
1382 def badfn(f, msg):
1383 if f != b'.hgsubstate': # .hgsubstate is auto-created
1383 if f != b'.hgsubstate': # .hgsubstate is auto-created
1384 raise error.Abort(b'%s: %s' % (f, msg))
1384 raise error.Abort(b'%s: %s' % (f, msg))
1385
1385
1386 match = scmutil.match(repo[None], pats, opts, badfn=badfn)
1386 match = scmutil.match(repo[None], pats, opts, badfn=badfn)
1387 changes = repo.status(match=match)
1387 changes = repo.status(match=match)
1388 else:
1388 else:
1389 changes = self.checklocalchanges(repo, force=True)
1389 changes = self.checklocalchanges(repo, force=True)
1390 commitfiles = list(inclsubs)
1390 commitfiles = list(inclsubs)
1391 commitfiles.extend(changes.modified)
1391 commitfiles.extend(changes.modified)
1392 commitfiles.extend(changes.added)
1392 commitfiles.extend(changes.added)
1393 commitfiles.extend(changes.removed)
1393 commitfiles.extend(changes.removed)
1394 match = scmutil.matchfiles(repo, commitfiles)
1394 match = scmutil.matchfiles(repo, commitfiles)
1395 if len(repo[None].parents()) > 1:
1395 if len(repo[None].parents()) > 1:
1396 raise error.Abort(_(b'cannot manage merge changesets'))
1396 raise error.Abort(_(b'cannot manage merge changesets'))
1397 self.checktoppatch(repo)
1397 self.checktoppatch(repo)
1398 insert = self.fullseriesend()
1398 insert = self.fullseriesend()
1399 with repo.wlock():
1399 with repo.wlock():
1400 try:
1400 try:
1401 # if patch file write fails, abort early
1401 # if patch file write fails, abort early
1402 p = self.opener(patchfn, b"w")
1402 p = self.opener(patchfn, b"w")
1403 except IOError as e:
1403 except IOError as e:
1404 raise error.Abort(
1404 raise error.Abort(
1405 _(b'cannot write patch "%s": %s')
1405 _(b'cannot write patch "%s": %s')
1406 % (patchfn, encoding.strtolocal(e.strerror))
1406 % (patchfn, encoding.strtolocal(e.strerror))
1407 )
1407 )
1408 try:
1408 try:
1409 defaultmsg = b"[mq]: %s" % patchfn
1409 defaultmsg = b"[mq]: %s" % patchfn
1410 editor = cmdutil.getcommiteditor(editform=editform)
1410 editor = cmdutil.getcommiteditor(editform=editform)
1411 if edit:
1411 if edit:
1412
1412
1413 def finishdesc(desc):
1413 def finishdesc(desc):
1414 if desc.rstrip():
1414 if desc.rstrip():
1415 return desc
1415 return desc
1416 else:
1416 else:
1417 return defaultmsg
1417 return defaultmsg
1418
1418
1419 # i18n: this message is shown in editor with "HG: " prefix
1419 # i18n: this message is shown in editor with "HG: " prefix
1420 extramsg = _(b'Leave message empty to use default message.')
1420 extramsg = _(b'Leave message empty to use default message.')
1421 editor = cmdutil.getcommiteditor(
1421 editor = cmdutil.getcommiteditor(
1422 finishdesc=finishdesc,
1422 finishdesc=finishdesc,
1423 extramsg=extramsg,
1423 extramsg=extramsg,
1424 editform=editform,
1424 editform=editform,
1425 )
1425 )
1426 commitmsg = msg
1426 commitmsg = msg
1427 else:
1427 else:
1428 commitmsg = msg or defaultmsg
1428 commitmsg = msg or defaultmsg
1429
1429
1430 n = newcommit(
1430 n = newcommit(
1431 repo,
1431 repo,
1432 None,
1432 None,
1433 commitmsg,
1433 commitmsg,
1434 user,
1434 user,
1435 date,
1435 date,
1436 match=match,
1436 match=match,
1437 force=True,
1437 force=True,
1438 editor=editor,
1438 editor=editor,
1439 )
1439 )
1440 if n is None:
1440 if n is None:
1441 raise error.Abort(_(b"repo commit failed"))
1441 raise error.Abort(_(b"repo commit failed"))
1442 try:
1442 try:
1443 self.fullseries[insert:insert] = [patchfn]
1443 self.fullseries[insert:insert] = [patchfn]
1444 self.applied.append(statusentry(n, patchfn))
1444 self.applied.append(statusentry(n, patchfn))
1445 self.parseseries()
1445 self.parseseries()
1446 self.seriesdirty = True
1446 self.seriesdirty = True
1447 self.applieddirty = True
1447 self.applieddirty = True
1448 nctx = repo[n]
1448 nctx = repo[n]
1449 ph = patchheader(self.join(patchfn), self.plainmode)
1449 ph = patchheader(self.join(patchfn), self.plainmode)
1450 if user:
1450 if user:
1451 ph.setuser(user)
1451 ph.setuser(user)
1452 if date:
1452 if date:
1453 ph.setdate(b'%d %d' % date)
1453 ph.setdate(b'%d %d' % date)
1454 ph.setparent(hex(nctx.p1().node()))
1454 ph.setparent(hex(nctx.p1().node()))
1455 msg = nctx.description().strip()
1455 msg = nctx.description().strip()
1456 if msg == defaultmsg.strip():
1456 if msg == defaultmsg.strip():
1457 msg = b''
1457 msg = b''
1458 ph.setmessage(msg)
1458 ph.setmessage(msg)
1459 p.write(bytes(ph))
1459 p.write(bytes(ph))
1460 if commitfiles:
1460 if commitfiles:
1461 parent = self.qparents(repo, n)
1461 parent = self.qparents(repo, n)
1462 if inclsubs:
1462 if inclsubs:
1463 self.putsubstate2changes(substatestate, changes)
1463 self.putsubstate2changes(substatestate, changes)
1464 chunks = patchmod.diff(
1464 chunks = patchmod.diff(
1465 repo,
1465 repo,
1466 node1=parent,
1466 node1=parent,
1467 node2=n,
1467 node2=n,
1468 changes=changes,
1468 changes=changes,
1469 opts=diffopts,
1469 opts=diffopts,
1470 )
1470 )
1471 for chunk in chunks:
1471 for chunk in chunks:
1472 p.write(chunk)
1472 p.write(chunk)
1473 p.close()
1473 p.close()
1474 r = self.qrepo()
1474 r = self.qrepo()
1475 if r:
1475 if r:
1476 r[None].add([patchfn])
1476 r[None].add([patchfn])
1477 except: # re-raises
1477 except: # re-raises
1478 repo.rollback()
1478 repo.rollback()
1479 raise
1479 raise
1480 except Exception:
1480 except Exception:
1481 patchpath = self.join(patchfn)
1481 patchpath = self.join(patchfn)
1482 try:
1482 try:
1483 os.unlink(patchpath)
1483 os.unlink(patchpath)
1484 except OSError:
1484 except OSError:
1485 self.ui.warn(_(b'error unlinking %s\n') % patchpath)
1485 self.ui.warn(_(b'error unlinking %s\n') % patchpath)
1486 raise
1486 raise
1487 self.removeundo(repo)
1487 self.removeundo(repo)
1488
1488
1489 def isapplied(self, patch):
1489 def isapplied(self, patch):
1490 """returns (index, rev, patch)"""
1490 """returns (index, rev, patch)"""
1491 for i, a in enumerate(self.applied):
1491 for i, a in enumerate(self.applied):
1492 if a.name == patch:
1492 if a.name == patch:
1493 return (i, a.node, a.name)
1493 return (i, a.node, a.name)
1494 return None
1494 return None
1495
1495
1496 # if the exact patch name does not exist, we try a few
1496 # if the exact patch name does not exist, we try a few
1497 # variations. If strict is passed, we try only #1
1497 # variations. If strict is passed, we try only #1
1498 #
1498 #
1499 # 1) a number (as string) to indicate an offset in the series file
1499 # 1) a number (as string) to indicate an offset in the series file
1500 # 2) a unique substring of the patch name was given
1500 # 2) a unique substring of the patch name was given
1501 # 3) patchname[-+]num to indicate an offset in the series file
1501 # 3) patchname[-+]num to indicate an offset in the series file
1502 def lookup(self, patch, strict=False):
1502 def lookup(self, patch, strict=False):
1503 def partialname(s):
1503 def partialname(s):
1504 if s in self.series:
1504 if s in self.series:
1505 return s
1505 return s
1506 matches = [x for x in self.series if s in x]
1506 matches = [x for x in self.series if s in x]
1507 if len(matches) > 1:
1507 if len(matches) > 1:
1508 self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s)
1508 self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s)
1509 for m in matches:
1509 for m in matches:
1510 self.ui.warn(b' %s\n' % m)
1510 self.ui.warn(b' %s\n' % m)
1511 return None
1511 return None
1512 if matches:
1512 if matches:
1513 return matches[0]
1513 return matches[0]
1514 if self.series and self.applied:
1514 if self.series and self.applied:
1515 if s == b'qtip':
1515 if s == b'qtip':
1516 return self.series[self.seriesend(True) - 1]
1516 return self.series[self.seriesend(True) - 1]
1517 if s == b'qbase':
1517 if s == b'qbase':
1518 return self.series[0]
1518 return self.series[0]
1519 return None
1519 return None
1520
1520
1521 if patch in self.series:
1521 if patch in self.series:
1522 return patch
1522 return patch
1523
1523
1524 if not os.path.isfile(self.join(patch)):
1524 if not os.path.isfile(self.join(patch)):
1525 try:
1525 try:
1526 sno = int(patch)
1526 sno = int(patch)
1527 except (ValueError, OverflowError):
1527 except (ValueError, OverflowError):
1528 pass
1528 pass
1529 else:
1529 else:
1530 if -len(self.series) <= sno < len(self.series):
1530 if -len(self.series) <= sno < len(self.series):
1531 return self.series[sno]
1531 return self.series[sno]
1532
1532
1533 if not strict:
1533 if not strict:
1534 res = partialname(patch)
1534 res = partialname(patch)
1535 if res:
1535 if res:
1536 return res
1536 return res
1537 minus = patch.rfind(b'-')
1537 minus = patch.rfind(b'-')
1538 if minus >= 0:
1538 if minus >= 0:
1539 res = partialname(patch[:minus])
1539 res = partialname(patch[:minus])
1540 if res:
1540 if res:
1541 i = self.series.index(res)
1541 i = self.series.index(res)
1542 try:
1542 try:
1543 off = int(patch[minus + 1 :] or 1)
1543 off = int(patch[minus + 1 :] or 1)
1544 except (ValueError, OverflowError):
1544 except (ValueError, OverflowError):
1545 pass
1545 pass
1546 else:
1546 else:
1547 if i - off >= 0:
1547 if i - off >= 0:
1548 return self.series[i - off]
1548 return self.series[i - off]
1549 plus = patch.rfind(b'+')
1549 plus = patch.rfind(b'+')
1550 if plus >= 0:
1550 if plus >= 0:
1551 res = partialname(patch[:plus])
1551 res = partialname(patch[:plus])
1552 if res:
1552 if res:
1553 i = self.series.index(res)
1553 i = self.series.index(res)
1554 try:
1554 try:
1555 off = int(patch[plus + 1 :] or 1)
1555 off = int(patch[plus + 1 :] or 1)
1556 except (ValueError, OverflowError):
1556 except (ValueError, OverflowError):
1557 pass
1557 pass
1558 else:
1558 else:
1559 if i + off < len(self.series):
1559 if i + off < len(self.series):
1560 return self.series[i + off]
1560 return self.series[i + off]
1561 raise error.Abort(_(b"patch %s not in series") % patch)
1561 raise error.Abort(_(b"patch %s not in series") % patch)
1562
1562
1563 def push(
1563 def push(
1564 self,
1564 self,
1565 repo,
1565 repo,
1566 patch=None,
1566 patch=None,
1567 force=False,
1567 force=False,
1568 list=False,
1568 list=False,
1569 mergeq=None,
1569 mergeq=None,
1570 all=False,
1570 all=False,
1571 move=False,
1571 move=False,
1572 exact=False,
1572 exact=False,
1573 nobackup=False,
1573 nobackup=False,
1574 keepchanges=False,
1574 keepchanges=False,
1575 ):
1575 ):
1576 self.checkkeepchanges(keepchanges, force)
1576 self.checkkeepchanges(keepchanges, force)
1577 diffopts = self.diffopts()
1577 diffopts = self.diffopts()
1578 with repo.wlock():
1578 with repo.wlock():
1579 heads = []
1579 heads = []
1580 for hs in repo.branchmap().iterheads():
1580 for hs in repo.branchmap().iterheads():
1581 heads.extend(hs)
1581 heads.extend(hs)
1582 if not heads:
1582 if not heads:
1583 heads = [repo.nullid]
1583 heads = [repo.nullid]
1584 if repo.dirstate.p1() not in heads and not exact:
1584 if repo.dirstate.p1() not in heads and not exact:
1585 self.ui.status(_(b"(working directory not at a head)\n"))
1585 self.ui.status(_(b"(working directory not at a head)\n"))
1586
1586
1587 if not self.series:
1587 if not self.series:
1588 self.ui.warn(_(b'no patches in series\n'))
1588 self.ui.warn(_(b'no patches in series\n'))
1589 return 0
1589 return 0
1590
1590
1591 # Suppose our series file is: A B C and the current 'top'
1591 # Suppose our series file is: A B C and the current 'top'
1592 # patch is B. qpush C should be performed (moving forward)
1592 # patch is B. qpush C should be performed (moving forward)
1593 # qpush B is a NOP (no change) qpush A is an error (can't
1593 # qpush B is a NOP (no change) qpush A is an error (can't
1594 # go backwards with qpush)
1594 # go backwards with qpush)
1595 if patch:
1595 if patch:
1596 patch = self.lookup(patch)
1596 patch = self.lookup(patch)
1597 info = self.isapplied(patch)
1597 info = self.isapplied(patch)
1598 if info and info[0] >= len(self.applied) - 1:
1598 if info and info[0] >= len(self.applied) - 1:
1599 self.ui.warn(
1599 self.ui.warn(
1600 _(b'qpush: %s is already at the top\n') % patch
1600 _(b'qpush: %s is already at the top\n') % patch
1601 )
1601 )
1602 return 0
1602 return 0
1603
1603
1604 pushable, reason = self.pushable(patch)
1604 pushable, reason = self.pushable(patch)
1605 if pushable:
1605 if pushable:
1606 if self.series.index(patch) < self.seriesend():
1606 if self.series.index(patch) < self.seriesend():
1607 raise error.Abort(
1607 raise error.Abort(
1608 _(b"cannot push to a previous patch: %s") % patch
1608 _(b"cannot push to a previous patch: %s") % patch
1609 )
1609 )
1610 else:
1610 else:
1611 if reason:
1611 if reason:
1612 reason = _(b'guarded by %s') % reason
1612 reason = _(b'guarded by %s') % reason
1613 else:
1613 else:
1614 reason = _(b'no matching guards')
1614 reason = _(b'no matching guards')
1615 self.ui.warn(
1615 self.ui.warn(
1616 _(b"cannot push '%s' - %s\n") % (patch, reason)
1616 _(b"cannot push '%s' - %s\n") % (patch, reason)
1617 )
1617 )
1618 return 1
1618 return 1
1619 elif all:
1619 elif all:
1620 patch = self.series[-1]
1620 patch = self.series[-1]
1621 if self.isapplied(patch):
1621 if self.isapplied(patch):
1622 self.ui.warn(_(b'all patches are currently applied\n'))
1622 self.ui.warn(_(b'all patches are currently applied\n'))
1623 return 0
1623 return 0
1624
1624
1625 # Following the above example, starting at 'top' of B:
1625 # Following the above example, starting at 'top' of B:
1626 # qpush should be performed (pushes C), but a subsequent
1626 # qpush should be performed (pushes C), but a subsequent
1627 # qpush without an argument is an error (nothing to
1627 # qpush without an argument is an error (nothing to
1628 # apply). This allows a loop of "...while hg qpush..." to
1628 # apply). This allows a loop of "...while hg qpush..." to
1629 # work as it detects an error when done
1629 # work as it detects an error when done
1630 start = self.seriesend()
1630 start = self.seriesend()
1631 if start == len(self.series):
1631 if start == len(self.series):
1632 self.ui.warn(_(b'patch series already fully applied\n'))
1632 self.ui.warn(_(b'patch series already fully applied\n'))
1633 return 1
1633 return 1
1634 if not force and not keepchanges:
1634 if not force and not keepchanges:
1635 self.checklocalchanges(repo, refresh=self.applied)
1635 self.checklocalchanges(repo, refresh=self.applied)
1636
1636
1637 if exact:
1637 if exact:
1638 if keepchanges:
1638 if keepchanges:
1639 raise error.Abort(
1639 raise error.Abort(
1640 _(b"cannot use --exact and --keep-changes together")
1640 _(b"cannot use --exact and --keep-changes together")
1641 )
1641 )
1642 if move:
1642 if move:
1643 raise error.Abort(
1643 raise error.Abort(
1644 _(b'cannot use --exact and --move together')
1644 _(b'cannot use --exact and --move together')
1645 )
1645 )
1646 if self.applied:
1646 if self.applied:
1647 raise error.Abort(
1647 raise error.Abort(
1648 _(b'cannot push --exact with applied patches')
1648 _(b'cannot push --exact with applied patches')
1649 )
1649 )
1650 root = self.series[start]
1650 root = self.series[start]
1651 target = patchheader(self.join(root), self.plainmode).parent
1651 target = patchheader(self.join(root), self.plainmode).parent
1652 if not target:
1652 if not target:
1653 raise error.Abort(
1653 raise error.Abort(
1654 _(b"%s does not have a parent recorded") % root
1654 _(b"%s does not have a parent recorded") % root
1655 )
1655 )
1656 if not repo[target] == repo[b'.']:
1656 if not repo[target] == repo[b'.']:
1657 hg.update(repo, target)
1657 hg.update(repo, target)
1658
1658
1659 if move:
1659 if move:
1660 if not patch:
1660 if not patch:
1661 raise error.Abort(_(b"please specify the patch to move"))
1661 raise error.Abort(_(b"please specify the patch to move"))
1662 for fullstart, rpn in enumerate(self.fullseries):
1662 for fullstart, rpn in enumerate(self.fullseries):
1663 # strip markers for patch guards
1663 # strip markers for patch guards
1664 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1664 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1665 break
1665 break
1666 for i, rpn in enumerate(self.fullseries[fullstart:]):
1666 for i, rpn in enumerate(self.fullseries[fullstart:]):
1667 # strip markers for patch guards
1667 # strip markers for patch guards
1668 if self.guard_re.split(rpn, 1)[0] == patch:
1668 if self.guard_re.split(rpn, 1)[0] == patch:
1669 break
1669 break
1670 index = fullstart + i
1670 index = fullstart + i
1671 assert index < len(self.fullseries)
1671 assert index < len(self.fullseries)
1672 fullpatch = self.fullseries[index]
1672 fullpatch = self.fullseries[index]
1673 del self.fullseries[index]
1673 del self.fullseries[index]
1674 self.fullseries.insert(fullstart, fullpatch)
1674 self.fullseries.insert(fullstart, fullpatch)
1675 self.parseseries()
1675 self.parseseries()
1676 self.seriesdirty = True
1676 self.seriesdirty = True
1677
1677
1678 self.applieddirty = True
1678 self.applieddirty = True
1679 if start > 0:
1679 if start > 0:
1680 self.checktoppatch(repo)
1680 self.checktoppatch(repo)
1681 if not patch:
1681 if not patch:
1682 patch = self.series[start]
1682 patch = self.series[start]
1683 end = start + 1
1683 end = start + 1
1684 else:
1684 else:
1685 end = self.series.index(patch, start) + 1
1685 end = self.series.index(patch, start) + 1
1686
1686
1687 tobackup = set()
1687 tobackup = set()
1688 if (not nobackup and force) or keepchanges:
1688 if (not nobackup and force) or keepchanges:
1689 status = self.checklocalchanges(repo, force=True)
1689 status = self.checklocalchanges(repo, force=True)
1690 if keepchanges:
1690 if keepchanges:
1691 tobackup.update(
1691 tobackup.update(
1692 status.modified
1692 status.modified
1693 + status.added
1693 + status.added
1694 + status.removed
1694 + status.removed
1695 + status.deleted
1695 + status.deleted
1696 )
1696 )
1697 else:
1697 else:
1698 tobackup.update(status.modified + status.added)
1698 tobackup.update(status.modified + status.added)
1699
1699
1700 s = self.series[start:end]
1700 s = self.series[start:end]
1701 all_files = set()
1701 all_files = set()
1702 try:
1702 try:
1703 if mergeq:
1703 if mergeq:
1704 ret = self.mergepatch(repo, mergeq, s, diffopts)
1704 ret = self.mergepatch(repo, mergeq, s, diffopts)
1705 else:
1705 else:
1706 ret = self.apply(
1706 ret = self.apply(
1707 repo,
1707 repo,
1708 s,
1708 s,
1709 list,
1709 list,
1710 all_files=all_files,
1710 all_files=all_files,
1711 tobackup=tobackup,
1711 tobackup=tobackup,
1712 keepchanges=keepchanges,
1712 keepchanges=keepchanges,
1713 )
1713 )
1714 except AbortNoCleanup:
1714 except AbortNoCleanup:
1715 raise
1715 raise
1716 except: # re-raises
1716 except: # re-raises
1717 self.ui.warn(_(b'cleaning up working directory...\n'))
1717 self.ui.warn(_(b'cleaning up working directory...\n'))
1718 cmdutil.revert(
1718 cmdutil.revert(
1719 self.ui,
1719 self.ui,
1720 repo,
1720 repo,
1721 repo[b'.'],
1721 repo[b'.'],
1722 no_backup=True,
1722 no_backup=True,
1723 )
1723 )
1724 # only remove unknown files that we know we touched or
1724 # only remove unknown files that we know we touched or
1725 # created while patching
1725 # created while patching
1726 for f in all_files:
1726 for f in all_files:
1727 if f not in repo.dirstate:
1727 if f not in repo.dirstate:
1728 repo.wvfs.unlinkpath(f, ignoremissing=True)
1728 repo.wvfs.unlinkpath(f, ignoremissing=True)
1729 self.ui.warn(_(b'done\n'))
1729 self.ui.warn(_(b'done\n'))
1730 raise
1730 raise
1731
1731
1732 if not self.applied:
1732 if not self.applied:
1733 return ret[0]
1733 return ret[0]
1734 top = self.applied[-1].name
1734 top = self.applied[-1].name
1735 if ret[0] and ret[0] > 1:
1735 if ret[0] and ret[0] > 1:
1736 msg = _(b"errors during apply, please fix and qrefresh %s\n")
1736 msg = _(b"errors during apply, please fix and qrefresh %s\n")
1737 self.ui.write(msg % top)
1737 self.ui.write(msg % top)
1738 else:
1738 else:
1739 self.ui.write(_(b"now at: %s\n") % top)
1739 self.ui.write(_(b"now at: %s\n") % top)
1740 return ret[0]
1740 return ret[0]
1741
1741
1742 def pop(
1742 def pop(
1743 self,
1743 self,
1744 repo,
1744 repo,
1745 patch=None,
1745 patch=None,
1746 force=False,
1746 force=False,
1747 update=True,
1747 update=True,
1748 all=False,
1748 all=False,
1749 nobackup=False,
1749 nobackup=False,
1750 keepchanges=False,
1750 keepchanges=False,
1751 ):
1751 ):
1752 self.checkkeepchanges(keepchanges, force)
1752 self.checkkeepchanges(keepchanges, force)
1753 with repo.wlock():
1753 with repo.wlock():
1754 if patch:
1754 if patch:
1755 # index, rev, patch
1755 # index, rev, patch
1756 info = self.isapplied(patch)
1756 info = self.isapplied(patch)
1757 if not info:
1757 if not info:
1758 patch = self.lookup(patch)
1758 patch = self.lookup(patch)
1759 info = self.isapplied(patch)
1759 info = self.isapplied(patch)
1760 if not info:
1760 if not info:
1761 raise error.Abort(_(b"patch %s is not applied") % patch)
1761 raise error.Abort(_(b"patch %s is not applied") % patch)
1762
1762
1763 if not self.applied:
1763 if not self.applied:
1764 # Allow qpop -a to work repeatedly,
1764 # Allow qpop -a to work repeatedly,
1765 # but not qpop without an argument
1765 # but not qpop without an argument
1766 self.ui.warn(_(b"no patches applied\n"))
1766 self.ui.warn(_(b"no patches applied\n"))
1767 return not all
1767 return not all
1768
1768
1769 if all:
1769 if all:
1770 start = 0
1770 start = 0
1771 elif patch:
1771 elif patch:
1772 start = info[0] + 1
1772 start = info[0] + 1
1773 else:
1773 else:
1774 start = len(self.applied) - 1
1774 start = len(self.applied) - 1
1775
1775
1776 if start >= len(self.applied):
1776 if start >= len(self.applied):
1777 self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
1777 self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
1778 return
1778 return
1779
1779
1780 if not update:
1780 if not update:
1781 parents = repo.dirstate.parents()
1781 parents = repo.dirstate.parents()
1782 rr = [x.node for x in self.applied]
1782 rr = [x.node for x in self.applied]
1783 for p in parents:
1783 for p in parents:
1784 if p in rr:
1784 if p in rr:
1785 self.ui.warn(_(b"qpop: forcing dirstate update\n"))
1785 self.ui.warn(_(b"qpop: forcing dirstate update\n"))
1786 update = True
1786 update = True
1787 else:
1787 else:
1788 parents = [p.node() for p in repo[None].parents()]
1788 parents = [p.node() for p in repo[None].parents()]
1789 update = any(
1789 update = any(
1790 entry.node in parents for entry in self.applied[start:]
1790 entry.node in parents for entry in self.applied[start:]
1791 )
1791 )
1792
1792
1793 tobackup = set()
1793 tobackup = set()
1794 if update:
1794 if update:
1795 s = self.checklocalchanges(repo, force=force or keepchanges)
1795 s = self.checklocalchanges(repo, force=force or keepchanges)
1796 if force:
1796 if force:
1797 if not nobackup:
1797 if not nobackup:
1798 tobackup.update(s.modified + s.added)
1798 tobackup.update(s.modified + s.added)
1799 elif keepchanges:
1799 elif keepchanges:
1800 tobackup.update(
1800 tobackup.update(
1801 s.modified + s.added + s.removed + s.deleted
1801 s.modified + s.added + s.removed + s.deleted
1802 )
1802 )
1803
1803
1804 self.applieddirty = True
1804 self.applieddirty = True
1805 end = len(self.applied)
1805 end = len(self.applied)
1806 rev = self.applied[start].node
1806 rev = self.applied[start].node
1807
1807
1808 try:
1808 try:
1809 heads = repo.changelog.heads(rev)
1809 heads = repo.changelog.heads(rev)
1810 except error.LookupError:
1810 except error.LookupError:
1811 node = short(rev)
1811 node = short(rev)
1812 raise error.Abort(_(b'trying to pop unknown node %s') % node)
1812 raise error.Abort(_(b'trying to pop unknown node %s') % node)
1813
1813
1814 if heads != [self.applied[-1].node]:
1814 if heads != [self.applied[-1].node]:
1815 raise error.Abort(
1815 raise error.Abort(
1816 _(
1816 _(
1817 b"popping would remove a revision not "
1817 b"popping would remove a revision not "
1818 b"managed by this patch queue"
1818 b"managed by this patch queue"
1819 )
1819 )
1820 )
1820 )
1821 if not repo[self.applied[-1].node].mutable():
1821 if not repo[self.applied[-1].node].mutable():
1822 raise error.Abort(
1822 raise error.Abort(
1823 _(b"popping would remove a public revision"),
1823 _(b"popping would remove a public revision"),
1824 hint=_(b"see 'hg help phases' for details"),
1824 hint=_(b"see 'hg help phases' for details"),
1825 )
1825 )
1826
1826
1827 # we know there are no local changes, so we can make a simplified
1827 # we know there are no local changes, so we can make a simplified
1828 # form of hg.update.
1828 # form of hg.update.
1829 if update:
1829 if update:
1830 qp = self.qparents(repo, rev)
1830 qp = self.qparents(repo, rev)
1831 ctx = repo[qp]
1831 ctx = repo[qp]
1832 st = repo.status(qp, b'.')
1832 st = repo.status(qp, b'.')
1833 m, a, r, d = st.modified, st.added, st.removed, st.deleted
1833 m, a, r, d = st.modified, st.added, st.removed, st.deleted
1834 if d:
1834 if d:
1835 raise error.Abort(_(b"deletions found between repo revs"))
1835 raise error.Abort(_(b"deletions found between repo revs"))
1836
1836
1837 tobackup = set(a + m + r) & tobackup
1837 tobackup = set(a + m + r) & tobackup
1838 if keepchanges and tobackup:
1838 if keepchanges and tobackup:
1839 raise error.Abort(_(b"local changes found, qrefresh first"))
1839 raise error.Abort(_(b"local changes found, qrefresh first"))
1840 self.backup(repo, tobackup)
1840 self.backup(repo, tobackup)
1841 with repo.dirstate.parentchange():
1841 with repo.dirstate.parentchange():
1842 for f in a:
1842 for f in a:
1843 repo.wvfs.unlinkpath(f, ignoremissing=True)
1843 repo.wvfs.unlinkpath(f, ignoremissing=True)
1844 repo.dirstate.update_file(
1844 repo.dirstate.update_file(
1845 f, p1_tracked=False, wc_tracked=False
1845 f, p1_tracked=False, wc_tracked=False
1846 )
1846 )
1847 for f in m + r:
1847 for f in m + r:
1848 fctx = ctx[f]
1848 fctx = ctx[f]
1849 repo.wwrite(f, fctx.data(), fctx.flags())
1849 repo.wwrite(f, fctx.data(), fctx.flags())
1850 repo.dirstate.update_file(
1850 repo.dirstate.update_file(
1851 f, p1_tracked=True, wc_tracked=True
1851 f, p1_tracked=True, wc_tracked=True
1852 )
1852 )
1853 repo.setparents(qp, repo.nullid)
1853 repo.setparents(qp, repo.nullid)
1854 for patch in reversed(self.applied[start:end]):
1854 for patch in reversed(self.applied[start:end]):
1855 self.ui.status(_(b"popping %s\n") % patch.name)
1855 self.ui.status(_(b"popping %s\n") % patch.name)
1856 del self.applied[start:end]
1856 del self.applied[start:end]
1857 strip(self.ui, repo, [rev], update=False, backup=False)
1857 strip(self.ui, repo, [rev], update=False, backup=False)
1858 for s, state in repo[b'.'].substate.items():
1858 for s, state in repo[b'.'].substate.items():
1859 repo[b'.'].sub(s).get(state)
1859 repo[b'.'].sub(s).get(state)
1860 if self.applied:
1860 if self.applied:
1861 self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
1861 self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
1862 else:
1862 else:
1863 self.ui.write(_(b"patch queue now empty\n"))
1863 self.ui.write(_(b"patch queue now empty\n"))
1864
1864
1865 def diff(self, repo, pats, opts):
1865 def diff(self, repo, pats, opts):
1866 top, patch = self.checktoppatch(repo)
1866 top, patch = self.checktoppatch(repo)
1867 if not top:
1867 if not top:
1868 self.ui.write(_(b"no patches applied\n"))
1868 self.ui.write(_(b"no patches applied\n"))
1869 return
1869 return
1870 qp = self.qparents(repo, top)
1870 qp = self.qparents(repo, top)
1871 if opts.get(b'reverse'):
1871 if opts.get(b'reverse'):
1872 node1, node2 = None, qp
1872 node1, node2 = None, qp
1873 else:
1873 else:
1874 node1, node2 = qp, None
1874 node1, node2 = qp, None
1875 diffopts = self.diffopts(opts, patch)
1875 diffopts = self.diffopts(opts, patch)
1876 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1876 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1877
1877
1878 def refresh(self, repo, pats=None, **opts):
1878 def refresh(self, repo, pats=None, **opts):
1879 opts = pycompat.byteskwargs(opts)
1879 opts = pycompat.byteskwargs(opts)
1880 if not self.applied:
1880 if not self.applied:
1881 self.ui.write(_(b"no patches applied\n"))
1881 self.ui.write(_(b"no patches applied\n"))
1882 return 1
1882 return 1
1883 msg = opts.get(b'msg', b'').rstrip()
1883 msg = opts.get(b'msg', b'').rstrip()
1884 edit = opts.get(b'edit')
1884 edit = opts.get(b'edit')
1885 editform = opts.get(b'editform', b'mq.qrefresh')
1885 editform = opts.get(b'editform', b'mq.qrefresh')
1886 newuser = opts.get(b'user')
1886 newuser = opts.get(b'user')
1887 newdate = opts.get(b'date')
1887 newdate = opts.get(b'date')
1888 if newdate:
1888 if newdate:
1889 newdate = b'%d %d' % dateutil.parsedate(newdate)
1889 newdate = b'%d %d' % dateutil.parsedate(newdate)
1890 wlock = repo.wlock()
1890 wlock = repo.wlock()
1891
1891
1892 try:
1892 try:
1893 self.checktoppatch(repo)
1893 self.checktoppatch(repo)
1894 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1894 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1895 if repo.changelog.heads(top) != [top]:
1895 if repo.changelog.heads(top) != [top]:
1896 raise error.Abort(
1896 raise error.Abort(
1897 _(b"cannot qrefresh a revision with children")
1897 _(b"cannot qrefresh a revision with children")
1898 )
1898 )
1899 if not repo[top].mutable():
1899 if not repo[top].mutable():
1900 raise error.Abort(
1900 raise error.Abort(
1901 _(b"cannot qrefresh public revision"),
1901 _(b"cannot qrefresh public revision"),
1902 hint=_(b"see 'hg help phases' for details"),
1902 hint=_(b"see 'hg help phases' for details"),
1903 )
1903 )
1904
1904
1905 cparents = repo.changelog.parents(top)
1905 cparents = repo.changelog.parents(top)
1906 patchparent = self.qparents(repo, top)
1906 patchparent = self.qparents(repo, top)
1907
1907
1908 inclsubs = checksubstate(repo, patchparent)
1908 inclsubs = checksubstate(repo, patchparent)
1909 if inclsubs:
1909 if inclsubs:
1910 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1910 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1911
1911
1912 ph = patchheader(self.join(patchfn), self.plainmode)
1912 ph = patchheader(self.join(patchfn), self.plainmode)
1913 diffopts = self.diffopts(
1913 diffopts = self.diffopts(
1914 {b'git': opts.get(b'git')}, patchfn, plain=True
1914 {b'git': opts.get(b'git')}, patchfn, plain=True
1915 )
1915 )
1916 if newuser:
1916 if newuser:
1917 ph.setuser(newuser)
1917 ph.setuser(newuser)
1918 if newdate:
1918 if newdate:
1919 ph.setdate(newdate)
1919 ph.setdate(newdate)
1920 ph.setparent(hex(patchparent))
1920 ph.setparent(hex(patchparent))
1921
1921
1922 # only commit new patch when write is complete
1922 # only commit new patch when write is complete
1923 patchf = self.opener(patchfn, b'w', atomictemp=True)
1923 patchf = self.opener(patchfn, b'w', atomictemp=True)
1924
1924
1925 # update the dirstate in place, strip off the qtip commit
1925 # update the dirstate in place, strip off the qtip commit
1926 # and then commit.
1926 # and then commit.
1927 #
1927 #
1928 # this should really read:
1928 # this should really read:
1929 # st = repo.status(top, patchparent)
1929 # st = repo.status(top, patchparent)
1930 # but we do it backwards to take advantage of manifest/changelog
1930 # but we do it backwards to take advantage of manifest/changelog
1931 # caching against the next repo.status call
1931 # caching against the next repo.status call
1932 st = repo.status(patchparent, top)
1932 st = repo.status(patchparent, top)
1933 mm, aa, dd = st.modified, st.added, st.removed
1933 mm, aa, dd = st.modified, st.added, st.removed
1934 ctx = repo[top]
1934 ctx = repo[top]
1935 aaa = aa[:]
1935 aaa = aa[:]
1936 match1 = scmutil.match(repo[None], pats, opts)
1936 match1 = scmutil.match(repo[None], pats, opts)
1937 # in short mode, we only diff the files included in the
1937 # in short mode, we only diff the files included in the
1938 # patch already plus specified files
1938 # patch already plus specified files
1939 if opts.get(b'short'):
1939 if opts.get(b'short'):
1940 # if amending a patch, we start with existing
1940 # if amending a patch, we start with existing
1941 # files plus specified files - unfiltered
1941 # files plus specified files - unfiltered
1942 match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
1942 match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
1943 # filter with include/exclude options
1943 # filter with include/exclude options
1944 match1 = scmutil.match(repo[None], opts=opts)
1944 match1 = scmutil.match(repo[None], opts=opts)
1945 else:
1945 else:
1946 match = scmutil.matchall(repo)
1946 match = scmutil.matchall(repo)
1947 stb = repo.status(match=match)
1947 stb = repo.status(match=match)
1948 m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted
1948 m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted
1949 mm = set(mm)
1949 mm = set(mm)
1950 aa = set(aa)
1950 aa = set(aa)
1951 dd = set(dd)
1951 dd = set(dd)
1952
1952
1953 # we might end up with files that were added between
1953 # we might end up with files that were added between
1954 # qtip and the dirstate parent, but then changed in the
1954 # qtip and the dirstate parent, but then changed in the
1955 # local dirstate. in this case, we want them to only
1955 # local dirstate. in this case, we want them to only
1956 # show up in the added section
1956 # show up in the added section
1957 for x in m:
1957 for x in m:
1958 if x not in aa:
1958 if x not in aa:
1959 mm.add(x)
1959 mm.add(x)
1960 # we might end up with files added by the local dirstate that
1960 # we might end up with files added by the local dirstate that
1961 # were deleted by the patch. In this case, they should only
1961 # were deleted by the patch. In this case, they should only
1962 # show up in the changed section.
1962 # show up in the changed section.
1963 for x in a:
1963 for x in a:
1964 if x in dd:
1964 if x in dd:
1965 dd.remove(x)
1965 dd.remove(x)
1966 mm.add(x)
1966 mm.add(x)
1967 else:
1967 else:
1968 aa.add(x)
1968 aa.add(x)
1969 # make sure any files deleted in the local dirstate
1969 # make sure any files deleted in the local dirstate
1970 # are not in the add or change column of the patch
1970 # are not in the add or change column of the patch
1971 forget = []
1971 forget = []
1972 for x in d + r:
1972 for x in d + r:
1973 if x in aa:
1973 if x in aa:
1974 aa.remove(x)
1974 aa.remove(x)
1975 forget.append(x)
1975 forget.append(x)
1976 continue
1976 continue
1977 else:
1977 else:
1978 mm.discard(x)
1978 mm.discard(x)
1979 dd.add(x)
1979 dd.add(x)
1980
1980
1981 m = list(mm)
1981 m = list(mm)
1982 r = list(dd)
1982 r = list(dd)
1983 a = list(aa)
1983 a = list(aa)
1984
1984
1985 # create 'match' that includes the files to be recommitted.
1985 # create 'match' that includes the files to be recommitted.
1986 # apply match1 via repo.status to ensure correct case handling.
1986 # apply match1 via repo.status to ensure correct case handling.
1987 st = repo.status(patchparent, match=match1)
1987 st = repo.status(patchparent, match=match1)
1988 cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted
1988 cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted
1989 allmatches = set(cm + ca + cr + cd)
1989 allmatches = set(cm + ca + cr + cd)
1990 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1990 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1991
1991
1992 files = set(inclsubs)
1992 files = set(inclsubs)
1993 for x in refreshchanges:
1993 for x in refreshchanges:
1994 files.update(x)
1994 files.update(x)
1995 match = scmutil.matchfiles(repo, files)
1995 match = scmutil.matchfiles(repo, files)
1996
1996
1997 bmlist = repo[top].bookmarks()
1997 bmlist = repo[top].bookmarks()
1998
1998
1999 with repo.dirstate.parentchange():
1999 with repo.dirstate.parentchange():
2000 # XXX do we actually need the dirstateguard
2000 # XXX do we actually need the dirstateguard
2001 dsguard = None
2001 dsguard = None
2002 try:
2002 try:
2003 dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
2003 dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
2004 if diffopts.git or diffopts.upgrade:
2004 if diffopts.git or diffopts.upgrade:
2005 copies = {}
2005 copies = {}
2006 for dst in a:
2006 for dst in a:
2007 src = repo.dirstate.copied(dst)
2007 src = repo.dirstate.copied(dst)
2008 # during qfold, the source file for copies may
2008 # during qfold, the source file for copies may
2009 # be removed. Treat this as a simple add.
2009 # be removed. Treat this as a simple add.
2010 if src is not None and src in repo.dirstate:
2010 if src is not None and src in repo.dirstate:
2011 copies.setdefault(src, []).append(dst)
2011 copies.setdefault(src, []).append(dst)
2012 repo.dirstate.update_file(
2012 repo.dirstate.update_file(
2013 dst, p1_tracked=False, wc_tracked=True
2013 dst, p1_tracked=False, wc_tracked=True
2014 )
2014 )
2015 # remember the copies between patchparent and qtip
2015 # remember the copies between patchparent and qtip
2016 for dst in aaa:
2016 for dst in aaa:
2017 src = ctx[dst].copysource()
2017 src = ctx[dst].copysource()
2018 if src:
2018 if src:
2019 copies.setdefault(src, []).extend(
2019 copies.setdefault(src, []).extend(
2020 copies.get(dst, [])
2020 copies.get(dst, [])
2021 )
2021 )
2022 if dst in a:
2022 if dst in a:
2023 copies[src].append(dst)
2023 copies[src].append(dst)
2024 # we can't copy a file created by the patch itself
2024 # we can't copy a file created by the patch itself
2025 if dst in copies:
2025 if dst in copies:
2026 del copies[dst]
2026 del copies[dst]
2027 for src, dsts in copies.items():
2027 for src, dsts in copies.items():
2028 for dst in dsts:
2028 for dst in dsts:
2029 repo.dirstate.copy(src, dst)
2029 repo.dirstate.copy(src, dst)
2030 else:
2030 else:
2031 for dst in a:
2031 for dst in a:
2032 repo.dirstate.update_file(
2032 repo.dirstate.update_file(
2033 dst, p1_tracked=False, wc_tracked=True
2033 dst, p1_tracked=False, wc_tracked=True
2034 )
2034 )
2035 # Drop useless copy information
2035 # Drop useless copy information
2036 for f in list(repo.dirstate.copies()):
2036 for f in list(repo.dirstate.copies()):
2037 repo.dirstate.copy(None, f)
2037 repo.dirstate.copy(None, f)
2038 for f in r:
2038 for f in r:
2039 repo.dirstate.update_file_p1(f, p1_tracked=True)
2039 repo.dirstate.update_file_p1(f, p1_tracked=True)
2040 # if the patch excludes a modified file, mark that
2040 # if the patch excludes a modified file, mark that
2041 # file with mtime=0 so status can see it.
2041 # file with mtime=0 so status can see it.
2042 mm = []
2042 mm = []
2043 for i in pycompat.xrange(len(m) - 1, -1, -1):
2043 for i in range(len(m) - 1, -1, -1):
2044 if not match1(m[i]):
2044 if not match1(m[i]):
2045 mm.append(m[i])
2045 mm.append(m[i])
2046 del m[i]
2046 del m[i]
2047 for f in m:
2047 for f in m:
2048 repo.dirstate.update_file_p1(f, p1_tracked=True)
2048 repo.dirstate.update_file_p1(f, p1_tracked=True)
2049 for f in mm:
2049 for f in mm:
2050 repo.dirstate.update_file_p1(f, p1_tracked=True)
2050 repo.dirstate.update_file_p1(f, p1_tracked=True)
2051 for f in forget:
2051 for f in forget:
2052 repo.dirstate.update_file_p1(f, p1_tracked=False)
2052 repo.dirstate.update_file_p1(f, p1_tracked=False)
2053
2053
2054 user = ph.user or ctx.user()
2054 user = ph.user or ctx.user()
2055
2055
2056 oldphase = repo[top].phase()
2056 oldphase = repo[top].phase()
2057
2057
2058 # assumes strip can roll itself back if interrupted
2058 # assumes strip can roll itself back if interrupted
2059 repo.setparents(*cparents)
2059 repo.setparents(*cparents)
2060 self.applied.pop()
2060 self.applied.pop()
2061 self.applieddirty = True
2061 self.applieddirty = True
2062 strip(self.ui, repo, [top], update=False, backup=False)
2062 strip(self.ui, repo, [top], update=False, backup=False)
2063 dsguard.close()
2063 dsguard.close()
2064 finally:
2064 finally:
2065 release(dsguard)
2065 release(dsguard)
2066
2066
2067 try:
2067 try:
2068 # might be nice to attempt to roll back strip after this
2068 # might be nice to attempt to roll back strip after this
2069
2069
2070 defaultmsg = b"[mq]: %s" % patchfn
2070 defaultmsg = b"[mq]: %s" % patchfn
2071 editor = cmdutil.getcommiteditor(editform=editform)
2071 editor = cmdutil.getcommiteditor(editform=editform)
2072 if edit:
2072 if edit:
2073
2073
2074 def finishdesc(desc):
2074 def finishdesc(desc):
2075 if desc.rstrip():
2075 if desc.rstrip():
2076 ph.setmessage(desc)
2076 ph.setmessage(desc)
2077 return desc
2077 return desc
2078 return defaultmsg
2078 return defaultmsg
2079
2079
2080 # i18n: this message is shown in editor with "HG: " prefix
2080 # i18n: this message is shown in editor with "HG: " prefix
2081 extramsg = _(b'Leave message empty to use default message.')
2081 extramsg = _(b'Leave message empty to use default message.')
2082 editor = cmdutil.getcommiteditor(
2082 editor = cmdutil.getcommiteditor(
2083 finishdesc=finishdesc,
2083 finishdesc=finishdesc,
2084 extramsg=extramsg,
2084 extramsg=extramsg,
2085 editform=editform,
2085 editform=editform,
2086 )
2086 )
2087 message = msg or b"\n".join(ph.message)
2087 message = msg or b"\n".join(ph.message)
2088 elif not msg:
2088 elif not msg:
2089 if not ph.message:
2089 if not ph.message:
2090 message = defaultmsg
2090 message = defaultmsg
2091 else:
2091 else:
2092 message = b"\n".join(ph.message)
2092 message = b"\n".join(ph.message)
2093 else:
2093 else:
2094 message = msg
2094 message = msg
2095 ph.setmessage(msg)
2095 ph.setmessage(msg)
2096
2096
2097 # Ensure we create a new changeset in the same phase than
2097 # Ensure we create a new changeset in the same phase than
2098 # the old one.
2098 # the old one.
2099 lock = tr = None
2099 lock = tr = None
2100 try:
2100 try:
2101 lock = repo.lock()
2101 lock = repo.lock()
2102 tr = repo.transaction(b'mq')
2102 tr = repo.transaction(b'mq')
2103 n = newcommit(
2103 n = newcommit(
2104 repo,
2104 repo,
2105 oldphase,
2105 oldphase,
2106 message,
2106 message,
2107 user,
2107 user,
2108 ph.date,
2108 ph.date,
2109 match=match,
2109 match=match,
2110 force=True,
2110 force=True,
2111 editor=editor,
2111 editor=editor,
2112 )
2112 )
2113 # only write patch after a successful commit
2113 # only write patch after a successful commit
2114 c = [list(x) for x in refreshchanges]
2114 c = [list(x) for x in refreshchanges]
2115 if inclsubs:
2115 if inclsubs:
2116 self.putsubstate2changes(substatestate, c)
2116 self.putsubstate2changes(substatestate, c)
2117 chunks = patchmod.diff(
2117 chunks = patchmod.diff(
2118 repo, patchparent, changes=c, opts=diffopts
2118 repo, patchparent, changes=c, opts=diffopts
2119 )
2119 )
2120 comments = bytes(ph)
2120 comments = bytes(ph)
2121 if comments:
2121 if comments:
2122 patchf.write(comments)
2122 patchf.write(comments)
2123 for chunk in chunks:
2123 for chunk in chunks:
2124 patchf.write(chunk)
2124 patchf.write(chunk)
2125 patchf.close()
2125 patchf.close()
2126
2126
2127 marks = repo._bookmarks
2127 marks = repo._bookmarks
2128 marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
2128 marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
2129 tr.close()
2129 tr.close()
2130
2130
2131 self.applied.append(statusentry(n, patchfn))
2131 self.applied.append(statusentry(n, patchfn))
2132 finally:
2132 finally:
2133 lockmod.release(tr, lock)
2133 lockmod.release(tr, lock)
2134 except: # re-raises
2134 except: # re-raises
2135 ctx = repo[cparents[0]]
2135 ctx = repo[cparents[0]]
2136 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
2136 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
2137 self.savedirty()
2137 self.savedirty()
2138 self.ui.warn(
2138 self.ui.warn(
2139 _(
2139 _(
2140 b'qrefresh interrupted while patch was popped! '
2140 b'qrefresh interrupted while patch was popped! '
2141 b'(revert --all, qpush to recover)\n'
2141 b'(revert --all, qpush to recover)\n'
2142 )
2142 )
2143 )
2143 )
2144 raise
2144 raise
2145 finally:
2145 finally:
2146 wlock.release()
2146 wlock.release()
2147 self.removeundo(repo)
2147 self.removeundo(repo)
2148
2148
2149 def init(self, repo, create=False):
2149 def init(self, repo, create=False):
2150 if not create and os.path.isdir(self.path):
2150 if not create and os.path.isdir(self.path):
2151 raise error.Abort(_(b"patch queue directory already exists"))
2151 raise error.Abort(_(b"patch queue directory already exists"))
2152 try:
2152 try:
2153 os.mkdir(self.path)
2153 os.mkdir(self.path)
2154 except OSError as inst:
2154 except OSError as inst:
2155 if inst.errno != errno.EEXIST or not create:
2155 if inst.errno != errno.EEXIST or not create:
2156 raise
2156 raise
2157 if create:
2157 if create:
2158 return self.qrepo(create=True)
2158 return self.qrepo(create=True)
2159
2159
2160 def unapplied(self, repo, patch=None):
2160 def unapplied(self, repo, patch=None):
2161 if patch and patch not in self.series:
2161 if patch and patch not in self.series:
2162 raise error.Abort(_(b"patch %s is not in series file") % patch)
2162 raise error.Abort(_(b"patch %s is not in series file") % patch)
2163 if not patch:
2163 if not patch:
2164 start = self.seriesend()
2164 start = self.seriesend()
2165 else:
2165 else:
2166 start = self.series.index(patch) + 1
2166 start = self.series.index(patch) + 1
2167 unapplied = []
2167 unapplied = []
2168 for i in pycompat.xrange(start, len(self.series)):
2168 for i in range(start, len(self.series)):
2169 pushable, reason = self.pushable(i)
2169 pushable, reason = self.pushable(i)
2170 if pushable:
2170 if pushable:
2171 unapplied.append((i, self.series[i]))
2171 unapplied.append((i, self.series[i]))
2172 self.explainpushable(i)
2172 self.explainpushable(i)
2173 return unapplied
2173 return unapplied
2174
2174
2175 def qseries(
2175 def qseries(
2176 self,
2176 self,
2177 repo,
2177 repo,
2178 missing=None,
2178 missing=None,
2179 start=0,
2179 start=0,
2180 length=None,
2180 length=None,
2181 status=None,
2181 status=None,
2182 summary=False,
2182 summary=False,
2183 ):
2183 ):
2184 def displayname(pfx, patchname, state):
2184 def displayname(pfx, patchname, state):
2185 if pfx:
2185 if pfx:
2186 self.ui.write(pfx)
2186 self.ui.write(pfx)
2187 if summary:
2187 if summary:
2188 ph = patchheader(self.join(patchname), self.plainmode)
2188 ph = patchheader(self.join(patchname), self.plainmode)
2189 if ph.message:
2189 if ph.message:
2190 msg = ph.message[0]
2190 msg = ph.message[0]
2191 else:
2191 else:
2192 msg = b''
2192 msg = b''
2193
2193
2194 if self.ui.formatted():
2194 if self.ui.formatted():
2195 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
2195 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
2196 if width > 0:
2196 if width > 0:
2197 msg = stringutil.ellipsis(msg, width)
2197 msg = stringutil.ellipsis(msg, width)
2198 else:
2198 else:
2199 msg = b''
2199 msg = b''
2200 self.ui.write(patchname, label=b'qseries.' + state)
2200 self.ui.write(patchname, label=b'qseries.' + state)
2201 self.ui.write(b': ')
2201 self.ui.write(b': ')
2202 self.ui.write(msg, label=b'qseries.message.' + state)
2202 self.ui.write(msg, label=b'qseries.message.' + state)
2203 else:
2203 else:
2204 self.ui.write(patchname, label=b'qseries.' + state)
2204 self.ui.write(patchname, label=b'qseries.' + state)
2205 self.ui.write(b'\n')
2205 self.ui.write(b'\n')
2206
2206
2207 applied = {p.name for p in self.applied}
2207 applied = {p.name for p in self.applied}
2208 if length is None:
2208 if length is None:
2209 length = len(self.series) - start
2209 length = len(self.series) - start
2210 if not missing:
2210 if not missing:
2211 if self.ui.verbose:
2211 if self.ui.verbose:
2212 idxwidth = len(b"%d" % (start + length - 1))
2212 idxwidth = len(b"%d" % (start + length - 1))
2213 for i in pycompat.xrange(start, start + length):
2213 for i in range(start, start + length):
2214 patch = self.series[i]
2214 patch = self.series[i]
2215 if patch in applied:
2215 if patch in applied:
2216 char, state = b'A', b'applied'
2216 char, state = b'A', b'applied'
2217 elif self.pushable(i)[0]:
2217 elif self.pushable(i)[0]:
2218 char, state = b'U', b'unapplied'
2218 char, state = b'U', b'unapplied'
2219 else:
2219 else:
2220 char, state = b'G', b'guarded'
2220 char, state = b'G', b'guarded'
2221 pfx = b''
2221 pfx = b''
2222 if self.ui.verbose:
2222 if self.ui.verbose:
2223 pfx = b'%*d %s ' % (idxwidth, i, char)
2223 pfx = b'%*d %s ' % (idxwidth, i, char)
2224 elif status and status != char:
2224 elif status and status != char:
2225 continue
2225 continue
2226 displayname(pfx, patch, state)
2226 displayname(pfx, patch, state)
2227 else:
2227 else:
2228 msng_list = []
2228 msng_list = []
2229 for root, dirs, files in os.walk(self.path):
2229 for root, dirs, files in os.walk(self.path):
2230 d = root[len(self.path) + 1 :]
2230 d = root[len(self.path) + 1 :]
2231 for f in files:
2231 for f in files:
2232 fl = os.path.join(d, f)
2232 fl = os.path.join(d, f)
2233 if (
2233 if (
2234 fl not in self.series
2234 fl not in self.series
2235 and fl
2235 and fl
2236 not in (
2236 not in (
2237 self.statuspath,
2237 self.statuspath,
2238 self.seriespath,
2238 self.seriespath,
2239 self.guardspath,
2239 self.guardspath,
2240 )
2240 )
2241 and not fl.startswith(b'.')
2241 and not fl.startswith(b'.')
2242 ):
2242 ):
2243 msng_list.append(fl)
2243 msng_list.append(fl)
2244 for x in sorted(msng_list):
2244 for x in sorted(msng_list):
2245 pfx = self.ui.verbose and b'D ' or b''
2245 pfx = self.ui.verbose and b'D ' or b''
2246 displayname(pfx, x, b'missing')
2246 displayname(pfx, x, b'missing')
2247
2247
2248 def issaveline(self, l):
2248 def issaveline(self, l):
2249 if l.name == b'.hg.patches.save.line':
2249 if l.name == b'.hg.patches.save.line':
2250 return True
2250 return True
2251
2251
2252 def qrepo(self, create=False):
2252 def qrepo(self, create=False):
2253 ui = self.baseui.copy()
2253 ui = self.baseui.copy()
2254 # copy back attributes set by ui.pager()
2254 # copy back attributes set by ui.pager()
2255 if self.ui.pageractive and not ui.pageractive:
2255 if self.ui.pageractive and not ui.pageractive:
2256 ui.pageractive = self.ui.pageractive
2256 ui.pageractive = self.ui.pageractive
2257 # internal config: ui.formatted
2257 # internal config: ui.formatted
2258 ui.setconfig(
2258 ui.setconfig(
2259 b'ui',
2259 b'ui',
2260 b'formatted',
2260 b'formatted',
2261 self.ui.config(b'ui', b'formatted'),
2261 self.ui.config(b'ui', b'formatted'),
2262 b'mqpager',
2262 b'mqpager',
2263 )
2263 )
2264 ui.setconfig(
2264 ui.setconfig(
2265 b'ui',
2265 b'ui',
2266 b'interactive',
2266 b'interactive',
2267 self.ui.config(b'ui', b'interactive'),
2267 self.ui.config(b'ui', b'interactive'),
2268 b'mqpager',
2268 b'mqpager',
2269 )
2269 )
2270 if create or os.path.isdir(self.join(b".hg")):
2270 if create or os.path.isdir(self.join(b".hg")):
2271 return hg.repository(ui, path=self.path, create=create)
2271 return hg.repository(ui, path=self.path, create=create)
2272
2272
2273 def restore(self, repo, rev, delete=None, qupdate=None):
2273 def restore(self, repo, rev, delete=None, qupdate=None):
2274 desc = repo[rev].description().strip()
2274 desc = repo[rev].description().strip()
2275 lines = desc.splitlines()
2275 lines = desc.splitlines()
2276 datastart = None
2276 datastart = None
2277 series = []
2277 series = []
2278 applied = []
2278 applied = []
2279 qpp = None
2279 qpp = None
2280 for i, line in enumerate(lines):
2280 for i, line in enumerate(lines):
2281 if line == b'Patch Data:':
2281 if line == b'Patch Data:':
2282 datastart = i + 1
2282 datastart = i + 1
2283 elif line.startswith(b'Dirstate:'):
2283 elif line.startswith(b'Dirstate:'):
2284 l = line.rstrip()
2284 l = line.rstrip()
2285 l = l[10:].split(b' ')
2285 l = l[10:].split(b' ')
2286 qpp = [bin(x) for x in l]
2286 qpp = [bin(x) for x in l]
2287 elif datastart is not None:
2287 elif datastart is not None:
2288 l = line.rstrip()
2288 l = line.rstrip()
2289 n, name = l.split(b':', 1)
2289 n, name = l.split(b':', 1)
2290 if n:
2290 if n:
2291 applied.append(statusentry(bin(n), name))
2291 applied.append(statusentry(bin(n), name))
2292 else:
2292 else:
2293 series.append(l)
2293 series.append(l)
2294 if datastart is None:
2294 if datastart is None:
2295 self.ui.warn(_(b"no saved patch data found\n"))
2295 self.ui.warn(_(b"no saved patch data found\n"))
2296 return 1
2296 return 1
2297 self.ui.warn(_(b"restoring status: %s\n") % lines[0])
2297 self.ui.warn(_(b"restoring status: %s\n") % lines[0])
2298 self.fullseries = series
2298 self.fullseries = series
2299 self.applied = applied
2299 self.applied = applied
2300 self.parseseries()
2300 self.parseseries()
2301 self.seriesdirty = True
2301 self.seriesdirty = True
2302 self.applieddirty = True
2302 self.applieddirty = True
2303 heads = repo.changelog.heads()
2303 heads = repo.changelog.heads()
2304 if delete:
2304 if delete:
2305 if rev not in heads:
2305 if rev not in heads:
2306 self.ui.warn(_(b"save entry has children, leaving it alone\n"))
2306 self.ui.warn(_(b"save entry has children, leaving it alone\n"))
2307 else:
2307 else:
2308 self.ui.warn(_(b"removing save entry %s\n") % short(rev))
2308 self.ui.warn(_(b"removing save entry %s\n") % short(rev))
2309 pp = repo.dirstate.parents()
2309 pp = repo.dirstate.parents()
2310 if rev in pp:
2310 if rev in pp:
2311 update = True
2311 update = True
2312 else:
2312 else:
2313 update = False
2313 update = False
2314 strip(self.ui, repo, [rev], update=update, backup=False)
2314 strip(self.ui, repo, [rev], update=update, backup=False)
2315 if qpp:
2315 if qpp:
2316 self.ui.warn(
2316 self.ui.warn(
2317 _(b"saved queue repository parents: %s %s\n")
2317 _(b"saved queue repository parents: %s %s\n")
2318 % (short(qpp[0]), short(qpp[1]))
2318 % (short(qpp[0]), short(qpp[1]))
2319 )
2319 )
2320 if qupdate:
2320 if qupdate:
2321 self.ui.status(_(b"updating queue directory\n"))
2321 self.ui.status(_(b"updating queue directory\n"))
2322 r = self.qrepo()
2322 r = self.qrepo()
2323 if not r:
2323 if not r:
2324 self.ui.warn(_(b"unable to load queue repository\n"))
2324 self.ui.warn(_(b"unable to load queue repository\n"))
2325 return 1
2325 return 1
2326 hg.clean(r, qpp[0])
2326 hg.clean(r, qpp[0])
2327
2327
2328 def save(self, repo, msg=None):
2328 def save(self, repo, msg=None):
2329 if not self.applied:
2329 if not self.applied:
2330 self.ui.warn(_(b"save: no patches applied, exiting\n"))
2330 self.ui.warn(_(b"save: no patches applied, exiting\n"))
2331 return 1
2331 return 1
2332 if self.issaveline(self.applied[-1]):
2332 if self.issaveline(self.applied[-1]):
2333 self.ui.warn(_(b"status is already saved\n"))
2333 self.ui.warn(_(b"status is already saved\n"))
2334 return 1
2334 return 1
2335
2335
2336 if not msg:
2336 if not msg:
2337 msg = _(b"hg patches saved state")
2337 msg = _(b"hg patches saved state")
2338 else:
2338 else:
2339 msg = b"hg patches: " + msg.rstrip(b'\r\n')
2339 msg = b"hg patches: " + msg.rstrip(b'\r\n')
2340 r = self.qrepo()
2340 r = self.qrepo()
2341 if r:
2341 if r:
2342 pp = r.dirstate.parents()
2342 pp = r.dirstate.parents()
2343 msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
2343 msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
2344 msg += b"\n\nPatch Data:\n"
2344 msg += b"\n\nPatch Data:\n"
2345 msg += b''.join(b'%s\n' % x for x in self.applied)
2345 msg += b''.join(b'%s\n' % x for x in self.applied)
2346 msg += b''.join(b':%s\n' % x for x in self.fullseries)
2346 msg += b''.join(b':%s\n' % x for x in self.fullseries)
2347 n = repo.commit(msg, force=True)
2347 n = repo.commit(msg, force=True)
2348 if not n:
2348 if not n:
2349 self.ui.warn(_(b"repo commit failed\n"))
2349 self.ui.warn(_(b"repo commit failed\n"))
2350 return 1
2350 return 1
2351 self.applied.append(statusentry(n, b'.hg.patches.save.line'))
2351 self.applied.append(statusentry(n, b'.hg.patches.save.line'))
2352 self.applieddirty = True
2352 self.applieddirty = True
2353 self.removeundo(repo)
2353 self.removeundo(repo)
2354
2354
2355 def fullseriesend(self):
2355 def fullseriesend(self):
2356 if self.applied:
2356 if self.applied:
2357 p = self.applied[-1].name
2357 p = self.applied[-1].name
2358 end = self.findseries(p)
2358 end = self.findseries(p)
2359 if end is None:
2359 if end is None:
2360 return len(self.fullseries)
2360 return len(self.fullseries)
2361 return end + 1
2361 return end + 1
2362 return 0
2362 return 0
2363
2363
2364 def seriesend(self, all_patches=False):
2364 def seriesend(self, all_patches=False):
2365 """If all_patches is False, return the index of the next pushable patch
2365 """If all_patches is False, return the index of the next pushable patch
2366 in the series, or the series length. If all_patches is True, return the
2366 in the series, or the series length. If all_patches is True, return the
2367 index of the first patch past the last applied one.
2367 index of the first patch past the last applied one.
2368 """
2368 """
2369 end = 0
2369 end = 0
2370
2370
2371 def nextpatch(start):
2371 def nextpatch(start):
2372 if all_patches or start >= len(self.series):
2372 if all_patches or start >= len(self.series):
2373 return start
2373 return start
2374 for i in pycompat.xrange(start, len(self.series)):
2374 for i in range(start, len(self.series)):
2375 p, reason = self.pushable(i)
2375 p, reason = self.pushable(i)
2376 if p:
2376 if p:
2377 return i
2377 return i
2378 self.explainpushable(i)
2378 self.explainpushable(i)
2379 return len(self.series)
2379 return len(self.series)
2380
2380
2381 if self.applied:
2381 if self.applied:
2382 p = self.applied[-1].name
2382 p = self.applied[-1].name
2383 try:
2383 try:
2384 end = self.series.index(p)
2384 end = self.series.index(p)
2385 except ValueError:
2385 except ValueError:
2386 return 0
2386 return 0
2387 return nextpatch(end + 1)
2387 return nextpatch(end + 1)
2388 return nextpatch(end)
2388 return nextpatch(end)
2389
2389
2390 def appliedname(self, index):
2390 def appliedname(self, index):
2391 pname = self.applied[index].name
2391 pname = self.applied[index].name
2392 if not self.ui.verbose:
2392 if not self.ui.verbose:
2393 p = pname
2393 p = pname
2394 else:
2394 else:
2395 p = (b"%d" % self.series.index(pname)) + b" " + pname
2395 p = (b"%d" % self.series.index(pname)) + b" " + pname
2396 return p
2396 return p
2397
2397
2398 def qimport(
2398 def qimport(
2399 self,
2399 self,
2400 repo,
2400 repo,
2401 files,
2401 files,
2402 patchname=None,
2402 patchname=None,
2403 rev=None,
2403 rev=None,
2404 existing=None,
2404 existing=None,
2405 force=None,
2405 force=None,
2406 git=False,
2406 git=False,
2407 ):
2407 ):
2408 def checkseries(patchname):
2408 def checkseries(patchname):
2409 if patchname in self.series:
2409 if patchname in self.series:
2410 raise error.Abort(
2410 raise error.Abort(
2411 _(b'patch %s is already in the series file') % patchname
2411 _(b'patch %s is already in the series file') % patchname
2412 )
2412 )
2413
2413
2414 if rev:
2414 if rev:
2415 if files:
2415 if files:
2416 raise error.Abort(
2416 raise error.Abort(
2417 _(b'option "-r" not valid when importing files')
2417 _(b'option "-r" not valid when importing files')
2418 )
2418 )
2419 rev = logcmdutil.revrange(repo, rev)
2419 rev = logcmdutil.revrange(repo, rev)
2420 rev.sort(reverse=True)
2420 rev.sort(reverse=True)
2421 elif not files:
2421 elif not files:
2422 raise error.Abort(_(b'no files or revisions specified'))
2422 raise error.Abort(_(b'no files or revisions specified'))
2423 if (len(files) > 1 or len(rev) > 1) and patchname:
2423 if (len(files) > 1 or len(rev) > 1) and patchname:
2424 raise error.Abort(
2424 raise error.Abort(
2425 _(b'option "-n" not valid when importing multiple patches')
2425 _(b'option "-n" not valid when importing multiple patches')
2426 )
2426 )
2427 imported = []
2427 imported = []
2428 if rev:
2428 if rev:
2429 # If mq patches are applied, we can only import revisions
2429 # If mq patches are applied, we can only import revisions
2430 # that form a linear path to qbase.
2430 # that form a linear path to qbase.
2431 # Otherwise, they should form a linear path to a head.
2431 # Otherwise, they should form a linear path to a head.
2432 heads = repo.changelog.heads(repo.changelog.node(rev.first()))
2432 heads = repo.changelog.heads(repo.changelog.node(rev.first()))
2433 if len(heads) > 1:
2433 if len(heads) > 1:
2434 raise error.Abort(
2434 raise error.Abort(
2435 _(b'revision %d is the root of more than one branch')
2435 _(b'revision %d is the root of more than one branch')
2436 % rev.last()
2436 % rev.last()
2437 )
2437 )
2438 if self.applied:
2438 if self.applied:
2439 base = repo.changelog.node(rev.first())
2439 base = repo.changelog.node(rev.first())
2440 if base in [n.node for n in self.applied]:
2440 if base in [n.node for n in self.applied]:
2441 raise error.Abort(
2441 raise error.Abort(
2442 _(b'revision %d is already managed') % rev.first()
2442 _(b'revision %d is already managed') % rev.first()
2443 )
2443 )
2444 if heads != [self.applied[-1].node]:
2444 if heads != [self.applied[-1].node]:
2445 raise error.Abort(
2445 raise error.Abort(
2446 _(b'revision %d is not the parent of the queue')
2446 _(b'revision %d is not the parent of the queue')
2447 % rev.first()
2447 % rev.first()
2448 )
2448 )
2449 base = repo.changelog.rev(self.applied[0].node)
2449 base = repo.changelog.rev(self.applied[0].node)
2450 lastparent = repo.changelog.parentrevs(base)[0]
2450 lastparent = repo.changelog.parentrevs(base)[0]
2451 else:
2451 else:
2452 if heads != [repo.changelog.node(rev.first())]:
2452 if heads != [repo.changelog.node(rev.first())]:
2453 raise error.Abort(
2453 raise error.Abort(
2454 _(b'revision %d has unmanaged children') % rev.first()
2454 _(b'revision %d has unmanaged children') % rev.first()
2455 )
2455 )
2456 lastparent = None
2456 lastparent = None
2457
2457
2458 diffopts = self.diffopts({b'git': git})
2458 diffopts = self.diffopts({b'git': git})
2459 with repo.transaction(b'qimport') as tr:
2459 with repo.transaction(b'qimport') as tr:
2460 for r in rev:
2460 for r in rev:
2461 if not repo[r].mutable():
2461 if not repo[r].mutable():
2462 raise error.Abort(
2462 raise error.Abort(
2463 _(b'revision %d is not mutable') % r,
2463 _(b'revision %d is not mutable') % r,
2464 hint=_(b"see 'hg help phases' " b'for details'),
2464 hint=_(b"see 'hg help phases' " b'for details'),
2465 )
2465 )
2466 p1, p2 = repo.changelog.parentrevs(r)
2466 p1, p2 = repo.changelog.parentrevs(r)
2467 n = repo.changelog.node(r)
2467 n = repo.changelog.node(r)
2468 if p2 != nullrev:
2468 if p2 != nullrev:
2469 raise error.Abort(
2469 raise error.Abort(
2470 _(b'cannot import merge revision %d') % r
2470 _(b'cannot import merge revision %d') % r
2471 )
2471 )
2472 if lastparent and lastparent != r:
2472 if lastparent and lastparent != r:
2473 raise error.Abort(
2473 raise error.Abort(
2474 _(b'revision %d is not the parent of %d')
2474 _(b'revision %d is not the parent of %d')
2475 % (r, lastparent)
2475 % (r, lastparent)
2476 )
2476 )
2477 lastparent = p1
2477 lastparent = p1
2478
2478
2479 if not patchname:
2479 if not patchname:
2480 patchname = self.makepatchname(
2480 patchname = self.makepatchname(
2481 repo[r].description().split(b'\n', 1)[0],
2481 repo[r].description().split(b'\n', 1)[0],
2482 b'%d.diff' % r,
2482 b'%d.diff' % r,
2483 )
2483 )
2484 checkseries(patchname)
2484 checkseries(patchname)
2485 self.checkpatchname(patchname, force)
2485 self.checkpatchname(patchname, force)
2486 self.fullseries.insert(0, patchname)
2486 self.fullseries.insert(0, patchname)
2487
2487
2488 with self.opener(patchname, b"w") as fp:
2488 with self.opener(patchname, b"w") as fp:
2489 cmdutil.exportfile(repo, [n], fp, opts=diffopts)
2489 cmdutil.exportfile(repo, [n], fp, opts=diffopts)
2490
2490
2491 se = statusentry(n, patchname)
2491 se = statusentry(n, patchname)
2492 self.applied.insert(0, se)
2492 self.applied.insert(0, se)
2493
2493
2494 self.added.append(patchname)
2494 self.added.append(patchname)
2495 imported.append(patchname)
2495 imported.append(patchname)
2496 patchname = None
2496 patchname = None
2497 if rev and repo.ui.configbool(b'mq', b'secret'):
2497 if rev and repo.ui.configbool(b'mq', b'secret'):
2498 # if we added anything with --rev, move the secret root
2498 # if we added anything with --rev, move the secret root
2499 phases.retractboundary(repo, tr, phases.secret, [n])
2499 phases.retractboundary(repo, tr, phases.secret, [n])
2500 self.parseseries()
2500 self.parseseries()
2501 self.applieddirty = True
2501 self.applieddirty = True
2502 self.seriesdirty = True
2502 self.seriesdirty = True
2503
2503
2504 for i, filename in enumerate(files):
2504 for i, filename in enumerate(files):
2505 if existing:
2505 if existing:
2506 if filename == b'-':
2506 if filename == b'-':
2507 raise error.Abort(
2507 raise error.Abort(
2508 _(b'-e is incompatible with import from -')
2508 _(b'-e is incompatible with import from -')
2509 )
2509 )
2510 filename = normname(filename)
2510 filename = normname(filename)
2511 self.checkreservedname(filename)
2511 self.checkreservedname(filename)
2512 if urlutil.url(filename).islocal():
2512 if urlutil.url(filename).islocal():
2513 originpath = self.join(filename)
2513 originpath = self.join(filename)
2514 if not os.path.isfile(originpath):
2514 if not os.path.isfile(originpath):
2515 raise error.Abort(
2515 raise error.Abort(
2516 _(b"patch %s does not exist") % filename
2516 _(b"patch %s does not exist") % filename
2517 )
2517 )
2518
2518
2519 if patchname:
2519 if patchname:
2520 self.checkpatchname(patchname, force)
2520 self.checkpatchname(patchname, force)
2521
2521
2522 self.ui.write(
2522 self.ui.write(
2523 _(b'renaming %s to %s\n') % (filename, patchname)
2523 _(b'renaming %s to %s\n') % (filename, patchname)
2524 )
2524 )
2525 util.rename(originpath, self.join(patchname))
2525 util.rename(originpath, self.join(patchname))
2526 else:
2526 else:
2527 patchname = filename
2527 patchname = filename
2528
2528
2529 else:
2529 else:
2530 if filename == b'-' and not patchname:
2530 if filename == b'-' and not patchname:
2531 raise error.Abort(
2531 raise error.Abort(
2532 _(b'need --name to import a patch from -')
2532 _(b'need --name to import a patch from -')
2533 )
2533 )
2534 elif not patchname:
2534 elif not patchname:
2535 patchname = normname(
2535 patchname = normname(
2536 os.path.basename(filename.rstrip(b'/'))
2536 os.path.basename(filename.rstrip(b'/'))
2537 )
2537 )
2538 self.checkpatchname(patchname, force)
2538 self.checkpatchname(patchname, force)
2539 try:
2539 try:
2540 if filename == b'-':
2540 if filename == b'-':
2541 text = self.ui.fin.read()
2541 text = self.ui.fin.read()
2542 else:
2542 else:
2543 fp = hg.openpath(self.ui, filename)
2543 fp = hg.openpath(self.ui, filename)
2544 text = fp.read()
2544 text = fp.read()
2545 fp.close()
2545 fp.close()
2546 except (OSError, IOError):
2546 except (OSError, IOError):
2547 raise error.Abort(_(b"unable to read file %s") % filename)
2547 raise error.Abort(_(b"unable to read file %s") % filename)
2548 patchf = self.opener(patchname, b"w")
2548 patchf = self.opener(patchname, b"w")
2549 patchf.write(text)
2549 patchf.write(text)
2550 patchf.close()
2550 patchf.close()
2551 if not force:
2551 if not force:
2552 checkseries(patchname)
2552 checkseries(patchname)
2553 if patchname not in self.series:
2553 if patchname not in self.series:
2554 index = self.fullseriesend() + i
2554 index = self.fullseriesend() + i
2555 self.fullseries[index:index] = [patchname]
2555 self.fullseries[index:index] = [patchname]
2556 self.parseseries()
2556 self.parseseries()
2557 self.seriesdirty = True
2557 self.seriesdirty = True
2558 self.ui.warn(_(b"adding %s to series file\n") % patchname)
2558 self.ui.warn(_(b"adding %s to series file\n") % patchname)
2559 self.added.append(patchname)
2559 self.added.append(patchname)
2560 imported.append(patchname)
2560 imported.append(patchname)
2561 patchname = None
2561 patchname = None
2562
2562
2563 self.removeundo(repo)
2563 self.removeundo(repo)
2564 return imported
2564 return imported
2565
2565
2566
2566
def fixkeepchangesopts(ui, opts):
    """Honor the mq.keepchanges config knob for push/pop-style commands.

    When ``mq.keepchanges`` is set and the caller passed neither
    ``--force`` nor ``--exact``, return a *copy* of ``opts`` with
    ``b'keep_changes'`` enabled; otherwise return ``opts`` unchanged.
    """
    keepchanges = ui.configbool(b'mq', b'keepchanges')
    if not keepchanges or opts.get(b'force') or opts.get(b'exact'):
        # Explicit --force/--exact (or config off) wins: leave opts alone.
        return opts
    # Copy before mutating so the caller's dict is not modified in place.
    newopts = dict(opts)
    newopts[b'keep_changes'] = True
    return newopts
2577
2577
2578
2578
@command(
    b"qdelete|qremove|qrm",
    [
        (b'k', b'keep', None, _(b'keep patch file')),
        (
            b'r',
            b'rev',
            [],
            _(b'stop managing a revision (DEPRECATED)'),
            _(b'REV'),
        ),
    ],
    _(b'hg qdelete [-k] [PATCH]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    # Delegate the real work to the queue object, then persist its
    # series/status files so the deletion survives this process.
    mq = repo.mq
    mq.delete(repo, patches, pycompat.byteskwargs(opts))
    mq.savedirty()
    return 0
2607
2607
2608
2608
@command(
    b"qapplied",
    [(b'1', b'last', None, _(b'show only the preceding applied patch'))]
    + seriesopts,
    _(b'hg qapplied [-1] [-s] [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)

    # Determine how far into the series to list: up to (and including)
    # the named patch, or up to the topmost applied patch.
    if patch:
        if patch not in q.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    start = 0
    if opts.get(b'last'):
        # -1/--last: show only the patch *before* the current one, so we
        # need at least two applied patches for the request to make sense.
        if not end:
            ui.write(_(b"no patches applied\n"))
            return 1
        if end == 1:
            ui.write(_(b"only one patch applied\n"))
            return 1
        start = end - 2
        end = 1

    q.qseries(
        repo, length=end, start=start, status=b'A', summary=opts.get(b'summary')
    )
2646
2646
2647
2647
@command(
    b"qunapplied",
    [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts,
    _(b'hg qunapplied [-1] [-s] [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    firstonly = opts.get(b'first')

    # Start listing right after the named patch, or after the last
    # applied patch when no patch name was given.
    if patch:
        if patch not in q.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if firstonly and start == len(q.series):
        ui.write(_(b"all patches applied\n"))
        return 1

    # -1/--first limits output to a single entry; None means "to the end".
    length = 1 if firstonly else None
    q.qseries(
        repo,
        start=start,
        length=length,
        status=b'U',
        summary=opts.get(b'summary'),
    )
2683
2683
2684
2684
@command(
    b"qimport",
    [
        (b'e', b'existing', None, _(b'import file in patch directory')),
        (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')),
        (b'f', b'force', None, _(b'overwrite existing files')),
        (
            b'r',
            b'rev',
            [],
            _(b'place existing revisions under mq control'),
            _(b'REV'),
        ),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (b'P', b'push', None, _(b'qpush after importing')),
    ],
    _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev . -n patch will place the current revision
    under mq control). With -g/--git, patches imported with --rev will
    use the git diff format. See the diffs help topic for information
    on why this is important for preserving rename/copy information
    and permission changes. Use :hg:`qfinish` to remove changesets
    from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    opts = pycompat.byteskwargs(opts)
    # Thin CLI wrapper: the heavy lifting is queue.qimport(); the queue's
    # dirty state is always flushed, even when the import aborts.
    with repo.lock(): # cause this may move phase
        q = repo.mq
        try:
            imported = q.qimport(
                repo,
                filename,
                patchname=opts.get(b'name'),
                existing=opts.get(b'existing'),
                force=opts.get(b'force'),
                rev=opts.get(b'rev'),
                git=opts.get(b'git'),
            )
        finally:
            q.savedirty()

    # -P/--push applies the last imported patch, but only for file-based
    # imports: with --rev the changesets are already applied by definition.
    if imported and opts.get(b'push') and not opts.get(b'rev'):
        return q.push(repo, imported[-1])
    return 0
2757
2757
2758
2758
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    patchrepo = q.init(repo, create)
    q.savedirty()
    if not patchrepo:
        # Unversioned queue: nothing more to set up.
        return 0
    if not os.path.exists(patchrepo.wjoin(b'.hgignore')):
        # Keep transient queue state (status/guards) out of version control.
        fp = patchrepo.wvfs(b'.hgignore', b'w')
        fp.write(
            b'^\\.hg\n'
            b'^\\.mq\n'
            b'syntax: glob\n'
            b'status\n'
            b'guards\n'
        )
        fp.close()
    if not os.path.exists(patchrepo.wjoin(b'series')):
        patchrepo.wvfs(b'series', b'w').close()
    patchrepo[None].add([b'.hgignore', b'series'])
    commands.add(ui, patchrepo)
    return 0
2784
2784
2785
2785
@command(
    b"qinit",
    [(b'c', b'create-repo', None, _(b'create queue repository'))],
    _(b'hg qinit [-c]'),
    helpcategory=command.CATEGORY_REPO_CREATION,
    helpbasic=True,
)
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    # opts still has native-str keys here; byteskwargs is not needed.
    create_repo = opts.get('create_repo')
    return qinit(ui, repo, create=create_repo)
2805
2805
2806
2806
@command(
    b"qclone",
    [
        (b'', b'pull', None, _(b'use pull protocol to copy metadata')),
        (
            b'U',
            b'noupdate',
            None,
            _(b'do not update the new working directories'),
        ),
        (
            b'',
            b'uncompressed',
            None,
            _(b'use uncompressed transfer (fast over LAN)'),
        ),
        (
            b'p',
            b'patches',
            b'',
            _(b'location of source patch repository'),
            _(b'REPO'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'hg qclone [OPTION]... SOURCE [DEST]'),
    helpcategory=command.CATEGORY_REPO_CREATION,
    norepo=True,
)
def clone(ui, source, dest=None, **opts):
    """clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith(b'/'):
            url = url[:-1]
        return url + b'/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    __, source_path, __ = urlutil.get_clone_path(ui, source)
    sr = hg.peer(ui, opts, source_path)

    # patches repo (source only)
    if opts.get(b'patches'):
        __, patchespath, __ = urlutil.get_clone_path(ui, opts.get(b'patches'))
    else:
        patchespath = patchdir(sr)
    # Probe the patch repository up front so we abort before cloning
    # anything if the queue repo is missing or unversioned.
    try:
        hg.peer(ui, opts, patchespath)
    except error.RepoError:
        raise error.Abort(
            _(b'versioned patch repository not found (see init --mq)')
        )
    # qbase: node of the first applied patch in the source, if any;
    # destrev: revisions to clone when we can exclude mq patches up front.
    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        # NOTE(review): qbase is still None here, so repo[qbase] is the
        # working-directory context; this tests the wdir phase, not the
        # first applied patch's phase. Looks intentional upstream — verify.
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                # For a remote destination we cannot strip afterwards, so
                # restrict the clone to heads outside the patch stack plus
                # qbase's parent.
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable(b'lookup'):
        # Remote source: best effort — ask the peer where qbase is.
        try:
            qbase = sr.lookup(b'qbase')
        except error.RepoError:
            pass

    ui.note(_(b'cloning main repository\n'))
    sr, dr = hg.clone(
        ui,
        opts,
        sr.url(),
        dest,
        pull=opts.get(b'pull'),
        revs=destrev,
        update=False,
        stream=opts.get(b'uncompressed'),
    )

    ui.note(_(b'cloning patch repository\n'))
    hg.clone(
        ui,
        opts,
        opts.get(b'patches') or patchdir(sr),
        patchdir(dr),
        pull=opts.get(b'pull'),
        update=not opts.get(b'noupdate'),
        stream=opts.get(b'uncompressed'),
    )

    if dr.local():
        repo = dr.local()
        if qbase:
            # The destination must start with no patches applied, so strip
            # the cloned patch-stack changesets (local destinations only).
            ui.note(
                _(
                    b'stripping applied patches from destination '
                    b'repository\n'
                )
            )
            strip(ui, repo, [qbase], update=False, backup=None)
        if not opts.get(b'noupdate'):
            ui.note(_(b'updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())
2930
2930
2931
2931
@command(
    b"qcommit|qci",
    commands.table[b"commit|ci"][1],
    _(b'hg qcommit [OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    inferrepo=True,
)
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    # Run an ordinary commit, but inside the (versioned) queue repository.
    queuerepo = repo.mq.qrepo()
    if not queuerepo:
        raise error.Abort(b'no queue repository')
    commands.commit(queuerepo.ui, queuerepo, *pats, **opts)
2948
2948
2949
2949
@command(
    b"qseries",
    [
        (b'm', b'missing', None, _(b'print patches not in series')),
    ]
    + seriesopts,
    _(b'hg qseries [-ms]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    # opts still has native-str keys here; pass them through directly.
    q = repo.mq
    q.qseries(repo, missing=opts.get('missing'), summary=opts.get('summary'))
    return 0
2967
2967
2968
2968
@command(
    b"qtop",
    seriesopts,
    _(b'hg qtop [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    # Index one past the topmost applied patch, or 0 when nothing is applied.
    t = q.seriesend(True) if q.applied else 0
    if not t:
        ui.write(_(b"no patches applied\n"))
        return 1
    q.qseries(
        repo,
        start=t - 1,
        length=1,
        status=b'A',
        summary=opts.get('summary'),
    )
2996
2996
2997
2997
@command(
    b"qnext",
    seriesopts,
    _(b'hg qnext [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    # Position just past the topmost applied patch == next pushable patch.
    pos = q.seriesend()
    if pos == len(q.series):
        ui.write(_(b"all patches applied\n"))
        return 1
    q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
3014
3014
3015
3015
@command(
    b"qprev",
    seriesopts,
    _(b'hg qprev [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    napplied = len(q.applied)
    # Need at least two applied patches for a "previous" one to exist.
    if napplied == 1:
        ui.write(_(b"only one patch applied\n"))
        return 1
    if napplied == 0:
        ui.write(_(b"no patches applied\n"))
        return 1
    idx = q.series.index(q.applied[-2].name)
    q.qseries(
        repo, start=idx, length=1, status=b'A', summary=opts.get('summary')
    )
3038
3038
3039
3039
def setupheaderopts(ui, opts):
    """Resolve -U/--currentuser and -D/--currentdate into concrete values.

    Mutates ``opts`` in place: fills b'user' from the configured username
    and b'date' from the current time, but only when the corresponding
    "current*" flag is set and no explicit value was given.
    """
    if opts.get(b'currentuser') and not opts.get(b'user'):
        opts[b'user'] = ui.username()
    if opts.get(b'currentdate') and not opts.get(b'date'):
        opts[b'date'] = b"%d %d" % dateutil.makedate()
3045
3045
3046
3046
@command(
    b"qnew",
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')),
        (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')),
        (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')),
        (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')),
    ]
    + cmdutil.walkopts
    + cmdutil.commitopts,
    _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
    inferrepo=True,
)
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    # **kwargs arrive with str keys; convert to the bytes keys used
    # throughout mq, and back again (strkwargs) when re-expanding below.
    opts = pycompat.byteskwargs(opts)
    msg = cmdutil.logmessage(ui, opts)
    q = repo.mq
    opts[b'msg'] = msg
    # Fill in user/date from -U/-D when -u/-d were not given.
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **pycompat.strkwargs(opts))
    # Write out any modified mq state files.
    q.savedirty()
    return 0
3098
3098
3099
3099
@command(
    b"qrefresh",
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (
            b's',
            b'short',
            None,
            _(b'refresh only files already in the patch and specified files'),
        ),
        (
            b'U',
            b'currentuser',
            None,
            _(b'add/update author field in patch with current user'),
        ),
        (
            b'u',
            b'user',
            b'',
            _(b'add/update author field in patch with given user'),
            _(b'USER'),
        ),
        (
            b'D',
            b'currentdate',
            None,
            _(b'add/update date field in patch with current date'),
        ),
        (
            b'd',
            b'date',
            b'',
            _(b'add/update date field in patch with given date'),
            _(b'DATE'),
        ),
    ]
    + cmdutil.walkopts
    + cmdutil.commitopts,
    _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
    inferrepo=True,
)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    # Convert str kwargs keys to the bytes keys mq uses internally.
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    # Hold the working-copy lock for the whole refresh + state save so
    # the patch files and dirstate stay consistent.
    with repo.wlock():
        ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
        q.savedirty()
        return ret
3174
3174
3175
3175
@command(
    b"qdiff",
    cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
    _(b'hg qdiff [OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_FILE_CONTENTS,
    helpbasic=True,
    inferrepo=True,
)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # Start the pager before any output is produced.
    ui.pager(b'qdiff')
    repo.mq.diff(repo, pats, pycompat.byteskwargs(opts))
    return 0
3202
3202
3203
3203
@command(
    b'qfold',
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'k', b'keep', None, _(b'keep folded patch files')),
    ]
    + cmdutil.commitopts,
    _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    if not files:
        raise error.Abort(_(b'qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_(b'no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup(b'qtip')
    patches = []
    messages = []
    # Resolve each requested name and validate it can be folded.
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # NOTE(review): despite the "skipping" message, p is still
            # appended below -- confirm whether a `continue` was intended.
            ui.warn(_(b'skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise error.Abort(
                _(b'qfold cannot fold already applied patch %s') % p
            )
        patches.append(p)

    # Apply each patch onto the working directory, collecting their
    # commit messages (only when no -m/-l message was given).
    for p in patches:
        if not message:
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_(b'error folding patch %s') % p)

    if not message:
        # Concatenate the parent's message with each folded patch's
        # message, separated by '* * *' lines.
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append(b'* * *')
                message.extend(msg)
        message = b'\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    with repo.wlock():
        # Refresh the current patch with the cumulative changes, then
        # drop (or keep, with -k) the folded patch files.
        q.refresh(
            repo,
            msg=message,
            git=diffopts.git,
            edit=opts.get(b'edit'),
            editform=b'mq.qfold',
        )
        q.delete(repo, patches, opts)
        q.savedirty()
3282
3282
3283
3283
@command(
    b"qgoto",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'overwrite any local changes')),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qgoto [OPTION]... PATCH'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def goto(ui, repo, patch, **opts):
    """push or pop patches until named patch is at top of stack

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get(b'no_backup')
    keepchanges = opts.get(b'keep_changes')
    # If the target patch is already applied we pop down to it,
    # otherwise we push up to it.
    if q.isapplied(patch):
        ret = q.pop(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    else:
        ret = q.push(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    q.savedirty()
    return ret
3327
3327
3328
3328
@command(
    b"qguard",
    [
        (b'l', b'list', None, _(b'list all patches and guards')),
        (b'n', b'none', None, _(b'drop all guards')),
    ],
    _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def guard(ui, repo, *args, **opts):
    """set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    """

    def status(idx):
        # Print one series entry and its guards, labeled for coloring
        # according to its state (applied/unapplied/guarded).
        guards = q.seriesguards[idx] or [b'unguarded']
        if q.series[idx] in applied:
            state = b'applied'
        elif q.pushable(idx)[0]:
            state = b'unapplied'
        else:
            state = b'guarded'
        label = b'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write(b'%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith(b'+'):
                ui.write(guard, label=b'qguard.positive')
            elif guard.startswith(b'-'):
                ui.write(guard, label=b'qguard.negative')
            else:
                ui.write(guard, label=b'qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(b' ')
        ui.write(b'\n')

    q = repo.mq
    applied = {p.name for p in q.applied}
    patch = None
    args = list(args)
    # NB: opts here still has str keys (no byteskwargs conversion above).
    if opts.get('list'):
        if args or opts.get('none'):
            raise error.Abort(
                _(b'cannot mix -l/--list with options or arguments')
            )
        # py3: builtin range replaces the removed pycompat.xrange shim.
        for i in range(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in b'-+':
        # No explicit patch name: default to the topmost applied patch.
        if not q.applied:
            raise error.Abort(_(b'no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in b'-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_(b'no patch to work with'))
    if args or opts.get('none'):
        # Remaining args are the new guard list (-n clears all guards).
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_(b'no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        status(q.series.index(q.lookup(patch)))
3411
3411
3412
3412
@command(
    b"qheader",
    [],
    _(b'hg qheader [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        # No patch named: default to the currently applied tip patch.
        if not q.applied:
            ui.write(_(b'no patches applied\n'))
            return 1
        patch = q.lookup(b'qtip')
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write(b'\n'.join(ph.message) + b'\n')
3435
3435
3436
3436
def lastsavename(path):
    """Return ``(fullpath, index)`` of the highest-numbered ``<path>.N``.

    Scans the directory containing *path* (a bytes path) for entries
    matching ``<basename>.<number>`` and picks the one with the largest
    number. Returns ``(None, None)`` when no such entry exists.
    """
    dirname, base = os.path.split(path)
    pattern = re.compile(b"%s.([0-9]+)" % base)
    best_index = None
    best_name = None
    for entry in os.listdir(dirname):
        matched = pattern.match(entry)
        if not matched:
            continue
        idx = int(matched.group(1))
        if best_index is None or idx > best_index:
            best_index = idx
            best_name = entry
    if best_name:
        return (os.path.join(dirname, best_name), best_index)
    return (None, None)
3453
3453
3454
3454
def savename(path):
    """Return the next free backup name ``<path>.N``.

    N is one past the highest existing index found by lastsavename(),
    or 1 when no numbered sibling exists yet.
    """
    last, index = lastsavename(path)
    if last is None:
        index = 0
    return path + b".%d" % (index + 1)
3461
3461
3462
3462
@command(
    b"qpush",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'apply on top of local changes')),
        (
            b'e',
            b'exact',
            None,
            _(b'apply the target patch to its recorded parent'),
        ),
        (b'l', b'list', None, _(b'list patch name in commit text')),
        (b'a', b'all', None, _(b'apply all patches')),
        (b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')),
        (b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')),
        (
            b'',
            b'move',
            None,
            _(b'reorder patch series and apply only the patch'),
        ),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True,
)
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    if opts.get(b'merge'):
        # Deprecated -m/--merge mode: push by merging against a saved
        # queue, either the named one (-n) or the last autosaved one.
        if opts.get(b'name'):
            newpath = repo.vfs.join(opts.get(b'name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_(b"no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_(b"merging with queue at: %s\n") % mergeq.path)
    ret = q.push(
        repo,
        patch,
        force=opts.get(b'force'),
        list=opts.get(b'list'),
        mergeq=mergeq,
        all=opts.get(b'all'),
        move=opts.get(b'move'),
        exact=opts.get(b'exact'),
        nobackup=opts.get(b'no_backup'),
        keepchanges=opts.get(b'keep_changes'),
    )
    return ret
3533
3533
3534
3534
@command(
    b"qpop",
    [
        (b'a', b'all', None, _(b'pop all patches')),
        (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'forget any local changes to patched files')),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True,
)
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    if opts.get(b'name'):
        # Deprecated -n mode: pop from a named queue; in that case the
        # working directory is not updated (localupdate=False).
        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
        ui.warn(_(b'using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(
        repo,
        patch,
        force=opts.get(b'force'),
        update=localupdate,
        all=opts.get(b'all'),
        nobackup=opts.get(b'no_backup'),
        keepchanges=opts.get(b'keep_changes'),
    )
    q.savedirty()
    return ret
3587
3587
3588
3588
@command(
    b"qrename|qmv",
    [],
    _(b'hg qrename PATCH1 [PATCH2]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    # Single-argument form: rename the topmost applied patch to `patch`.
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_(b'no patches applied\n'))
            return
        patch = q.lookup(b'qtip')

    # Renaming into an existing directory keeps the source basename.
    target = q.join(name)
    if os.path.isdir(target):
        name = normname(os.path.join(name, os.path.basename(patch)))
        target = q.join(name)
    q.checkpatchname(name)

    ui.note(_(b'renaming %s to %s\n') % (patch, name))
    # Rewrite the series entry, carrying over any guards attached to it.
    series_idx = q.findseries(patch)
    guard_list = q.guard_re.findall(q.fullseries[series_idx])
    q.fullseries[series_idx] = name + b''.join(
        [b' #' + g for g in guard_list]
    )
    q.parseseries()
    q.seriesdirty = True

    # If the patch is applied, point its status entry at the new name.
    applied_info = q.isapplied(patch)
    if applied_info:
        q.applied[applied_info[0]] = statusentry(applied_info[1], name)
        q.applieddirty = True

    target_dir = os.path.dirname(target)
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    util.rename(q.join(patch), target)

    # Mirror the rename inside the versioned patch-queue repo, if one exists.
    patchrepo = q.qrepo()
    if patchrepo and patch in patchrepo.dirstate:
        workctx = patchrepo[None]
        with patchrepo.wlock():
            if patchrepo.dirstate.get_entry(patch).added:
                # Not yet committed: retrack under the new name.
                patchrepo.dirstate.set_untracked(patch)
                patchrepo.dirstate.set_tracked(name)
            else:
                # Committed before: record as a copy, then forget the old.
                workctx.copy(patch, name)
                workctx.forget([patch])

    q.savedirty()
3648
3648
3649
3649
@command(
    b"qrestore",
    [
        (b'd', b'delete', None, _(b'delete save entry')),
        (b'u', b'update', None, _(b'update queue working directory')),
    ],
    _(b'hg qrestore [-d] [-u] REV'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    # Resolve the user-supplied revision to a node first.
    rev = repo.lookup(rev)
    q.restore(
        repo, rev, delete=opts.get('delete'), qupdate=opts.get('update')
    )
    q.savedirty()
    return 0
3668
3668
3669
3669
@command(
    b"qsave",
    [
        (b'c', b'copy', None, _(b'copy patch directory')),
        (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
        (b'e', b'empty', None, _(b'clear queue status file')),
        (b'f', b'force', None, _(b'force copy')),
    ]
    + cmdutil.commitopts,
    _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    msg = cmdutil.logmessage(ui, opts)
    status = q.save(repo, msg=msg)
    if status:
        # q.save failed; propagate its return code unchanged.
        return status
    q.savedirty()  # save to .hg/patches before copying
    if opts.get(b'copy'):
        srcpath = q.path
        if opts.get(b'name'):
            # Explicit destination: refuse to clobber unless forced.
            copydest = os.path.join(q.basepath, opts.get(b'name'))
            if os.path.exists(copydest):
                if not os.path.isdir(copydest):
                    raise error.Abort(
                        _(b'destination %s exists and is not a directory')
                        % copydest
                    )
                if not opts.get(b'force'):
                    raise error.Abort(
                        _(b'destination %s exists, use -f to force') % copydest
                    )
        else:
            copydest = savename(srcpath)
        ui.warn(_(b"copy %s to %s\n") % (srcpath, copydest))
        util.copyfiles(srcpath, copydest)
    if opts.get(b'empty'):
        # Forget every applied patch and persist the cleared status file.
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0
3716
3716
3717
3717
@command(
    b"qselect",
    [
        (b'n', b'none', None, _(b'disable all guards')),
        (b's', b'series', None, _(b'list all guards in series file')),
        (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
        (b'', b'reapply', None, _(b'pop, then reapply patches')),
    ],
    _(b'hg qselect [OPTION]... [GUARD]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def select(ui, repo, *args, **opts):
    """set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable    (negative guard)
        qguard bar.patch    +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    guards = q.active()
    # True when the i-th applied patch passes the active guard set.
    pushable = lambda i: q.pushable(q.applied[i].name)[0]
    if args or opts.get(b'none'):
        # Changing (or clearing) the active guard set: record the
        # unapplied/guarded counts beforehand so we can report deltas.
        # Fix: use the Py3 builtin `range` instead of the removed
        # `pycompat.xrange` compatibility shim.
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in range(len(q.applied)) if not pushable(i)]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_(b'guards deactivated\n'))
        if not opts.get(b'pop') and not opts.get(b'reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in range(len(q.applied)) if not pushable(i)]
            if len(unapplied) != len(old_unapplied):
                ui.status(
                    _(
                        b'number of unguarded, unapplied patches has '
                        b'changed from %d to %d\n'
                    )
                    % (len(old_unapplied), len(unapplied))
                )
            if len(guarded) != len(old_guarded):
                ui.status(
                    _(
                        b'number of guarded, applied patches has changed '
                        b'from %d to %d\n'
                    )
                    % (len(old_guarded), len(guarded))
                )
    elif opts.get(b'series'):
        # -s/--series: tally how many series entries carry each guard.
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards[b'NONE'] = noguards
        guards = list(guards.items())
        # Sort by guard name without its leading +/- sign.
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_(b'guards in series file:\n'))
            for guard, count in guards:
                ui.note(b'%2d ' % count)
                ui.write(guard, b'\n')
        else:
            ui.note(_(b'no guards in series file\n'))
    else:
        # No arguments: just print the currently active guards.
        if guards:
            ui.note(_(b'active guards:\n'))
            for g in guards:
                ui.write(g, b'\n')
        else:
            ui.write(_(b'no active guards\n'))
    reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
    popped = False
    if opts.get(b'pop') or opts.get(b'reapply'):
        # Pop back to just before the first applied patch that is now
        # guarded under the new active guard set.
        for i in range(len(q.applied)):
            if not pushable(i):
                ui.status(_(b'popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, q.applied[i - 1].name)
                break
    if popped:
        try:
            if reapply:
                ui.status(_(b'reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()
3844
3840
3845
3841
@command(
    b"qfinish",
    [(b'a', b'applied', None, _(b'finish all applied changesets'))],
    _(b'hg qfinish [-a] [REV]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    # -a/--applied prepends the whole applied stack; otherwise at least
    # one explicit revision is required.
    if opts.get('applied'):
        revrange = (b'qbase::qtip',) + revrange
    elif not revrange:
        raise error.Abort(_(b'no revisions specified'))

    q = repo.mq
    if not q.applied:
        ui.status(_(b'no patches applied\n'))
        return 0

    revs = logcmdutil.revrange(repo, revrange)
    if repo[b'.'].rev() in revs and repo[None].files():
        ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
    # queue.finish may changes phases but leave the responsibility to lock the
    # repo to the caller to avoid deadlock with wlock. This command code is
    # responsibility for this locking.
    with repo.lock():
        q.finish(repo, revs)
        q.savedirty()
    return 0
3890
3886
3891
3887
@command(
    b"qqueue",
    [
        (b'l', b'list', False, _(b'list all available queues')),
        (b'', b'active', False, _(b'print name of active queue')),
        (b'c', b'create', False, _(b'create new queue')),
        (b'', b'rename', False, _(b'rename active queue')),
        (b'', b'delete', False, _(b'delete reference to queue')),
        (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
    ],
    _(b'[OPTION] [QUEUE]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def qqueue(ui, repo, name=None, **opts):
    """manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    """
    q = repo.mq
    # On-disk bookkeeping: the registry of all queues and the marker file
    # naming the active one (empty/absent marker means the default queue).
    _defaultqueue = b'patches'
    _allqueues = b'patches.queues'
    _activequeue = b'patches.queue'

    def _getcurrent():
        # Derive the active queue name from the queue directory's basename,
        # stripping the b'patches-' prefix used for non-default queues.
        cur = os.path.basename(q.path)
        if cur.startswith(b'patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        # True when the queue registry file does not exist yet.
        try:
            fh = repo.vfs(_allqueues, b'r')
            fh.close()
        except IOError:
            return True

        return False

    def _getqueues():
        # Read the registry (one queue name per line); always include the
        # current queue, and fall back to the default on a missing file.
        current = _getcurrent()

        try:
            fh = repo.vfs(_allqueues, b'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    def _setactive(name):
        # Refuse to switch while patches from the current queue are applied.
        if q.applied:
            raise error.Abort(
                _(
                    b'new queue created, but cannot make active '
                    b'as patches are applied'
                )
            )
        _setactivenocheck(name)

    def _setactivenocheck(name):
        # Write the active-queue marker; the default queue is recorded as
        # an empty file.
        fh = repo.vfs(_activequeue, b'w')
        if name != b'patches':
            fh.write(name)
        fh.close()

    def _addqueue(name):
        # Append a queue name to the registry file.
        fh = repo.vfs(_allqueues, b'a')
        fh.write(b'%s\n' % (name,))
        fh.close()

    def _queuedir(name):
        # Map a queue name to its patch directory inside .hg.
        if name == b'patches':
            return repo.vfs.join(b'patches')
        else:
            return repo.vfs.join(b'patches-' + name)

    def _validname(name):
        # Queue names may not contain path/guard separator characters.
        for n in name:
            if n in b':\\/.':
                return False
        return True

    def _delete(name):
        # Drop `name` from the registry via an atomic rewrite-and-rename.
        # Uses `existing` from the enclosing scope (bound under wlock below).
        if name not in existing:
            raise error.Abort(_(b'cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise error.Abort(_(b'cannot delete currently active queue'))

        fh = repo.vfs(b'patches.queues.new', b'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write(b'%s\n' % (queue,))
        fh.close()
        repo.vfs.rename(b'patches.queues.new', _allqueues)

    opts = pycompat.byteskwargs(opts)
    # Read-only forms (--list / --active / no name) need no lock.
    if not name or opts.get(b'list') or opts.get(b'active'):
        current = _getcurrent()
        if opts.get(b'active'):
            ui.write(b'%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write(b'%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(b' (active)\n'))
            else:
                ui.write(b'\n')
        return

    if not _validname(name):
        raise error.Abort(
            _(b'invalid queue name, may not contain the characters ":\\/."')
        )

    # All mutating forms run under the working-directory lock.
    with repo.wlock():
        existing = _getqueues()

        if opts.get(b'create'):
            if name in existing:
                raise error.Abort(_(b'queue "%s" already exists') % name)
            if _noqueues():
                # First registry write: record the default queue too.
                _addqueue(_defaultqueue)
            _addqueue(name)
            _setactive(name)
        elif opts.get(b'rename'):
            current = _getcurrent()
            if name == current:
                raise error.Abort(
                    _(b'can\'t rename "%s" to its current name') % name
                )
            if name in existing:
                raise error.Abort(_(b'queue "%s" already exists') % name)

            olddir = _queuedir(current)
            newdir = _queuedir(name)

            if os.path.exists(newdir):
                raise error.Abort(
                    _(b'non-queue directory "%s" already exists') % newdir
                )

            # Rewrite the registry, substituting the new name, and move the
            # patch directory alongside; then flip the new file into place.
            fh = repo.vfs(b'patches.queues.new', b'w')
            for queue in existing:
                if queue == current:
                    fh.write(b'%s\n' % (name,))
                    if os.path.exists(olddir):
                        util.rename(olddir, newdir)
                else:
                    fh.write(b'%s\n' % (queue,))
            fh.close()
            repo.vfs.rename(b'patches.queues.new', _allqueues)
            _setactivenocheck(name)
        elif opts.get(b'delete'):
            _delete(name)
        elif opts.get(b'purge'):
            # Like --delete, but also removes the patch directory; tolerates
            # a queue that is unregistered or has no directory.
            if name in existing:
                _delete(name)
            qdir = _queuedir(name)
            if os.path.exists(qdir):
                shutil.rmtree(qdir)
        else:
            # Bare name: switch to an existing queue.
            if name not in existing:
                raise error.Abort(_(b'use --create to create a new queue'))
            _setactive(name)
4079
4075
4080
4076
def mqphasedefaults(repo, roots):
    """callback used to set mq changeset as secret when no phase data exists"""
    applied = repo.mq.applied
    if applied:
        # mq.secret decides whether mq changesets default to secret or draft.
        secret = repo.ui.configbool(b'mq', b'secret')
        mqphase = phases.secret if secret else phases.draft
        qbase = repo[applied[0].node]
        roots[mqphase].add(qbase.node())
    return roots
4091
4087
4092
4088
4093 def reposetup(ui, repo):
4089 def reposetup(ui, repo):
4094 class mqrepo(repo.__class__):
4090 class mqrepo(repo.__class__):
4095 @localrepo.unfilteredpropertycache
4091 @localrepo.unfilteredpropertycache
4096 def mq(self):
4092 def mq(self):
4097 return queue(self.ui, self.baseui, self.path)
4093 return queue(self.ui, self.baseui, self.path)
4098
4094
4099 def invalidateall(self):
4095 def invalidateall(self):
4100 super(mqrepo, self).invalidateall()
4096 super(mqrepo, self).invalidateall()
4101 if localrepo.hasunfilteredcache(self, 'mq'):
4097 if localrepo.hasunfilteredcache(self, 'mq'):
4102 # recreate mq in case queue path was changed
4098 # recreate mq in case queue path was changed
4103 delattr(self.unfiltered(), 'mq')
4099 delattr(self.unfiltered(), 'mq')
4104
4100
4105 def abortifwdirpatched(self, errmsg, force=False):
4101 def abortifwdirpatched(self, errmsg, force=False):
4106 if self.mq.applied and self.mq.checkapplied and not force:
4102 if self.mq.applied and self.mq.checkapplied and not force:
4107 parents = self.dirstate.parents()
4103 parents = self.dirstate.parents()
4108 patches = [s.node for s in self.mq.applied]
4104 patches = [s.node for s in self.mq.applied]
4109 if any(p in patches for p in parents):
4105 if any(p in patches for p in parents):
4110 raise error.Abort(errmsg)
4106 raise error.Abort(errmsg)
4111
4107
4112 def commit(
4108 def commit(
4113 self,
4109 self,
4114 text=b"",
4110 text=b"",
4115 user=None,
4111 user=None,
4116 date=None,
4112 date=None,
4117 match=None,
4113 match=None,
4118 force=False,
4114 force=False,
4119 editor=False,
4115 editor=False,
4120 extra=None,
4116 extra=None,
4121 ):
4117 ):
4122 if extra is None:
4118 if extra is None:
4123 extra = {}
4119 extra = {}
4124 self.abortifwdirpatched(
4120 self.abortifwdirpatched(
4125 _(b'cannot commit over an applied mq patch'), force
4121 _(b'cannot commit over an applied mq patch'), force
4126 )
4122 )
4127
4123
4128 return super(mqrepo, self).commit(
4124 return super(mqrepo, self).commit(
4129 text, user, date, match, force, editor, extra
4125 text, user, date, match, force, editor, extra
4130 )
4126 )
4131
4127
4132 def checkpush(self, pushop):
4128 def checkpush(self, pushop):
4133 if self.mq.applied and self.mq.checkapplied and not pushop.force:
4129 if self.mq.applied and self.mq.checkapplied and not pushop.force:
4134 outapplied = [e.node for e in self.mq.applied]
4130 outapplied = [e.node for e in self.mq.applied]
4135 if pushop.revs:
4131 if pushop.revs:
4136 # Assume applied patches have no non-patch descendants and
4132 # Assume applied patches have no non-patch descendants and
4137 # are not on remote already. Filtering any changeset not
4133 # are not on remote already. Filtering any changeset not
4138 # pushed.
4134 # pushed.
4139 heads = set(pushop.revs)
4135 heads = set(pushop.revs)
4140 for node in reversed(outapplied):
4136 for node in reversed(outapplied):
4141 if node in heads:
4137 if node in heads:
4142 break
4138 break
4143 else:
4139 else:
4144 outapplied.pop()
4140 outapplied.pop()
4145 # looking for pushed and shared changeset
4141 # looking for pushed and shared changeset
4146 for node in outapplied:
4142 for node in outapplied:
4147 if self[node].phase() < phases.secret:
4143 if self[node].phase() < phases.secret:
4148 raise error.Abort(_(b'source has mq patches applied'))
4144 raise error.Abort(_(b'source has mq patches applied'))
4149 # no non-secret patches pushed
4145 # no non-secret patches pushed
4150 super(mqrepo, self).checkpush(pushop)
4146 super(mqrepo, self).checkpush(pushop)
4151
4147
4152 def _findtags(self):
4148 def _findtags(self):
4153 '''augment tags from base class with patch tags'''
4149 '''augment tags from base class with patch tags'''
4154 result = super(mqrepo, self)._findtags()
4150 result = super(mqrepo, self)._findtags()
4155
4151
4156 q = self.mq
4152 q = self.mq
4157 if not q.applied:
4153 if not q.applied:
4158 return result
4154 return result
4159
4155
4160 mqtags = [(patch.node, patch.name) for patch in q.applied]
4156 mqtags = [(patch.node, patch.name) for patch in q.applied]
4161
4157
4162 try:
4158 try:
4163 # for now ignore filtering business
4159 # for now ignore filtering business
4164 self.unfiltered().changelog.rev(mqtags[-1][0])
4160 self.unfiltered().changelog.rev(mqtags[-1][0])
4165 except error.LookupError:
4161 except error.LookupError:
4166 self.ui.warn(
4162 self.ui.warn(
4167 _(b'mq status file refers to unknown node %s\n')
4163 _(b'mq status file refers to unknown node %s\n')
4168 % short(mqtags[-1][0])
4164 % short(mqtags[-1][0])
4169 )
4165 )
4170 return result
4166 return result
4171
4167
4172 # do not add fake tags for filtered revisions
4168 # do not add fake tags for filtered revisions
4173 included = self.changelog.hasnode
4169 included = self.changelog.hasnode
4174 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
4170 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
4175 if not mqtags:
4171 if not mqtags:
4176 return result
4172 return result
4177
4173
4178 mqtags.append((mqtags[-1][0], b'qtip'))
4174 mqtags.append((mqtags[-1][0], b'qtip'))
4179 mqtags.append((mqtags[0][0], b'qbase'))
4175 mqtags.append((mqtags[0][0], b'qbase'))
4180 mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
4176 mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
4181 tags = result[0]
4177 tags = result[0]
4182 for patch in mqtags:
4178 for patch in mqtags:
4183 if patch[1] in tags:
4179 if patch[1] in tags:
4184 self.ui.warn(
4180 self.ui.warn(
4185 _(b'tag %s overrides mq patch of the same name\n')
4181 _(b'tag %s overrides mq patch of the same name\n')
4186 % patch[1]
4182 % patch[1]
4187 )
4183 )
4188 else:
4184 else:
4189 tags[patch[1]] = patch[0]
4185 tags[patch[1]] = patch[0]
4190
4186
4191 return result
4187 return result
4192
4188
4193 if repo.local():
4189 if repo.local():
4194 repo.__class__ = mqrepo
4190 repo.__class__ = mqrepo
4195
4191
4196 repo._phasedefaults.append(mqphasedefaults)
4192 repo._phasedefaults.append(mqphasedefaults)
4197
4193
4198
4194
4199 def mqimport(orig, ui, repo, *args, **kwargs):
4195 def mqimport(orig, ui, repo, *args, **kwargs):
4200 if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
4196 if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
4201 'no_commit', False
4197 'no_commit', False
4202 ):
4198 ):
4203 repo.abortifwdirpatched(
4199 repo.abortifwdirpatched(
4204 _(b'cannot import over an applied patch'), kwargs.get('force')
4200 _(b'cannot import over an applied patch'), kwargs.get('force')
4205 )
4201 )
4206 return orig(ui, repo, *args, **kwargs)
4202 return orig(ui, repo, *args, **kwargs)
4207
4203
4208
4204
4209 def mqinit(orig, ui, *args, **kwargs):
4205 def mqinit(orig, ui, *args, **kwargs):
4210 mq = kwargs.pop('mq', None)
4206 mq = kwargs.pop('mq', None)
4211
4207
4212 if not mq:
4208 if not mq:
4213 return orig(ui, *args, **kwargs)
4209 return orig(ui, *args, **kwargs)
4214
4210
4215 if args:
4211 if args:
4216 repopath = args[0]
4212 repopath = args[0]
4217 if not hg.islocal(repopath):
4213 if not hg.islocal(repopath):
4218 raise error.Abort(
4214 raise error.Abort(
4219 _(b'only a local queue repository may be initialized')
4215 _(b'only a local queue repository may be initialized')
4220 )
4216 )
4221 else:
4217 else:
4222 repopath = cmdutil.findrepo(encoding.getcwd())
4218 repopath = cmdutil.findrepo(encoding.getcwd())
4223 if not repopath:
4219 if not repopath:
4224 raise error.Abort(
4220 raise error.Abort(
4225 _(b'there is no Mercurial repository here (.hg not found)')
4221 _(b'there is no Mercurial repository here (.hg not found)')
4226 )
4222 )
4227 repo = hg.repository(ui, repopath)
4223 repo = hg.repository(ui, repopath)
4228 return qinit(ui, repo, True)
4224 return qinit(ui, repo, True)
4229
4225
4230
4226
4231 def mqcommand(orig, ui, repo, *args, **kwargs):
4227 def mqcommand(orig, ui, repo, *args, **kwargs):
4232 """Add --mq option to operate on patch repository instead of main"""
4228 """Add --mq option to operate on patch repository instead of main"""
4233
4229
4234 # some commands do not like getting unknown options
4230 # some commands do not like getting unknown options
4235 mq = kwargs.pop('mq', None)
4231 mq = kwargs.pop('mq', None)
4236
4232
4237 if not mq:
4233 if not mq:
4238 return orig(ui, repo, *args, **kwargs)
4234 return orig(ui, repo, *args, **kwargs)
4239
4235
4240 q = repo.mq
4236 q = repo.mq
4241 r = q.qrepo()
4237 r = q.qrepo()
4242 if not r:
4238 if not r:
4243 raise error.Abort(_(b'no queue repository'))
4239 raise error.Abort(_(b'no queue repository'))
4244 return orig(r.ui, r, *args, **kwargs)
4240 return orig(r.ui, r, *args, **kwargs)
4245
4241
4246
4242
4247 def summaryhook(ui, repo):
4243 def summaryhook(ui, repo):
4248 q = repo.mq
4244 q = repo.mq
4249 m = []
4245 m = []
4250 a, u = len(q.applied), len(q.unapplied(repo))
4246 a, u = len(q.applied), len(q.unapplied(repo))
4251 if a:
4247 if a:
4252 m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
4248 m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
4253 if u:
4249 if u:
4254 m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
4250 m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
4255 if m:
4251 if m:
4256 # i18n: column positioning for "hg summary"
4252 # i18n: column positioning for "hg summary"
4257 ui.write(_(b"mq: %s\n") % b', '.join(m))
4253 ui.write(_(b"mq: %s\n") % b', '.join(m))
4258 else:
4254 else:
4259 # i18n: column positioning for "hg summary"
4255 # i18n: column positioning for "hg summary"
4260 ui.note(_(b"mq: (empty queue)\n"))
4256 ui.note(_(b"mq: (empty queue)\n"))
4261
4257
4262
4258
4263 revsetpredicate = registrar.revsetpredicate()
4259 revsetpredicate = registrar.revsetpredicate()
4264
4260
4265
4261
4266 @revsetpredicate(b'mq()')
4262 @revsetpredicate(b'mq()')
4267 def revsetmq(repo, subset, x):
4263 def revsetmq(repo, subset, x):
4268 """Changesets managed by MQ."""
4264 """Changesets managed by MQ."""
4269 revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
4265 revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
4270 applied = {repo[r.node].rev() for r in repo.mq.applied}
4266 applied = {repo[r.node].rev() for r in repo.mq.applied}
4271 return smartset.baseset([r for r in subset if r in applied])
4267 return smartset.baseset([r for r in subset if r in applied])
4272
4268
4273
4269
4274 # tell hggettext to extract docstrings from these functions:
4270 # tell hggettext to extract docstrings from these functions:
4275 i18nfunctions = [revsetmq]
4271 i18nfunctions = [revsetmq]
4276
4272
4277
4273
4278 def extsetup(ui):
4274 def extsetup(ui):
4279 # Ensure mq wrappers are called first, regardless of extension load order by
4275 # Ensure mq wrappers are called first, regardless of extension load order by
4280 # NOT wrapping in uisetup() and instead deferring to init stage two here.
4276 # NOT wrapping in uisetup() and instead deferring to init stage two here.
4281 mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
4277 mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
4282
4278
4283 extensions.wrapcommand(commands.table, b'import', mqimport)
4279 extensions.wrapcommand(commands.table, b'import', mqimport)
4284 cmdutil.summaryhooks.add(b'mq', summaryhook)
4280 cmdutil.summaryhooks.add(b'mq', summaryhook)
4285
4281
4286 entry = extensions.wrapcommand(commands.table, b'init', mqinit)
4282 entry = extensions.wrapcommand(commands.table, b'init', mqinit)
4287 entry[1].extend(mqopt)
4283 entry[1].extend(mqopt)
4288
4284
4289 def dotable(cmdtable):
4285 def dotable(cmdtable):
4290 for cmd, entry in cmdtable.items():
4286 for cmd, entry in cmdtable.items():
4291 cmd = cmdutil.parsealiases(cmd)[0]
4287 cmd = cmdutil.parsealiases(cmd)[0]
4292 func = entry[0]
4288 func = entry[0]
4293 if func.norepo:
4289 if func.norepo:
4294 continue
4290 continue
4295 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
4291 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
4296 entry[1].extend(mqopt)
4292 entry[1].extend(mqopt)
4297
4293
4298 dotable(commands.table)
4294 dotable(commands.table)
4299
4295
4300 thismodule = sys.modules["hgext.mq"]
4296 thismodule = sys.modules["hgext.mq"]
4301 for extname, extmodule in extensions.extensions():
4297 for extname, extmodule in extensions.extensions():
4302 if extmodule != thismodule:
4298 if extmodule != thismodule:
4303 dotable(getattr(extmodule, 'cmdtable', {}))
4299 dotable(getattr(extmodule, 'cmdtable', {}))
4304
4300
4305
4301
4306 colortable = {
4302 colortable = {
4307 b'qguard.negative': b'red',
4303 b'qguard.negative': b'red',
4308 b'qguard.positive': b'yellow',
4304 b'qguard.positive': b'yellow',
4309 b'qguard.unguarded': b'green',
4305 b'qguard.unguarded': b'green',
4310 b'qseries.applied': b'blue bold underline',
4306 b'qseries.applied': b'blue bold underline',
4311 b'qseries.guarded': b'black bold',
4307 b'qseries.guarded': b'black bold',
4312 b'qseries.missing': b'red bold',
4308 b'qseries.missing': b'red bold',
4313 b'qseries.unapplied': b'black bold',
4309 b'qseries.unapplied': b'black bold',
4314 }
4310 }
@@ -1,559 +1,559 b''
1 import collections
1 import collections
2 import errno
2 import errno
3 import mmap
3 import mmap
4 import os
4 import os
5 import struct
5 import struct
6 import time
6 import time
7
7
8 from mercurial.i18n import _
8 from mercurial.i18n import _
9 from mercurial.pycompat import (
9 from mercurial.pycompat import (
10 getattr,
10 getattr,
11 open,
11 open,
12 )
12 )
13 from mercurial.node import hex
13 from mercurial.node import hex
14 from mercurial import (
14 from mercurial import (
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 util,
17 util,
18 vfs as vfsmod,
18 vfs as vfsmod,
19 )
19 )
20 from mercurial.utils import hashutil
20 from mercurial.utils import hashutil
21 from . import shallowutil
21 from . import shallowutil
22
22
23 osutil = policy.importmod('osutil')
23 osutil = policy.importmod('osutil')
24
24
25 # The pack version supported by this implementation. This will need to be
25 # The pack version supported by this implementation. This will need to be
26 # rev'd whenever the byte format changes. Ex: changing the fanout prefix,
26 # rev'd whenever the byte format changes. Ex: changing the fanout prefix,
27 # changing any of the int sizes, changing the delta algorithm, etc.
27 # changing any of the int sizes, changing the delta algorithm, etc.
28 PACKVERSIONSIZE = 1
28 PACKVERSIONSIZE = 1
29 INDEXVERSIONSIZE = 2
29 INDEXVERSIONSIZE = 2
30
30
31 FANOUTSTART = INDEXVERSIONSIZE
31 FANOUTSTART = INDEXVERSIONSIZE
32
32
33 # Constant that indicates a fanout table entry hasn't been filled in. (This does
33 # Constant that indicates a fanout table entry hasn't been filled in. (This does
34 # not get serialized)
34 # not get serialized)
35 EMPTYFANOUT = -1
35 EMPTYFANOUT = -1
36
36
37 # The fanout prefix is the number of bytes that can be addressed by the fanout
37 # The fanout prefix is the number of bytes that can be addressed by the fanout
38 # table. Example: a fanout prefix of 1 means we use the first byte of a hash to
38 # table. Example: a fanout prefix of 1 means we use the first byte of a hash to
39 # look in the fanout table (which will be 2^8 entries long).
39 # look in the fanout table (which will be 2^8 entries long).
40 SMALLFANOUTPREFIX = 1
40 SMALLFANOUTPREFIX = 1
41 LARGEFANOUTPREFIX = 2
41 LARGEFANOUTPREFIX = 2
42
42
43 # The number of entries in the index at which point we switch to a large fanout.
43 # The number of entries in the index at which point we switch to a large fanout.
44 # It is chosen to balance the linear scan through a sparse fanout, with the
44 # It is chosen to balance the linear scan through a sparse fanout, with the
45 # size of the bisect in actual index.
45 # size of the bisect in actual index.
46 # 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
46 # 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
47 # bisect) with (8 step fanout scan + 1 step bisect)
47 # bisect) with (8 step fanout scan + 1 step bisect)
48 # 5 step bisect = log(2^16 / 8 / 255) # fanout
48 # 5 step bisect = log(2^16 / 8 / 255) # fanout
49 # 10 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries
49 # 10 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries
50 SMALLFANOUTCUTOFF = 2 ** 16 // 8
50 SMALLFANOUTCUTOFF = 2 ** 16 // 8
51
51
52 # The amount of time to wait between checking for new packs. This prevents an
52 # The amount of time to wait between checking for new packs. This prevents an
53 # exception when data is moved to a new pack after the process has already
53 # exception when data is moved to a new pack after the process has already
54 # loaded the pack list.
54 # loaded the pack list.
55 REFRESHRATE = 0.1
55 REFRESHRATE = 0.1
56
56
57 if pycompat.isposix and not pycompat.ispy3:
57 if pycompat.isposix and not pycompat.ispy3:
58 # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening.
58 # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening.
59 # The 'e' flag will be ignored on older versions of glibc.
59 # The 'e' flag will be ignored on older versions of glibc.
60 # Python 3 can't handle the 'e' flag.
60 # Python 3 can't handle the 'e' flag.
61 PACKOPENMODE = b'rbe'
61 PACKOPENMODE = b'rbe'
62 else:
62 else:
63 PACKOPENMODE = b'rb'
63 PACKOPENMODE = b'rb'
64
64
65
65
66 class _cachebackedpacks:
66 class _cachebackedpacks:
67 def __init__(self, packs, cachesize):
67 def __init__(self, packs, cachesize):
68 self._packs = set(packs)
68 self._packs = set(packs)
69 self._lrucache = util.lrucachedict(cachesize)
69 self._lrucache = util.lrucachedict(cachesize)
70 self._lastpack = None
70 self._lastpack = None
71
71
72 # Avoid cold start of the cache by populating the most recent packs
72 # Avoid cold start of the cache by populating the most recent packs
73 # in the cache.
73 # in the cache.
74 for i in reversed(range(min(cachesize, len(packs)))):
74 for i in reversed(range(min(cachesize, len(packs)))):
75 self._movetofront(packs[i])
75 self._movetofront(packs[i])
76
76
77 def _movetofront(self, pack):
77 def _movetofront(self, pack):
78 # This effectively makes pack the first entry in the cache.
78 # This effectively makes pack the first entry in the cache.
79 self._lrucache[pack] = True
79 self._lrucache[pack] = True
80
80
81 def _registerlastpackusage(self):
81 def _registerlastpackusage(self):
82 if self._lastpack is not None:
82 if self._lastpack is not None:
83 self._movetofront(self._lastpack)
83 self._movetofront(self._lastpack)
84 self._lastpack = None
84 self._lastpack = None
85
85
86 def add(self, pack):
86 def add(self, pack):
87 self._registerlastpackusage()
87 self._registerlastpackusage()
88
88
89 # This method will mostly be called when packs are not in cache.
89 # This method will mostly be called when packs are not in cache.
90 # Therefore, adding pack to the cache.
90 # Therefore, adding pack to the cache.
91 self._movetofront(pack)
91 self._movetofront(pack)
92 self._packs.add(pack)
92 self._packs.add(pack)
93
93
94 def __iter__(self):
94 def __iter__(self):
95 self._registerlastpackusage()
95 self._registerlastpackusage()
96
96
97 # Cache iteration is based on LRU.
97 # Cache iteration is based on LRU.
98 for pack in self._lrucache:
98 for pack in self._lrucache:
99 self._lastpack = pack
99 self._lastpack = pack
100 yield pack
100 yield pack
101
101
102 cachedpacks = {pack for pack in self._lrucache}
102 cachedpacks = {pack for pack in self._lrucache}
103 # Yield for paths not in the cache.
103 # Yield for paths not in the cache.
104 for pack in self._packs - cachedpacks:
104 for pack in self._packs - cachedpacks:
105 self._lastpack = pack
105 self._lastpack = pack
106 yield pack
106 yield pack
107
107
108 # Data not found in any pack.
108 # Data not found in any pack.
109 self._lastpack = None
109 self._lastpack = None
110
110
111
111
112 class basepackstore:
112 class basepackstore:
113 # Default cache size limit for the pack files.
113 # Default cache size limit for the pack files.
114 DEFAULTCACHESIZE = 100
114 DEFAULTCACHESIZE = 100
115
115
116 def __init__(self, ui, path):
116 def __init__(self, ui, path):
117 self.ui = ui
117 self.ui = ui
118 self.path = path
118 self.path = path
119
119
120 # lastrefesh is 0 so we'll immediately check for new packs on the first
120 # lastrefesh is 0 so we'll immediately check for new packs on the first
121 # failure.
121 # failure.
122 self.lastrefresh = 0
122 self.lastrefresh = 0
123
123
124 packs = []
124 packs = []
125 for filepath, __, __ in self._getavailablepackfilessorted():
125 for filepath, __, __ in self._getavailablepackfilessorted():
126 try:
126 try:
127 pack = self.getpack(filepath)
127 pack = self.getpack(filepath)
128 except Exception as ex:
128 except Exception as ex:
129 # An exception may be thrown if the pack file is corrupted
129 # An exception may be thrown if the pack file is corrupted
130 # somehow. Log a warning but keep going in this case, just
130 # somehow. Log a warning but keep going in this case, just
131 # skipping this pack file.
131 # skipping this pack file.
132 #
132 #
133 # If this is an ENOENT error then don't even bother logging.
133 # If this is an ENOENT error then don't even bother logging.
134 # Someone could have removed the file since we retrieved the
134 # Someone could have removed the file since we retrieved the
135 # list of paths.
135 # list of paths.
136 if getattr(ex, 'errno', None) != errno.ENOENT:
136 if getattr(ex, 'errno', None) != errno.ENOENT:
137 ui.warn(_(b'unable to load pack %s: %s\n') % (filepath, ex))
137 ui.warn(_(b'unable to load pack %s: %s\n') % (filepath, ex))
138 continue
138 continue
139 packs.append(pack)
139 packs.append(pack)
140
140
141 self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE)
141 self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE)
142
142
143 def _getavailablepackfiles(self):
143 def _getavailablepackfiles(self):
144 """For each pack file (a index/data file combo), yields:
144 """For each pack file (a index/data file combo), yields:
145 (full path without extension, mtime, size)
145 (full path without extension, mtime, size)
146
146
147 mtime will be the mtime of the index/data file (whichever is newer)
147 mtime will be the mtime of the index/data file (whichever is newer)
148 size is the combined size of index/data file
148 size is the combined size of index/data file
149 """
149 """
150 indexsuffixlen = len(self.INDEXSUFFIX)
150 indexsuffixlen = len(self.INDEXSUFFIX)
151 packsuffixlen = len(self.PACKSUFFIX)
151 packsuffixlen = len(self.PACKSUFFIX)
152
152
153 ids = set()
153 ids = set()
154 sizes = collections.defaultdict(lambda: 0)
154 sizes = collections.defaultdict(lambda: 0)
155 mtimes = collections.defaultdict(lambda: [])
155 mtimes = collections.defaultdict(lambda: [])
156 try:
156 try:
157 for filename, type, stat in osutil.listdir(self.path, stat=True):
157 for filename, type, stat in osutil.listdir(self.path, stat=True):
158 id = None
158 id = None
159 if filename[-indexsuffixlen:] == self.INDEXSUFFIX:
159 if filename[-indexsuffixlen:] == self.INDEXSUFFIX:
160 id = filename[:-indexsuffixlen]
160 id = filename[:-indexsuffixlen]
161 elif filename[-packsuffixlen:] == self.PACKSUFFIX:
161 elif filename[-packsuffixlen:] == self.PACKSUFFIX:
162 id = filename[:-packsuffixlen]
162 id = filename[:-packsuffixlen]
163
163
164 # Since we expect to have two files corresponding to each ID
164 # Since we expect to have two files corresponding to each ID
165 # (the index file and the pack file), we can yield once we see
165 # (the index file and the pack file), we can yield once we see
166 # it twice.
166 # it twice.
167 if id:
167 if id:
168 sizes[id] += stat.st_size # Sum both files' sizes together
168 sizes[id] += stat.st_size # Sum both files' sizes together
169 mtimes[id].append(stat.st_mtime)
169 mtimes[id].append(stat.st_mtime)
170 if id in ids:
170 if id in ids:
171 yield (
171 yield (
172 os.path.join(self.path, id),
172 os.path.join(self.path, id),
173 max(mtimes[id]),
173 max(mtimes[id]),
174 sizes[id],
174 sizes[id],
175 )
175 )
176 else:
176 else:
177 ids.add(id)
177 ids.add(id)
178 except OSError as ex:
178 except OSError as ex:
179 if ex.errno != errno.ENOENT:
179 if ex.errno != errno.ENOENT:
180 raise
180 raise
181
181
182 def _getavailablepackfilessorted(self):
182 def _getavailablepackfilessorted(self):
183 """Like `_getavailablepackfiles`, but also sorts the files by mtime,
183 """Like `_getavailablepackfiles`, but also sorts the files by mtime,
184 yielding newest files first.
184 yielding newest files first.
185
185
186 This is desirable, since it is more likely newer packfiles have more
186 This is desirable, since it is more likely newer packfiles have more
187 desirable data.
187 desirable data.
188 """
188 """
189 files = []
189 files = []
190 for path, mtime, size in self._getavailablepackfiles():
190 for path, mtime, size in self._getavailablepackfiles():
191 files.append((mtime, size, path))
191 files.append((mtime, size, path))
192 files = sorted(files, reverse=True)
192 files = sorted(files, reverse=True)
193 for mtime, size, path in files:
193 for mtime, size, path in files:
194 yield path, mtime, size
194 yield path, mtime, size
195
195
196 def gettotalsizeandcount(self):
196 def gettotalsizeandcount(self):
197 """Returns the total disk size (in bytes) of all the pack files in
197 """Returns the total disk size (in bytes) of all the pack files in
198 this store, and the count of pack files.
198 this store, and the count of pack files.
199
199
200 (This might be smaller than the total size of the ``self.path``
200 (This might be smaller than the total size of the ``self.path``
201 directory, since this only considers fuly-writen pack files, and not
201 directory, since this only considers fuly-writen pack files, and not
202 temporary files or other detritus on the directory.)
202 temporary files or other detritus on the directory.)
203 """
203 """
204 totalsize = 0
204 totalsize = 0
205 count = 0
205 count = 0
206 for __, __, size in self._getavailablepackfiles():
206 for __, __, size in self._getavailablepackfiles():
207 totalsize += size
207 totalsize += size
208 count += 1
208 count += 1
209 return totalsize, count
209 return totalsize, count
210
210
211 def getmetrics(self):
211 def getmetrics(self):
212 """Returns metrics on the state of this store."""
212 """Returns metrics on the state of this store."""
213 size, count = self.gettotalsizeandcount()
213 size, count = self.gettotalsizeandcount()
214 return {
214 return {
215 b'numpacks': count,
215 b'numpacks': count,
216 b'totalpacksize': size,
216 b'totalpacksize': size,
217 }
217 }
218
218
219 def getpack(self, path):
219 def getpack(self, path):
220 raise NotImplementedError()
220 raise NotImplementedError()
221
221
222 def getmissing(self, keys):
222 def getmissing(self, keys):
223 missing = keys
223 missing = keys
224 for pack in self.packs:
224 for pack in self.packs:
225 missing = pack.getmissing(missing)
225 missing = pack.getmissing(missing)
226
226
227 # Ensures better performance of the cache by keeping the most
227 # Ensures better performance of the cache by keeping the most
228 # recently accessed pack at the beginning in subsequent iterations.
228 # recently accessed pack at the beginning in subsequent iterations.
229 if not missing:
229 if not missing:
230 return missing
230 return missing
231
231
232 if missing:
232 if missing:
233 for pack in self.refresh():
233 for pack in self.refresh():
234 missing = pack.getmissing(missing)
234 missing = pack.getmissing(missing)
235
235
236 return missing
236 return missing
237
237
238 def markledger(self, ledger, options=None):
238 def markledger(self, ledger, options=None):
239 for pack in self.packs:
239 for pack in self.packs:
240 pack.markledger(ledger)
240 pack.markledger(ledger)
241
241
242 def markforrefresh(self):
242 def markforrefresh(self):
243 """Tells the store that there may be new pack files, so the next time it
243 """Tells the store that there may be new pack files, so the next time it
244 has a lookup miss it should check for new files."""
244 has a lookup miss it should check for new files."""
245 self.lastrefresh = 0
245 self.lastrefresh = 0
246
246
247 def refresh(self):
247 def refresh(self):
248 """Checks for any new packs on disk, adds them to the main pack list,
248 """Checks for any new packs on disk, adds them to the main pack list,
249 and returns a list of just the new packs."""
249 and returns a list of just the new packs."""
250 now = time.time()
250 now = time.time()
251
251
252 # If we experience a lot of misses (like in the case of getmissing() on
252 # If we experience a lot of misses (like in the case of getmissing() on
253 # new objects), let's only actually check disk for new stuff every once
253 # new objects), let's only actually check disk for new stuff every once
254 # in a while. Generally this code path should only ever matter when a
254 # in a while. Generally this code path should only ever matter when a
255 # repack is going on in the background, and that should be pretty rare
255 # repack is going on in the background, and that should be pretty rare
256 # to have that happen twice in quick succession.
256 # to have that happen twice in quick succession.
257 newpacks = []
257 newpacks = []
258 if now > self.lastrefresh + REFRESHRATE:
258 if now > self.lastrefresh + REFRESHRATE:
259 self.lastrefresh = now
259 self.lastrefresh = now
260 previous = {p.path for p in self.packs}
260 previous = {p.path for p in self.packs}
261 for filepath, __, __ in self._getavailablepackfilessorted():
261 for filepath, __, __ in self._getavailablepackfilessorted():
262 if filepath not in previous:
262 if filepath not in previous:
263 newpack = self.getpack(filepath)
263 newpack = self.getpack(filepath)
264 newpacks.append(newpack)
264 newpacks.append(newpack)
265 self.packs.add(newpack)
265 self.packs.add(newpack)
266
266
267 return newpacks
267 return newpacks
268
268
269
269
270 class versionmixin:
270 class versionmixin:
271 # Mix-in for classes with multiple supported versions
271 # Mix-in for classes with multiple supported versions
272 VERSION = None
272 VERSION = None
273 SUPPORTED_VERSIONS = [2]
273 SUPPORTED_VERSIONS = [2]
274
274
275 def _checkversion(self, version):
275 def _checkversion(self, version):
276 if version in self.SUPPORTED_VERSIONS:
276 if version in self.SUPPORTED_VERSIONS:
277 if self.VERSION is None:
277 if self.VERSION is None:
278 # only affect this instance
278 # only affect this instance
279 self.VERSION = version
279 self.VERSION = version
280 elif self.VERSION != version:
280 elif self.VERSION != version:
281 raise RuntimeError(b'inconsistent version: %d' % version)
281 raise RuntimeError(b'inconsistent version: %d' % version)
282 else:
282 else:
283 raise RuntimeError(b'unsupported version: %d' % version)
283 raise RuntimeError(b'unsupported version: %d' % version)
284
284
285
285
286 class basepack(versionmixin):
286 class basepack(versionmixin):
287 # The maximum amount we should read via mmap before remmaping so the old
287 # The maximum amount we should read via mmap before remmaping so the old
288 # pages can be released (100MB)
288 # pages can be released (100MB)
289 MAXPAGEDIN = 100 * 1024 ** 2
289 MAXPAGEDIN = 100 * 1024 ** 2
290
290
291 SUPPORTED_VERSIONS = [2]
291 SUPPORTED_VERSIONS = [2]
292
292
293 def __init__(self, path):
293 def __init__(self, path):
294 self.path = path
294 self.path = path
295 self.packpath = path + self.PACKSUFFIX
295 self.packpath = path + self.PACKSUFFIX
296 self.indexpath = path + self.INDEXSUFFIX
296 self.indexpath = path + self.INDEXSUFFIX
297
297
298 self.indexsize = os.stat(self.indexpath).st_size
298 self.indexsize = os.stat(self.indexpath).st_size
299 self.datasize = os.stat(self.packpath).st_size
299 self.datasize = os.stat(self.packpath).st_size
300
300
301 self._index = None
301 self._index = None
302 self._data = None
302 self._data = None
303 self.freememory() # initialize the mmap
303 self.freememory() # initialize the mmap
304
304
305 version = struct.unpack(b'!B', self._data[:PACKVERSIONSIZE])[0]
305 version = struct.unpack(b'!B', self._data[:PACKVERSIONSIZE])[0]
306 self._checkversion(version)
306 self._checkversion(version)
307
307
308 version, config = struct.unpack(b'!BB', self._index[:INDEXVERSIONSIZE])
308 version, config = struct.unpack(b'!BB', self._index[:INDEXVERSIONSIZE])
309 self._checkversion(version)
309 self._checkversion(version)
310
310
311 if 0b10000000 & config:
311 if 0b10000000 & config:
312 self.params = indexparams(LARGEFANOUTPREFIX, version)
312 self.params = indexparams(LARGEFANOUTPREFIX, version)
313 else:
313 else:
314 self.params = indexparams(SMALLFANOUTPREFIX, version)
314 self.params = indexparams(SMALLFANOUTPREFIX, version)
315
315
316 @util.propertycache
316 @util.propertycache
317 def _fanouttable(self):
317 def _fanouttable(self):
318 params = self.params
318 params = self.params
319 rawfanout = self._index[FANOUTSTART : FANOUTSTART + params.fanoutsize]
319 rawfanout = self._index[FANOUTSTART : FANOUTSTART + params.fanoutsize]
320 fanouttable = []
320 fanouttable = []
321 for i in pycompat.xrange(0, params.fanoutcount):
321 for i in range(0, params.fanoutcount):
322 loc = i * 4
322 loc = i * 4
323 fanoutentry = struct.unpack(b'!I', rawfanout[loc : loc + 4])[0]
323 fanoutentry = struct.unpack(b'!I', rawfanout[loc : loc + 4])[0]
324 fanouttable.append(fanoutentry)
324 fanouttable.append(fanoutentry)
325 return fanouttable
325 return fanouttable
326
326
327 @util.propertycache
327 @util.propertycache
328 def _indexend(self):
328 def _indexend(self):
329 nodecount = struct.unpack_from(
329 nodecount = struct.unpack_from(
330 b'!Q', self._index, self.params.indexstart - 8
330 b'!Q', self._index, self.params.indexstart - 8
331 )[0]
331 )[0]
332 return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
332 return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
333
333
334 def freememory(self):
334 def freememory(self):
335 """Unmap and remap the memory to free it up after known expensive
335 """Unmap and remap the memory to free it up after known expensive
336 operations. Return True if self._data and self._index were reloaded.
336 operations. Return True if self._data and self._index were reloaded.
337 """
337 """
338 if self._index:
338 if self._index:
339 if self._pagedin < self.MAXPAGEDIN:
339 if self._pagedin < self.MAXPAGEDIN:
340 return False
340 return False
341
341
342 self._index.close()
342 self._index.close()
343 self._data.close()
343 self._data.close()
344
344
345 # TODO: use an opener/vfs to access these paths
345 # TODO: use an opener/vfs to access these paths
346 with open(self.indexpath, PACKOPENMODE) as indexfp:
346 with open(self.indexpath, PACKOPENMODE) as indexfp:
347 # memory-map the file, size 0 means whole file
347 # memory-map the file, size 0 means whole file
348 self._index = mmap.mmap(
348 self._index = mmap.mmap(
349 indexfp.fileno(), 0, access=mmap.ACCESS_READ
349 indexfp.fileno(), 0, access=mmap.ACCESS_READ
350 )
350 )
351 with open(self.packpath, PACKOPENMODE) as datafp:
351 with open(self.packpath, PACKOPENMODE) as datafp:
352 self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)
352 self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)
353
353
354 self._pagedin = 0
354 self._pagedin = 0
355 return True
355 return True
356
356
357 def getmissing(self, keys):
357 def getmissing(self, keys):
358 raise NotImplementedError()
358 raise NotImplementedError()
359
359
360 def markledger(self, ledger, options=None):
360 def markledger(self, ledger, options=None):
361 raise NotImplementedError()
361 raise NotImplementedError()
362
362
363 def cleanup(self, ledger):
363 def cleanup(self, ledger):
364 raise NotImplementedError()
364 raise NotImplementedError()
365
365
366 def __iter__(self):
366 def __iter__(self):
367 raise NotImplementedError()
367 raise NotImplementedError()
368
368
369 def iterentries(self):
369 def iterentries(self):
370 raise NotImplementedError()
370 raise NotImplementedError()
371
371
372
372
373 class mutablebasepack(versionmixin):
373 class mutablebasepack(versionmixin):
374 def __init__(self, ui, packdir, version=2):
374 def __init__(self, ui, packdir, version=2):
375 self._checkversion(version)
375 self._checkversion(version)
376 # TODO(augie): make this configurable
376 # TODO(augie): make this configurable
377 self._compressor = b'GZ'
377 self._compressor = b'GZ'
378 opener = vfsmod.vfs(packdir)
378 opener = vfsmod.vfs(packdir)
379 opener.createmode = 0o444
379 opener.createmode = 0o444
380 self.opener = opener
380 self.opener = opener
381
381
382 self.entries = {}
382 self.entries = {}
383
383
384 shallowutil.mkstickygroupdir(ui, packdir)
384 shallowutil.mkstickygroupdir(ui, packdir)
385 self.packfp, self.packpath = opener.mkstemp(
385 self.packfp, self.packpath = opener.mkstemp(
386 suffix=self.PACKSUFFIX + b'-tmp'
386 suffix=self.PACKSUFFIX + b'-tmp'
387 )
387 )
388 self.idxfp, self.idxpath = opener.mkstemp(
388 self.idxfp, self.idxpath = opener.mkstemp(
389 suffix=self.INDEXSUFFIX + b'-tmp'
389 suffix=self.INDEXSUFFIX + b'-tmp'
390 )
390 )
391 self.packfp = os.fdopen(self.packfp, 'wb+')
391 self.packfp = os.fdopen(self.packfp, 'wb+')
392 self.idxfp = os.fdopen(self.idxfp, 'wb+')
392 self.idxfp = os.fdopen(self.idxfp, 'wb+')
393 self.sha = hashutil.sha1()
393 self.sha = hashutil.sha1()
394 self._closed = False
394 self._closed = False
395
395
396 # The opener provides no way of doing permission fixup on files created
396 # The opener provides no way of doing permission fixup on files created
397 # via mkstemp, so we must fix it ourselves. We can probably fix this
397 # via mkstemp, so we must fix it ourselves. We can probably fix this
398 # upstream in vfs.mkstemp so we don't need to use the private method.
398 # upstream in vfs.mkstemp so we don't need to use the private method.
399 opener._fixfilemode(opener.join(self.packpath))
399 opener._fixfilemode(opener.join(self.packpath))
400 opener._fixfilemode(opener.join(self.idxpath))
400 opener._fixfilemode(opener.join(self.idxpath))
401
401
402 # Write header
402 # Write header
403 # TODO: make it extensible (ex: allow specifying compression algorithm,
403 # TODO: make it extensible (ex: allow specifying compression algorithm,
404 # a flexible key/value header, delta algorithm, fanout size, etc)
404 # a flexible key/value header, delta algorithm, fanout size, etc)
405 versionbuf = struct.pack(b'!B', self.VERSION) # unsigned 1 byte int
405 versionbuf = struct.pack(b'!B', self.VERSION) # unsigned 1 byte int
406 self.writeraw(versionbuf)
406 self.writeraw(versionbuf)
407
407
408 def __enter__(self):
408 def __enter__(self):
409 return self
409 return self
410
410
411 def __exit__(self, exc_type, exc_value, traceback):
411 def __exit__(self, exc_type, exc_value, traceback):
412 if exc_type is None:
412 if exc_type is None:
413 self.close()
413 self.close()
414 else:
414 else:
415 self.abort()
415 self.abort()
416
416
417 def abort(self):
417 def abort(self):
418 # Unclean exit
418 # Unclean exit
419 self._cleantemppacks()
419 self._cleantemppacks()
420
420
421 def writeraw(self, data):
421 def writeraw(self, data):
422 self.packfp.write(data)
422 self.packfp.write(data)
423 self.sha.update(data)
423 self.sha.update(data)
424
424
425 def close(self, ledger=None):
425 def close(self, ledger=None):
426 if self._closed:
426 if self._closed:
427 return
427 return
428
428
429 try:
429 try:
430 sha = hex(self.sha.digest())
430 sha = hex(self.sha.digest())
431 self.packfp.close()
431 self.packfp.close()
432 self.writeindex()
432 self.writeindex()
433
433
434 if len(self.entries) == 0:
434 if len(self.entries) == 0:
435 # Empty pack
435 # Empty pack
436 self._cleantemppacks()
436 self._cleantemppacks()
437 self._closed = True
437 self._closed = True
438 return None
438 return None
439
439
440 self.opener.rename(self.packpath, sha + self.PACKSUFFIX)
440 self.opener.rename(self.packpath, sha + self.PACKSUFFIX)
441 try:
441 try:
442 self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX)
442 self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX)
443 except Exception as ex:
443 except Exception as ex:
444 try:
444 try:
445 self.opener.unlink(sha + self.PACKSUFFIX)
445 self.opener.unlink(sha + self.PACKSUFFIX)
446 except Exception:
446 except Exception:
447 pass
447 pass
448 # Throw exception 'ex' explicitly since a normal 'raise' would
448 # Throw exception 'ex' explicitly since a normal 'raise' would
449 # potentially throw an exception from the unlink cleanup.
449 # potentially throw an exception from the unlink cleanup.
450 raise ex
450 raise ex
451 except Exception:
451 except Exception:
452 # Clean up temp packs in all exception cases
452 # Clean up temp packs in all exception cases
453 self._cleantemppacks()
453 self._cleantemppacks()
454 raise
454 raise
455
455
456 self._closed = True
456 self._closed = True
457 result = self.opener.join(sha)
457 result = self.opener.join(sha)
458 if ledger:
458 if ledger:
459 ledger.addcreated(result)
459 ledger.addcreated(result)
460 return result
460 return result
461
461
462 def _cleantemppacks(self):
462 def _cleantemppacks(self):
463 try:
463 try:
464 self.opener.unlink(self.packpath)
464 self.opener.unlink(self.packpath)
465 except Exception:
465 except Exception:
466 pass
466 pass
467 try:
467 try:
468 self.opener.unlink(self.idxpath)
468 self.opener.unlink(self.idxpath)
469 except Exception:
469 except Exception:
470 pass
470 pass
471
471
472 def writeindex(self):
472 def writeindex(self):
473 largefanout = len(self.entries) > SMALLFANOUTCUTOFF
473 largefanout = len(self.entries) > SMALLFANOUTCUTOFF
474 if largefanout:
474 if largefanout:
475 params = indexparams(LARGEFANOUTPREFIX, self.VERSION)
475 params = indexparams(LARGEFANOUTPREFIX, self.VERSION)
476 else:
476 else:
477 params = indexparams(SMALLFANOUTPREFIX, self.VERSION)
477 params = indexparams(SMALLFANOUTPREFIX, self.VERSION)
478
478
479 fanouttable = [EMPTYFANOUT] * params.fanoutcount
479 fanouttable = [EMPTYFANOUT] * params.fanoutcount
480
480
481 # Precompute the location of each entry
481 # Precompute the location of each entry
482 locations = {}
482 locations = {}
483 count = 0
483 count = 0
484 for node in sorted(self.entries):
484 for node in sorted(self.entries):
485 location = count * self.INDEXENTRYLENGTH
485 location = count * self.INDEXENTRYLENGTH
486 locations[node] = location
486 locations[node] = location
487 count += 1
487 count += 1
488
488
489 # Must use [0] on the unpack result since it's always a tuple.
489 # Must use [0] on the unpack result since it's always a tuple.
490 fanoutkey = struct.unpack(
490 fanoutkey = struct.unpack(
491 params.fanoutstruct, node[: params.fanoutprefix]
491 params.fanoutstruct, node[: params.fanoutprefix]
492 )[0]
492 )[0]
493 if fanouttable[fanoutkey] == EMPTYFANOUT:
493 if fanouttable[fanoutkey] == EMPTYFANOUT:
494 fanouttable[fanoutkey] = location
494 fanouttable[fanoutkey] = location
495
495
496 rawfanouttable = b''
496 rawfanouttable = b''
497 last = 0
497 last = 0
498 for offset in fanouttable:
498 for offset in fanouttable:
499 offset = offset if offset != EMPTYFANOUT else last
499 offset = offset if offset != EMPTYFANOUT else last
500 last = offset
500 last = offset
501 rawfanouttable += struct.pack(b'!I', offset)
501 rawfanouttable += struct.pack(b'!I', offset)
502
502
503 rawentrieslength = struct.pack(b'!Q', len(self.entries))
503 rawentrieslength = struct.pack(b'!Q', len(self.entries))
504
504
505 # The index offset is the it's location in the file. So after the 2 byte
505 # The index offset is the it's location in the file. So after the 2 byte
506 # header and the fanouttable.
506 # header and the fanouttable.
507 rawindex = self.createindex(locations, 2 + len(rawfanouttable))
507 rawindex = self.createindex(locations, 2 + len(rawfanouttable))
508
508
509 self._writeheader(params)
509 self._writeheader(params)
510 self.idxfp.write(rawfanouttable)
510 self.idxfp.write(rawfanouttable)
511 self.idxfp.write(rawentrieslength)
511 self.idxfp.write(rawentrieslength)
512 self.idxfp.write(rawindex)
512 self.idxfp.write(rawindex)
513 self.idxfp.close()
513 self.idxfp.close()
514
514
515 def createindex(self, nodelocations):
515 def createindex(self, nodelocations):
516 raise NotImplementedError()
516 raise NotImplementedError()
517
517
518 def _writeheader(self, indexparams):
518 def _writeheader(self, indexparams):
519 # Index header
519 # Index header
520 # <version: 1 byte>
520 # <version: 1 byte>
521 # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8
521 # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8
522 # <unused: 7 bit> # future use (compression, delta format, etc)
522 # <unused: 7 bit> # future use (compression, delta format, etc)
523 config = 0
523 config = 0
524 if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
524 if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
525 config = 0b10000000
525 config = 0b10000000
526 self.idxfp.write(struct.pack(b'!BB', self.VERSION, config))
526 self.idxfp.write(struct.pack(b'!BB', self.VERSION, config))
527
527
528
528
529 class indexparams:
529 class indexparams:
530 __slots__ = (
530 __slots__ = (
531 'fanoutprefix',
531 'fanoutprefix',
532 'fanoutstruct',
532 'fanoutstruct',
533 'fanoutcount',
533 'fanoutcount',
534 'fanoutsize',
534 'fanoutsize',
535 'indexstart',
535 'indexstart',
536 )
536 )
537
537
538 def __init__(self, prefixsize, version):
538 def __init__(self, prefixsize, version):
539 self.fanoutprefix = prefixsize
539 self.fanoutprefix = prefixsize
540
540
541 # The struct pack format for fanout table location (i.e. the format that
541 # The struct pack format for fanout table location (i.e. the format that
542 # converts the node prefix into an integer location in the fanout
542 # converts the node prefix into an integer location in the fanout
543 # table).
543 # table).
544 if prefixsize == SMALLFANOUTPREFIX:
544 if prefixsize == SMALLFANOUTPREFIX:
545 self.fanoutstruct = b'!B'
545 self.fanoutstruct = b'!B'
546 elif prefixsize == LARGEFANOUTPREFIX:
546 elif prefixsize == LARGEFANOUTPREFIX:
547 self.fanoutstruct = b'!H'
547 self.fanoutstruct = b'!H'
548 else:
548 else:
549 raise ValueError(b"invalid fanout prefix size: %s" % prefixsize)
549 raise ValueError(b"invalid fanout prefix size: %s" % prefixsize)
550
550
551 # The number of fanout table entries
551 # The number of fanout table entries
552 self.fanoutcount = 2 ** (prefixsize * 8)
552 self.fanoutcount = 2 ** (prefixsize * 8)
553
553
554 # The total bytes used by the fanout table
554 # The total bytes used by the fanout table
555 self.fanoutsize = self.fanoutcount * 4
555 self.fanoutsize = self.fanoutcount * 4
556
556
557 self.indexstart = FANOUTSTART + self.fanoutsize
557 self.indexstart = FANOUTSTART + self.fanoutsize
558 # Skip the index length
558 # Skip the index length
559 self.indexstart += 8
559 self.indexstart += 8
@@ -1,459 +1,459 b''
1 import errno
1 import errno
2 import os
2 import os
3 import shutil
3 import shutil
4 import stat
4 import stat
5 import time
5 import time
6
6
7 from mercurial.i18n import _
7 from mercurial.i18n import _
8 from mercurial.node import bin, hex
8 from mercurial.node import bin, hex
9 from mercurial.pycompat import open
9 from mercurial.pycompat import open
10 from mercurial import (
10 from mercurial import (
11 error,
11 error,
12 pycompat,
12 pycompat,
13 util,
13 util,
14 )
14 )
15 from mercurial.utils import hashutil
15 from mercurial.utils import hashutil
16 from . import (
16 from . import (
17 constants,
17 constants,
18 shallowutil,
18 shallowutil,
19 )
19 )
20
20
21
21
22 class basestore:
22 class basestore:
23 def __init__(self, repo, path, reponame, shared=False):
23 def __init__(self, repo, path, reponame, shared=False):
24 """Creates a remotefilelog store object for the given repo name.
24 """Creates a remotefilelog store object for the given repo name.
25
25
26 `path` - The file path where this store keeps its data
26 `path` - The file path where this store keeps its data
27 `reponame` - The name of the repo. This is used to partition data from
27 `reponame` - The name of the repo. This is used to partition data from
28 many repos.
28 many repos.
29 `shared` - True if this store is a shared cache of data from the central
29 `shared` - True if this store is a shared cache of data from the central
30 server, for many repos on this machine. False means this store is for
30 server, for many repos on this machine. False means this store is for
31 the local data for one repo.
31 the local data for one repo.
32 """
32 """
33 self.repo = repo
33 self.repo = repo
34 self.ui = repo.ui
34 self.ui = repo.ui
35 self._path = path
35 self._path = path
36 self._reponame = reponame
36 self._reponame = reponame
37 self._shared = shared
37 self._shared = shared
38 self._uid = os.getuid() if not pycompat.iswindows else None
38 self._uid = os.getuid() if not pycompat.iswindows else None
39
39
40 self._validatecachelog = self.ui.config(
40 self._validatecachelog = self.ui.config(
41 b"remotefilelog", b"validatecachelog"
41 b"remotefilelog", b"validatecachelog"
42 )
42 )
43 self._validatecache = self.ui.config(
43 self._validatecache = self.ui.config(
44 b"remotefilelog", b"validatecache", b'on'
44 b"remotefilelog", b"validatecache", b'on'
45 )
45 )
46 if self._validatecache not in (b'on', b'strict', b'off'):
46 if self._validatecache not in (b'on', b'strict', b'off'):
47 self._validatecache = b'on'
47 self._validatecache = b'on'
48 if self._validatecache == b'off':
48 if self._validatecache == b'off':
49 self._validatecache = False
49 self._validatecache = False
50
50
51 if shared:
51 if shared:
52 shallowutil.mkstickygroupdir(self.ui, path)
52 shallowutil.mkstickygroupdir(self.ui, path)
53
53
54 def getmissing(self, keys):
54 def getmissing(self, keys):
55 missing = []
55 missing = []
56 for name, node in keys:
56 for name, node in keys:
57 filepath = self._getfilepath(name, node)
57 filepath = self._getfilepath(name, node)
58 exists = os.path.exists(filepath)
58 exists = os.path.exists(filepath)
59 if (
59 if (
60 exists
60 exists
61 and self._validatecache == b'strict'
61 and self._validatecache == b'strict'
62 and not self._validatekey(filepath, b'contains')
62 and not self._validatekey(filepath, b'contains')
63 ):
63 ):
64 exists = False
64 exists = False
65 if not exists:
65 if not exists:
66 missing.append((name, node))
66 missing.append((name, node))
67
67
68 return missing
68 return missing
69
69
70 # BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE
70 # BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE
71
71
72 def markledger(self, ledger, options=None):
72 def markledger(self, ledger, options=None):
73 if options and options.get(constants.OPTION_PACKSONLY):
73 if options and options.get(constants.OPTION_PACKSONLY):
74 return
74 return
75 if self._shared:
75 if self._shared:
76 for filename, nodes in self._getfiles():
76 for filename, nodes in self._getfiles():
77 for node in nodes:
77 for node in nodes:
78 ledger.markdataentry(self, filename, node)
78 ledger.markdataentry(self, filename, node)
79 ledger.markhistoryentry(self, filename, node)
79 ledger.markhistoryentry(self, filename, node)
80
80
81 def cleanup(self, ledger):
81 def cleanup(self, ledger):
82 ui = self.ui
82 ui = self.ui
83 entries = ledger.sources.get(self, [])
83 entries = ledger.sources.get(self, [])
84 count = 0
84 count = 0
85 progress = ui.makeprogress(
85 progress = ui.makeprogress(
86 _(b"cleaning up"), unit=b"files", total=len(entries)
86 _(b"cleaning up"), unit=b"files", total=len(entries)
87 )
87 )
88 for entry in entries:
88 for entry in entries:
89 if entry.gced or (entry.datarepacked and entry.historyrepacked):
89 if entry.gced or (entry.datarepacked and entry.historyrepacked):
90 progress.update(count)
90 progress.update(count)
91 path = self._getfilepath(entry.filename, entry.node)
91 path = self._getfilepath(entry.filename, entry.node)
92 util.tryunlink(path)
92 util.tryunlink(path)
93 count += 1
93 count += 1
94 progress.complete()
94 progress.complete()
95
95
96 # Clean up the repo cache directory.
96 # Clean up the repo cache directory.
97 self._cleanupdirectory(self._getrepocachepath())
97 self._cleanupdirectory(self._getrepocachepath())
98
98
99 # BELOW THIS ARE NON-STANDARD APIS
99 # BELOW THIS ARE NON-STANDARD APIS
100
100
101 def _cleanupdirectory(self, rootdir):
101 def _cleanupdirectory(self, rootdir):
102 """Removes the empty directories and unnecessary files within the root
102 """Removes the empty directories and unnecessary files within the root
103 directory recursively. Note that this method does not remove the root
103 directory recursively. Note that this method does not remove the root
104 directory itself."""
104 directory itself."""
105
105
106 oldfiles = set()
106 oldfiles = set()
107 otherfiles = set()
107 otherfiles = set()
108 # osutil.listdir returns stat information which saves some rmdir/listdir
108 # osutil.listdir returns stat information which saves some rmdir/listdir
109 # syscalls.
109 # syscalls.
110 for name, mode in util.osutil.listdir(rootdir):
110 for name, mode in util.osutil.listdir(rootdir):
111 if stat.S_ISDIR(mode):
111 if stat.S_ISDIR(mode):
112 dirpath = os.path.join(rootdir, name)
112 dirpath = os.path.join(rootdir, name)
113 self._cleanupdirectory(dirpath)
113 self._cleanupdirectory(dirpath)
114
114
115 # Now that the directory specified by dirpath is potentially
115 # Now that the directory specified by dirpath is potentially
116 # empty, try and remove it.
116 # empty, try and remove it.
117 try:
117 try:
118 os.rmdir(dirpath)
118 os.rmdir(dirpath)
119 except OSError:
119 except OSError:
120 pass
120 pass
121
121
122 elif stat.S_ISREG(mode):
122 elif stat.S_ISREG(mode):
123 if name.endswith(b'_old'):
123 if name.endswith(b'_old'):
124 oldfiles.add(name[:-4])
124 oldfiles.add(name[:-4])
125 else:
125 else:
126 otherfiles.add(name)
126 otherfiles.add(name)
127
127
128 # Remove the files which end with suffix '_old' and have no
128 # Remove the files which end with suffix '_old' and have no
129 # corresponding file without the suffix '_old'. See addremotefilelognode
129 # corresponding file without the suffix '_old'. See addremotefilelognode
130 # method for the generation/purpose of files with '_old' suffix.
130 # method for the generation/purpose of files with '_old' suffix.
131 for filename in oldfiles - otherfiles:
131 for filename in oldfiles - otherfiles:
132 filepath = os.path.join(rootdir, filename + b'_old')
132 filepath = os.path.join(rootdir, filename + b'_old')
133 util.tryunlink(filepath)
133 util.tryunlink(filepath)
134
134
135 def _getfiles(self):
135 def _getfiles(self):
136 """Return a list of (filename, [node,...]) for all the revisions that
136 """Return a list of (filename, [node,...]) for all the revisions that
137 exist in the store.
137 exist in the store.
138
138
139 This is useful for obtaining a list of all the contents of the store
139 This is useful for obtaining a list of all the contents of the store
140 when performing a repack to another store, since the store API requires
140 when performing a repack to another store, since the store API requires
141 name+node keys and not namehash+node keys.
141 name+node keys and not namehash+node keys.
142 """
142 """
143 existing = {}
143 existing = {}
144 for filenamehash, node in self._listkeys():
144 for filenamehash, node in self._listkeys():
145 existing.setdefault(filenamehash, []).append(node)
145 existing.setdefault(filenamehash, []).append(node)
146
146
147 filenamemap = self._resolvefilenames(existing.keys())
147 filenamemap = self._resolvefilenames(existing.keys())
148
148
149 for filename, sha in filenamemap.items():
149 for filename, sha in filenamemap.items():
150 yield (filename, existing[sha])
150 yield (filename, existing[sha])
151
151
152 def _resolvefilenames(self, hashes):
152 def _resolvefilenames(self, hashes):
153 """Given a list of filename hashes that are present in the
153 """Given a list of filename hashes that are present in the
154 remotefilelog store, return a mapping from filename->hash.
154 remotefilelog store, return a mapping from filename->hash.
155
155
156 This is useful when converting remotefilelog blobs into other storage
156 This is useful when converting remotefilelog blobs into other storage
157 formats.
157 formats.
158 """
158 """
159 if not hashes:
159 if not hashes:
160 return {}
160 return {}
161
161
162 filenames = {}
162 filenames = {}
163 missingfilename = set(hashes)
163 missingfilename = set(hashes)
164
164
165 # Start with a full manifest, since it'll cover the majority of files
165 # Start with a full manifest, since it'll cover the majority of files
166 for filename in self.repo[b'tip'].manifest():
166 for filename in self.repo[b'tip'].manifest():
167 sha = hashutil.sha1(filename).digest()
167 sha = hashutil.sha1(filename).digest()
168 if sha in missingfilename:
168 if sha in missingfilename:
169 filenames[filename] = sha
169 filenames[filename] = sha
170 missingfilename.discard(sha)
170 missingfilename.discard(sha)
171
171
172 # Scan the changelog until we've found every file name
172 # Scan the changelog until we've found every file name
173 cl = self.repo.unfiltered().changelog
173 cl = self.repo.unfiltered().changelog
174 for rev in pycompat.xrange(len(cl) - 1, -1, -1):
174 for rev in range(len(cl) - 1, -1, -1):
175 if not missingfilename:
175 if not missingfilename:
176 break
176 break
177 files = cl.readfiles(cl.node(rev))
177 files = cl.readfiles(cl.node(rev))
178 for filename in files:
178 for filename in files:
179 sha = hashutil.sha1(filename).digest()
179 sha = hashutil.sha1(filename).digest()
180 if sha in missingfilename:
180 if sha in missingfilename:
181 filenames[filename] = sha
181 filenames[filename] = sha
182 missingfilename.discard(sha)
182 missingfilename.discard(sha)
183
183
184 return filenames
184 return filenames
185
185
186 def _getrepocachepath(self):
186 def _getrepocachepath(self):
187 return (
187 return (
188 os.path.join(self._path, self._reponame)
188 os.path.join(self._path, self._reponame)
189 if self._shared
189 if self._shared
190 else self._path
190 else self._path
191 )
191 )
192
192
193 def _listkeys(self):
193 def _listkeys(self):
194 """List all the remotefilelog keys that exist in the store.
194 """List all the remotefilelog keys that exist in the store.
195
195
196 Returns a iterator of (filename hash, filecontent hash) tuples.
196 Returns a iterator of (filename hash, filecontent hash) tuples.
197 """
197 """
198
198
199 for root, dirs, files in os.walk(self._getrepocachepath()):
199 for root, dirs, files in os.walk(self._getrepocachepath()):
200 for filename in files:
200 for filename in files:
201 if len(filename) != 40:
201 if len(filename) != 40:
202 continue
202 continue
203 node = filename
203 node = filename
204 if self._shared:
204 if self._shared:
205 # .../1a/85ffda..be21
205 # .../1a/85ffda..be21
206 filenamehash = root[-41:-39] + root[-38:]
206 filenamehash = root[-41:-39] + root[-38:]
207 else:
207 else:
208 filenamehash = root[-40:]
208 filenamehash = root[-40:]
209 yield (bin(filenamehash), bin(node))
209 yield (bin(filenamehash), bin(node))
210
210
211 def _getfilepath(self, name, node):
211 def _getfilepath(self, name, node):
212 node = hex(node)
212 node = hex(node)
213 if self._shared:
213 if self._shared:
214 key = shallowutil.getcachekey(self._reponame, name, node)
214 key = shallowutil.getcachekey(self._reponame, name, node)
215 else:
215 else:
216 key = shallowutil.getlocalkey(name, node)
216 key = shallowutil.getlocalkey(name, node)
217
217
218 return os.path.join(self._path, key)
218 return os.path.join(self._path, key)
219
219
220 def _getdata(self, name, node):
220 def _getdata(self, name, node):
221 filepath = self._getfilepath(name, node)
221 filepath = self._getfilepath(name, node)
222 try:
222 try:
223 data = shallowutil.readfile(filepath)
223 data = shallowutil.readfile(filepath)
224 if self._validatecache and not self._validatedata(data, filepath):
224 if self._validatecache and not self._validatedata(data, filepath):
225 if self._validatecachelog:
225 if self._validatecachelog:
226 with open(self._validatecachelog, b'ab+') as f:
226 with open(self._validatecachelog, b'ab+') as f:
227 f.write(b"corrupt %s during read\n" % filepath)
227 f.write(b"corrupt %s during read\n" % filepath)
228 os.rename(filepath, filepath + b".corrupt")
228 os.rename(filepath, filepath + b".corrupt")
229 raise KeyError(b"corrupt local cache file %s" % filepath)
229 raise KeyError(b"corrupt local cache file %s" % filepath)
230 except IOError:
230 except IOError:
231 raise KeyError(
231 raise KeyError(
232 b"no file found at %s for %s:%s" % (filepath, name, hex(node))
232 b"no file found at %s for %s:%s" % (filepath, name, hex(node))
233 )
233 )
234
234
235 return data
235 return data
236
236
237 def addremotefilelognode(self, name, node, data):
237 def addremotefilelognode(self, name, node, data):
238 filepath = self._getfilepath(name, node)
238 filepath = self._getfilepath(name, node)
239
239
240 oldumask = os.umask(0o002)
240 oldumask = os.umask(0o002)
241 try:
241 try:
242 # if this node already exists, save the old version for
242 # if this node already exists, save the old version for
243 # recovery/debugging purposes.
243 # recovery/debugging purposes.
244 if os.path.exists(filepath):
244 if os.path.exists(filepath):
245 newfilename = filepath + b'_old'
245 newfilename = filepath + b'_old'
246 # newfilename can be read-only and shutil.copy will fail.
246 # newfilename can be read-only and shutil.copy will fail.
247 # Delete newfilename to avoid it
247 # Delete newfilename to avoid it
248 if os.path.exists(newfilename):
248 if os.path.exists(newfilename):
249 shallowutil.unlinkfile(newfilename)
249 shallowutil.unlinkfile(newfilename)
250 shutil.copy(filepath, newfilename)
250 shutil.copy(filepath, newfilename)
251
251
252 shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath))
252 shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath))
253 shallowutil.writefile(filepath, data, readonly=True)
253 shallowutil.writefile(filepath, data, readonly=True)
254
254
255 if self._validatecache:
255 if self._validatecache:
256 if not self._validatekey(filepath, b'write'):
256 if not self._validatekey(filepath, b'write'):
257 raise error.Abort(
257 raise error.Abort(
258 _(b"local cache write was corrupted %s") % filepath
258 _(b"local cache write was corrupted %s") % filepath
259 )
259 )
260 finally:
260 finally:
261 os.umask(oldumask)
261 os.umask(oldumask)
262
262
263 def markrepo(self, path):
263 def markrepo(self, path):
264 """Call this to add the given repo path to the store's list of
264 """Call this to add the given repo path to the store's list of
265 repositories that are using it. This is useful later when doing garbage
265 repositories that are using it. This is useful later when doing garbage
266 collection, since it allows us to insecpt the repos to see what nodes
266 collection, since it allows us to insecpt the repos to see what nodes
267 they want to be kept alive in the store.
267 they want to be kept alive in the store.
268 """
268 """
269 repospath = os.path.join(self._path, b"repos")
269 repospath = os.path.join(self._path, b"repos")
270 with open(repospath, b'ab') as reposfile:
270 with open(repospath, b'ab') as reposfile:
271 reposfile.write(os.path.dirname(path) + b"\n")
271 reposfile.write(os.path.dirname(path) + b"\n")
272
272
273 repospathstat = os.stat(repospath)
273 repospathstat = os.stat(repospath)
274 if repospathstat.st_uid == self._uid:
274 if repospathstat.st_uid == self._uid:
275 os.chmod(repospath, 0o0664)
275 os.chmod(repospath, 0o0664)
276
276
277 def _validatekey(self, path, action):
277 def _validatekey(self, path, action):
278 with open(path, b'rb') as f:
278 with open(path, b'rb') as f:
279 data = f.read()
279 data = f.read()
280
280
281 if self._validatedata(data, path):
281 if self._validatedata(data, path):
282 return True
282 return True
283
283
284 if self._validatecachelog:
284 if self._validatecachelog:
285 with open(self._validatecachelog, b'ab+') as f:
285 with open(self._validatecachelog, b'ab+') as f:
286 f.write(b"corrupt %s during %s\n" % (path, action))
286 f.write(b"corrupt %s during %s\n" % (path, action))
287
287
288 os.rename(path, path + b".corrupt")
288 os.rename(path, path + b".corrupt")
289 return False
289 return False
290
290
291 def _validatedata(self, data, path):
291 def _validatedata(self, data, path):
292 try:
292 try:
293 if len(data) > 0:
293 if len(data) > 0:
294 # see remotefilelogserver.createfileblob for the format
294 # see remotefilelogserver.createfileblob for the format
295 offset, size, flags = shallowutil.parsesizeflags(data)
295 offset, size, flags = shallowutil.parsesizeflags(data)
296 if len(data) <= size:
296 if len(data) <= size:
297 # it is truncated
297 # it is truncated
298 return False
298 return False
299
299
300 # extract the node from the metadata
300 # extract the node from the metadata
301 offset += size
301 offset += size
302 datanode = data[offset : offset + 20]
302 datanode = data[offset : offset + 20]
303
303
304 # and compare against the path
304 # and compare against the path
305 if os.path.basename(path) == hex(datanode):
305 if os.path.basename(path) == hex(datanode):
306 # Content matches the intended path
306 # Content matches the intended path
307 return True
307 return True
308 return False
308 return False
309 except (ValueError, shallowutil.BadRemotefilelogHeader):
309 except (ValueError, shallowutil.BadRemotefilelogHeader):
310 pass
310 pass
311
311
312 return False
312 return False
313
313
314 def gc(self, keepkeys):
314 def gc(self, keepkeys):
315 ui = self.ui
315 ui = self.ui
316 cachepath = self._path
316 cachepath = self._path
317
317
318 # prune cache
318 # prune cache
319 queue = pycompat.queue.PriorityQueue()
319 queue = pycompat.queue.PriorityQueue()
320 originalsize = 0
320 originalsize = 0
321 size = 0
321 size = 0
322 count = 0
322 count = 0
323 removed = 0
323 removed = 0
324
324
325 # keep files newer than a day even if they aren't needed
325 # keep files newer than a day even if they aren't needed
326 limit = time.time() - (60 * 60 * 24)
326 limit = time.time() - (60 * 60 * 24)
327
327
328 progress = ui.makeprogress(
328 progress = ui.makeprogress(
329 _(b"removing unnecessary files"), unit=b"files"
329 _(b"removing unnecessary files"), unit=b"files"
330 )
330 )
331 progress.update(0)
331 progress.update(0)
332 for root, dirs, files in os.walk(cachepath):
332 for root, dirs, files in os.walk(cachepath):
333 for file in files:
333 for file in files:
334 if file == b'repos':
334 if file == b'repos':
335 continue
335 continue
336
336
337 # Don't delete pack files
337 # Don't delete pack files
338 if b'/packs/' in root:
338 if b'/packs/' in root:
339 continue
339 continue
340
340
341 progress.update(count)
341 progress.update(count)
342 path = os.path.join(root, file)
342 path = os.path.join(root, file)
343 key = os.path.relpath(path, cachepath)
343 key = os.path.relpath(path, cachepath)
344 count += 1
344 count += 1
345 try:
345 try:
346 pathstat = os.stat(path)
346 pathstat = os.stat(path)
347 except OSError as e:
347 except OSError as e:
348 # errno.ENOENT = no such file or directory
348 # errno.ENOENT = no such file or directory
349 if e.errno != errno.ENOENT:
349 if e.errno != errno.ENOENT:
350 raise
350 raise
351 msg = _(
351 msg = _(
352 b"warning: file %s was removed by another process\n"
352 b"warning: file %s was removed by another process\n"
353 )
353 )
354 ui.warn(msg % path)
354 ui.warn(msg % path)
355 continue
355 continue
356
356
357 originalsize += pathstat.st_size
357 originalsize += pathstat.st_size
358
358
359 if key in keepkeys or pathstat.st_atime > limit:
359 if key in keepkeys or pathstat.st_atime > limit:
360 queue.put((pathstat.st_atime, path, pathstat))
360 queue.put((pathstat.st_atime, path, pathstat))
361 size += pathstat.st_size
361 size += pathstat.st_size
362 else:
362 else:
363 try:
363 try:
364 shallowutil.unlinkfile(path)
364 shallowutil.unlinkfile(path)
365 except OSError as e:
365 except OSError as e:
366 # errno.ENOENT = no such file or directory
366 # errno.ENOENT = no such file or directory
367 if e.errno != errno.ENOENT:
367 if e.errno != errno.ENOENT:
368 raise
368 raise
369 msg = _(
369 msg = _(
370 b"warning: file %s was removed by another "
370 b"warning: file %s was removed by another "
371 b"process\n"
371 b"process\n"
372 )
372 )
373 ui.warn(msg % path)
373 ui.warn(msg % path)
374 continue
374 continue
375 removed += 1
375 removed += 1
376 progress.complete()
376 progress.complete()
377
377
378 # remove oldest files until under limit
378 # remove oldest files until under limit
379 limit = ui.configbytes(b"remotefilelog", b"cachelimit")
379 limit = ui.configbytes(b"remotefilelog", b"cachelimit")
380 if size > limit:
380 if size > limit:
381 excess = size - limit
381 excess = size - limit
382 progress = ui.makeprogress(
382 progress = ui.makeprogress(
383 _(b"enforcing cache limit"), unit=b"bytes", total=excess
383 _(b"enforcing cache limit"), unit=b"bytes", total=excess
384 )
384 )
385 removedexcess = 0
385 removedexcess = 0
386 while queue and size > limit and size > 0:
386 while queue and size > limit and size > 0:
387 progress.update(removedexcess)
387 progress.update(removedexcess)
388 atime, oldpath, oldpathstat = queue.get()
388 atime, oldpath, oldpathstat = queue.get()
389 try:
389 try:
390 shallowutil.unlinkfile(oldpath)
390 shallowutil.unlinkfile(oldpath)
391 except OSError as e:
391 except OSError as e:
392 # errno.ENOENT = no such file or directory
392 # errno.ENOENT = no such file or directory
393 if e.errno != errno.ENOENT:
393 if e.errno != errno.ENOENT:
394 raise
394 raise
395 msg = _(
395 msg = _(
396 b"warning: file %s was removed by another process\n"
396 b"warning: file %s was removed by another process\n"
397 )
397 )
398 ui.warn(msg % oldpath)
398 ui.warn(msg % oldpath)
399 size -= oldpathstat.st_size
399 size -= oldpathstat.st_size
400 removed += 1
400 removed += 1
401 removedexcess += oldpathstat.st_size
401 removedexcess += oldpathstat.st_size
402 progress.complete()
402 progress.complete()
403
403
404 ui.status(
404 ui.status(
405 _(b"finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
405 _(b"finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
406 % (
406 % (
407 removed,
407 removed,
408 count,
408 count,
409 float(originalsize) / 1024.0 / 1024.0 / 1024.0,
409 float(originalsize) / 1024.0 / 1024.0 / 1024.0,
410 float(size) / 1024.0 / 1024.0 / 1024.0,
410 float(size) / 1024.0 / 1024.0 / 1024.0,
411 )
411 )
412 )
412 )
413
413
414
414
415 class baseunionstore:
415 class baseunionstore:
416 def __init__(self, *args, **kwargs):
416 def __init__(self, *args, **kwargs):
417 # If one of the functions that iterates all of the stores is about to
417 # If one of the functions that iterates all of the stores is about to
418 # throw a KeyError, try this many times with a full refresh between
418 # throw a KeyError, try this many times with a full refresh between
419 # attempts. A repack operation may have moved data from one store to
419 # attempts. A repack operation may have moved data from one store to
420 # another while we were running.
420 # another while we were running.
421 self.numattempts = kwargs.get('numretries', 0) + 1
421 self.numattempts = kwargs.get('numretries', 0) + 1
422 # If not-None, call this function on every retry and if the attempts are
422 # If not-None, call this function on every retry and if the attempts are
423 # exhausted.
423 # exhausted.
424 self.retrylog = kwargs.get('retrylog', None)
424 self.retrylog = kwargs.get('retrylog', None)
425
425
426 def markforrefresh(self):
426 def markforrefresh(self):
427 for store in self.stores:
427 for store in self.stores:
428 if util.safehasattr(store, b'markforrefresh'):
428 if util.safehasattr(store, b'markforrefresh'):
429 store.markforrefresh()
429 store.markforrefresh()
430
430
431 @staticmethod
431 @staticmethod
432 def retriable(fn):
432 def retriable(fn):
433 def noop(*args):
433 def noop(*args):
434 pass
434 pass
435
435
436 def wrapped(self, *args, **kwargs):
436 def wrapped(self, *args, **kwargs):
437 retrylog = self.retrylog or noop
437 retrylog = self.retrylog or noop
438 funcname = fn.__name__
438 funcname = fn.__name__
439 i = 0
439 i = 0
440 while i < self.numattempts:
440 while i < self.numattempts:
441 if i > 0:
441 if i > 0:
442 retrylog(
442 retrylog(
443 b're-attempting (n=%d) %s\n'
443 b're-attempting (n=%d) %s\n'
444 % (i, pycompat.sysbytes(funcname))
444 % (i, pycompat.sysbytes(funcname))
445 )
445 )
446 self.markforrefresh()
446 self.markforrefresh()
447 i += 1
447 i += 1
448 try:
448 try:
449 return fn(self, *args, **kwargs)
449 return fn(self, *args, **kwargs)
450 except KeyError:
450 except KeyError:
451 if i == self.numattempts:
451 if i == self.numattempts:
452 # retries exhausted
452 # retries exhausted
453 retrylog(
453 retrylog(
454 b'retries exhausted in %s, raising KeyError\n'
454 b'retries exhausted in %s, raising KeyError\n'
455 % pycompat.sysbytes(funcname)
455 % pycompat.sysbytes(funcname)
456 )
456 )
457 raise
457 raise
458
458
459 return wrapped
459 return wrapped
@@ -1,397 +1,396 b''
1 import threading
1 import threading
2
2
3 from mercurial.node import (
3 from mercurial.node import (
4 hex,
4 hex,
5 sha1nodeconstants,
5 sha1nodeconstants,
6 )
6 )
7 from mercurial.pycompat import getattr
7 from mercurial.pycompat import getattr
8 from mercurial import (
8 from mercurial import (
9 mdiff,
9 mdiff,
10 pycompat,
11 revlog,
10 revlog,
12 )
11 )
13 from . import (
12 from . import (
14 basestore,
13 basestore,
15 constants,
14 constants,
16 shallowutil,
15 shallowutil,
17 )
16 )
18
17
19
18
20 class ChainIndicies:
19 class ChainIndicies:
21 """A static class for easy reference to the delta chain indicies."""
20 """A static class for easy reference to the delta chain indicies."""
22
21
23 # The filename of this revision delta
22 # The filename of this revision delta
24 NAME = 0
23 NAME = 0
25 # The mercurial file node for this revision delta
24 # The mercurial file node for this revision delta
26 NODE = 1
25 NODE = 1
27 # The filename of the delta base's revision. This is useful when delta
26 # The filename of the delta base's revision. This is useful when delta
28 # between different files (like in the case of a move or copy, we can delta
27 # between different files (like in the case of a move or copy, we can delta
29 # against the original file content).
28 # against the original file content).
30 BASENAME = 2
29 BASENAME = 2
31 # The mercurial file node for the delta base revision. This is the nullid if
30 # The mercurial file node for the delta base revision. This is the nullid if
32 # this delta is a full text.
31 # this delta is a full text.
33 BASENODE = 3
32 BASENODE = 3
34 # The actual delta or full text data.
33 # The actual delta or full text data.
35 DATA = 4
34 DATA = 4
36
35
37
36
38 class unioncontentstore(basestore.baseunionstore):
37 class unioncontentstore(basestore.baseunionstore):
39 def __init__(self, *args, **kwargs):
38 def __init__(self, *args, **kwargs):
40 super(unioncontentstore, self).__init__(*args, **kwargs)
39 super(unioncontentstore, self).__init__(*args, **kwargs)
41
40
42 self.stores = args
41 self.stores = args
43 self.writestore = kwargs.get('writestore')
42 self.writestore = kwargs.get('writestore')
44
43
45 # If allowincomplete==True then the union store can return partial
44 # If allowincomplete==True then the union store can return partial
46 # delta chains, otherwise it will throw a KeyError if a full
45 # delta chains, otherwise it will throw a KeyError if a full
47 # deltachain can't be found.
46 # deltachain can't be found.
48 self.allowincomplete = kwargs.get('allowincomplete', False)
47 self.allowincomplete = kwargs.get('allowincomplete', False)
49
48
50 def get(self, name, node):
49 def get(self, name, node):
51 """Fetches the full text revision contents of the given name+node pair.
50 """Fetches the full text revision contents of the given name+node pair.
52 If the full text doesn't exist, throws a KeyError.
51 If the full text doesn't exist, throws a KeyError.
53
52
54 Under the hood, this uses getdeltachain() across all the stores to build
53 Under the hood, this uses getdeltachain() across all the stores to build
55 up a full chain to produce the full text.
54 up a full chain to produce the full text.
56 """
55 """
57 chain = self.getdeltachain(name, node)
56 chain = self.getdeltachain(name, node)
58
57
59 if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
58 if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
60 # If we didn't receive a full chain, throw
59 # If we didn't receive a full chain, throw
61 raise KeyError((name, hex(node)))
60 raise KeyError((name, hex(node)))
62
61
63 # The last entry in the chain is a full text, so we start our delta
62 # The last entry in the chain is a full text, so we start our delta
64 # applies with that.
63 # applies with that.
65 fulltext = chain.pop()[ChainIndicies.DATA]
64 fulltext = chain.pop()[ChainIndicies.DATA]
66
65
67 text = fulltext
66 text = fulltext
68 while chain:
67 while chain:
69 delta = chain.pop()[ChainIndicies.DATA]
68 delta = chain.pop()[ChainIndicies.DATA]
70 text = mdiff.patches(text, [delta])
69 text = mdiff.patches(text, [delta])
71
70
72 return text
71 return text
73
72
74 @basestore.baseunionstore.retriable
73 @basestore.baseunionstore.retriable
75 def getdelta(self, name, node):
74 def getdelta(self, name, node):
76 """Return the single delta entry for the given name/node pair."""
75 """Return the single delta entry for the given name/node pair."""
77 for store in self.stores:
76 for store in self.stores:
78 try:
77 try:
79 return store.getdelta(name, node)
78 return store.getdelta(name, node)
80 except KeyError:
79 except KeyError:
81 pass
80 pass
82
81
83 raise KeyError((name, hex(node)))
82 raise KeyError((name, hex(node)))
84
83
85 def getdeltachain(self, name, node):
84 def getdeltachain(self, name, node):
86 """Returns the deltachain for the given name/node pair.
85 """Returns the deltachain for the given name/node pair.
87
86
88 Returns an ordered list of:
87 Returns an ordered list of:
89
88
90 [(name, node, deltabasename, deltabasenode, deltacontent),...]
89 [(name, node, deltabasename, deltabasenode, deltacontent),...]
91
90
92 where the chain is terminated by a full text entry with a nullid
91 where the chain is terminated by a full text entry with a nullid
93 deltabasenode.
92 deltabasenode.
94 """
93 """
95 chain = self._getpartialchain(name, node)
94 chain = self._getpartialchain(name, node)
96 while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
95 while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
97 x, x, deltabasename, deltabasenode, x = chain[-1]
96 x, x, deltabasename, deltabasenode, x = chain[-1]
98 try:
97 try:
99 morechain = self._getpartialchain(deltabasename, deltabasenode)
98 morechain = self._getpartialchain(deltabasename, deltabasenode)
100 chain.extend(morechain)
99 chain.extend(morechain)
101 except KeyError:
100 except KeyError:
102 # If we allow incomplete chains, don't throw.
101 # If we allow incomplete chains, don't throw.
103 if not self.allowincomplete:
102 if not self.allowincomplete:
104 raise
103 raise
105 break
104 break
106
105
107 return chain
106 return chain
108
107
109 @basestore.baseunionstore.retriable
108 @basestore.baseunionstore.retriable
110 def getmeta(self, name, node):
109 def getmeta(self, name, node):
111 """Returns the metadata dict for given node."""
110 """Returns the metadata dict for given node."""
112 for store in self.stores:
111 for store in self.stores:
113 try:
112 try:
114 return store.getmeta(name, node)
113 return store.getmeta(name, node)
115 except KeyError:
114 except KeyError:
116 pass
115 pass
117 raise KeyError((name, hex(node)))
116 raise KeyError((name, hex(node)))
118
117
119 def getmetrics(self):
118 def getmetrics(self):
120 metrics = [s.getmetrics() for s in self.stores]
119 metrics = [s.getmetrics() for s in self.stores]
121 return shallowutil.sumdicts(*metrics)
120 return shallowutil.sumdicts(*metrics)
122
121
123 @basestore.baseunionstore.retriable
122 @basestore.baseunionstore.retriable
124 def _getpartialchain(self, name, node):
123 def _getpartialchain(self, name, node):
125 """Returns a partial delta chain for the given name/node pair.
124 """Returns a partial delta chain for the given name/node pair.
126
125
127 A partial chain is a chain that may not be terminated in a full-text.
126 A partial chain is a chain that may not be terminated in a full-text.
128 """
127 """
129 for store in self.stores:
128 for store in self.stores:
130 try:
129 try:
131 return store.getdeltachain(name, node)
130 return store.getdeltachain(name, node)
132 except KeyError:
131 except KeyError:
133 pass
132 pass
134
133
135 raise KeyError((name, hex(node)))
134 raise KeyError((name, hex(node)))
136
135
137 def add(self, name, node, data):
136 def add(self, name, node, data):
138 raise RuntimeError(
137 raise RuntimeError(
139 b"cannot add content only to remotefilelog contentstore"
138 b"cannot add content only to remotefilelog contentstore"
140 )
139 )
141
140
142 def getmissing(self, keys):
141 def getmissing(self, keys):
143 missing = keys
142 missing = keys
144 for store in self.stores:
143 for store in self.stores:
145 if missing:
144 if missing:
146 missing = store.getmissing(missing)
145 missing = store.getmissing(missing)
147 return missing
146 return missing
148
147
149 def addremotefilelognode(self, name, node, data):
148 def addremotefilelognode(self, name, node, data):
150 if self.writestore:
149 if self.writestore:
151 self.writestore.addremotefilelognode(name, node, data)
150 self.writestore.addremotefilelognode(name, node, data)
152 else:
151 else:
153 raise RuntimeError(b"no writable store configured")
152 raise RuntimeError(b"no writable store configured")
154
153
155 def markledger(self, ledger, options=None):
154 def markledger(self, ledger, options=None):
156 for store in self.stores:
155 for store in self.stores:
157 store.markledger(ledger, options)
156 store.markledger(ledger, options)
158
157
159
158
160 class remotefilelogcontentstore(basestore.basestore):
159 class remotefilelogcontentstore(basestore.basestore):
161 def __init__(self, *args, **kwargs):
160 def __init__(self, *args, **kwargs):
162 super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
161 super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
163 self._threaddata = threading.local()
162 self._threaddata = threading.local()
164
163
165 def get(self, name, node):
164 def get(self, name, node):
166 # return raw revision text
165 # return raw revision text
167 data = self._getdata(name, node)
166 data = self._getdata(name, node)
168
167
169 offset, size, flags = shallowutil.parsesizeflags(data)
168 offset, size, flags = shallowutil.parsesizeflags(data)
170 content = data[offset : offset + size]
169 content = data[offset : offset + size]
171
170
172 ancestormap = shallowutil.ancestormap(data)
171 ancestormap = shallowutil.ancestormap(data)
173 p1, p2, linknode, copyfrom = ancestormap[node]
172 p1, p2, linknode, copyfrom = ancestormap[node]
174 copyrev = None
173 copyrev = None
175 if copyfrom:
174 if copyfrom:
176 copyrev = hex(p1)
175 copyrev = hex(p1)
177
176
178 self._updatemetacache(node, size, flags)
177 self._updatemetacache(node, size, flags)
179
178
180 # lfs tracks renames in its own metadata, remove hg copy metadata,
179 # lfs tracks renames in its own metadata, remove hg copy metadata,
181 # because copy metadata will be re-added by lfs flag processor.
180 # because copy metadata will be re-added by lfs flag processor.
182 if flags & revlog.REVIDX_EXTSTORED:
181 if flags & revlog.REVIDX_EXTSTORED:
183 copyrev = copyfrom = None
182 copyrev = copyfrom = None
184 revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
183 revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
185 return revision
184 return revision
186
185
187 def getdelta(self, name, node):
186 def getdelta(self, name, node):
188 # Since remotefilelog content stores only contain full texts, just
187 # Since remotefilelog content stores only contain full texts, just
189 # return that.
188 # return that.
190 revision = self.get(name, node)
189 revision = self.get(name, node)
191 return (
190 return (
192 revision,
191 revision,
193 name,
192 name,
194 sha1nodeconstants.nullid,
193 sha1nodeconstants.nullid,
195 self.getmeta(name, node),
194 self.getmeta(name, node),
196 )
195 )
197
196
198 def getdeltachain(self, name, node):
197 def getdeltachain(self, name, node):
199 # Since remotefilelog content stores just contain full texts, we return
198 # Since remotefilelog content stores just contain full texts, we return
200 # a fake delta chain that just consists of a single full text revision.
199 # a fake delta chain that just consists of a single full text revision.
201 # The nullid in the deltabasenode slot indicates that the revision is a
200 # The nullid in the deltabasenode slot indicates that the revision is a
202 # fulltext.
201 # fulltext.
203 revision = self.get(name, node)
202 revision = self.get(name, node)
204 return [(name, node, None, sha1nodeconstants.nullid, revision)]
203 return [(name, node, None, sha1nodeconstants.nullid, revision)]
205
204
206 def getmeta(self, name, node):
205 def getmeta(self, name, node):
207 self._sanitizemetacache()
206 self._sanitizemetacache()
208 if node != self._threaddata.metacache[0]:
207 if node != self._threaddata.metacache[0]:
209 data = self._getdata(name, node)
208 data = self._getdata(name, node)
210 offset, size, flags = shallowutil.parsesizeflags(data)
209 offset, size, flags = shallowutil.parsesizeflags(data)
211 self._updatemetacache(node, size, flags)
210 self._updatemetacache(node, size, flags)
212 return self._threaddata.metacache[1]
211 return self._threaddata.metacache[1]
213
212
214 def add(self, name, node, data):
213 def add(self, name, node, data):
215 raise RuntimeError(
214 raise RuntimeError(
216 b"cannot add content only to remotefilelog contentstore"
215 b"cannot add content only to remotefilelog contentstore"
217 )
216 )
218
217
219 def _sanitizemetacache(self):
218 def _sanitizemetacache(self):
220 metacache = getattr(self._threaddata, 'metacache', None)
219 metacache = getattr(self._threaddata, 'metacache', None)
221 if metacache is None:
220 if metacache is None:
222 self._threaddata.metacache = (None, None) # (node, meta)
221 self._threaddata.metacache = (None, None) # (node, meta)
223
222
224 def _updatemetacache(self, node, size, flags):
223 def _updatemetacache(self, node, size, flags):
225 self._sanitizemetacache()
224 self._sanitizemetacache()
226 if node == self._threaddata.metacache[0]:
225 if node == self._threaddata.metacache[0]:
227 return
226 return
228 meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
227 meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
229 self._threaddata.metacache = (node, meta)
228 self._threaddata.metacache = (node, meta)
230
229
231
230
232 class remotecontentstore:
231 class remotecontentstore:
233 def __init__(self, ui, fileservice, shared):
232 def __init__(self, ui, fileservice, shared):
234 self._fileservice = fileservice
233 self._fileservice = fileservice
235 # type(shared) is usually remotefilelogcontentstore
234 # type(shared) is usually remotefilelogcontentstore
236 self._shared = shared
235 self._shared = shared
237
236
238 def get(self, name, node):
237 def get(self, name, node):
239 self._fileservice.prefetch(
238 self._fileservice.prefetch(
240 [(name, hex(node))], force=True, fetchdata=True
239 [(name, hex(node))], force=True, fetchdata=True
241 )
240 )
242 return self._shared.get(name, node)
241 return self._shared.get(name, node)
243
242
244 def getdelta(self, name, node):
243 def getdelta(self, name, node):
245 revision = self.get(name, node)
244 revision = self.get(name, node)
246 return (
245 return (
247 revision,
246 revision,
248 name,
247 name,
249 sha1nodeconstants.nullid,
248 sha1nodeconstants.nullid,
250 self._shared.getmeta(name, node),
249 self._shared.getmeta(name, node),
251 )
250 )
252
251
253 def getdeltachain(self, name, node):
252 def getdeltachain(self, name, node):
254 # Since our remote content stores just contain full texts, we return a
253 # Since our remote content stores just contain full texts, we return a
255 # fake delta chain that just consists of a single full text revision.
254 # fake delta chain that just consists of a single full text revision.
256 # The nullid in the deltabasenode slot indicates that the revision is a
255 # The nullid in the deltabasenode slot indicates that the revision is a
257 # fulltext.
256 # fulltext.
258 revision = self.get(name, node)
257 revision = self.get(name, node)
259 return [(name, node, None, sha1nodeconstants.nullid, revision)]
258 return [(name, node, None, sha1nodeconstants.nullid, revision)]
260
259
261 def getmeta(self, name, node):
260 def getmeta(self, name, node):
262 self._fileservice.prefetch(
261 self._fileservice.prefetch(
263 [(name, hex(node))], force=True, fetchdata=True
262 [(name, hex(node))], force=True, fetchdata=True
264 )
263 )
265 return self._shared.getmeta(name, node)
264 return self._shared.getmeta(name, node)
266
265
267 def add(self, name, node, data):
266 def add(self, name, node, data):
268 raise RuntimeError(b"cannot add to a remote store")
267 raise RuntimeError(b"cannot add to a remote store")
269
268
270 def getmissing(self, keys):
269 def getmissing(self, keys):
271 return keys
270 return keys
272
271
273 def markledger(self, ledger, options=None):
272 def markledger(self, ledger, options=None):
274 pass
273 pass
275
274
276
275
277 class manifestrevlogstore:
276 class manifestrevlogstore:
278 def __init__(self, repo):
277 def __init__(self, repo):
279 self._store = repo.store
278 self._store = repo.store
280 self._svfs = repo.svfs
279 self._svfs = repo.svfs
281 self._revlogs = dict()
280 self._revlogs = dict()
282 self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i')
281 self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i')
283 self._repackstartlinkrev = 0
282 self._repackstartlinkrev = 0
284
283
285 def get(self, name, node):
284 def get(self, name, node):
286 return self._revlog(name).rawdata(node)
285 return self._revlog(name).rawdata(node)
287
286
288 def getdelta(self, name, node):
287 def getdelta(self, name, node):
289 revision = self.get(name, node)
288 revision = self.get(name, node)
290 return revision, name, self._cl.nullid, self.getmeta(name, node)
289 return revision, name, self._cl.nullid, self.getmeta(name, node)
291
290
292 def getdeltachain(self, name, node):
291 def getdeltachain(self, name, node):
293 revision = self.get(name, node)
292 revision = self.get(name, node)
294 return [(name, node, None, self._cl.nullid, revision)]
293 return [(name, node, None, self._cl.nullid, revision)]
295
294
296 def getmeta(self, name, node):
295 def getmeta(self, name, node):
297 rl = self._revlog(name)
296 rl = self._revlog(name)
298 rev = rl.rev(node)
297 rev = rl.rev(node)
299 return {
298 return {
300 constants.METAKEYFLAG: rl.flags(rev),
299 constants.METAKEYFLAG: rl.flags(rev),
301 constants.METAKEYSIZE: rl.rawsize(rev),
300 constants.METAKEYSIZE: rl.rawsize(rev),
302 }
301 }
303
302
304 def getancestors(self, name, node, known=None):
303 def getancestors(self, name, node, known=None):
305 if known is None:
304 if known is None:
306 known = set()
305 known = set()
307 if node in known:
306 if node in known:
308 return []
307 return []
309
308
310 rl = self._revlog(name)
309 rl = self._revlog(name)
311 ancestors = {}
310 ancestors = {}
312 missing = {node}
311 missing = {node}
313 for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
312 for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
314 ancnode = rl.node(ancrev)
313 ancnode = rl.node(ancrev)
315 missing.discard(ancnode)
314 missing.discard(ancnode)
316
315
317 p1, p2 = rl.parents(ancnode)
316 p1, p2 = rl.parents(ancnode)
318 if p1 != self._cl.nullid and p1 not in known:
317 if p1 != self._cl.nullid and p1 not in known:
319 missing.add(p1)
318 missing.add(p1)
320 if p2 != self._cl.nullid and p2 not in known:
319 if p2 != self._cl.nullid and p2 not in known:
321 missing.add(p2)
320 missing.add(p2)
322
321
323 linknode = self._cl.node(rl.linkrev(ancrev))
322 linknode = self._cl.node(rl.linkrev(ancrev))
324 ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'')
323 ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'')
325 if not missing:
324 if not missing:
326 break
325 break
327 return ancestors
326 return ancestors
328
327
329 def getnodeinfo(self, name, node):
328 def getnodeinfo(self, name, node):
330 cl = self._cl
329 cl = self._cl
331 rl = self._revlog(name)
330 rl = self._revlog(name)
332 parents = rl.parents(node)
331 parents = rl.parents(node)
333 linkrev = rl.linkrev(rl.rev(node))
332 linkrev = rl.linkrev(rl.rev(node))
334 return (parents[0], parents[1], cl.node(linkrev), None)
333 return (parents[0], parents[1], cl.node(linkrev), None)
335
334
336 def add(self, *args):
335 def add(self, *args):
337 raise RuntimeError(b"cannot add to a revlog store")
336 raise RuntimeError(b"cannot add to a revlog store")
338
337
339 def _revlog(self, name):
338 def _revlog(self, name):
340 rl = self._revlogs.get(name)
339 rl = self._revlogs.get(name)
341 if rl is None:
340 if rl is None:
342 revlogname = b'00manifesttree'
341 revlogname = b'00manifesttree'
343 if name != b'':
342 if name != b'':
344 revlogname = b'meta/%s/00manifest' % name
343 revlogname = b'meta/%s/00manifest' % name
345 rl = revlog.revlog(self._svfs, radix=revlogname)
344 rl = revlog.revlog(self._svfs, radix=revlogname)
346 self._revlogs[name] = rl
345 self._revlogs[name] = rl
347 return rl
346 return rl
348
347
349 def getmissing(self, keys):
348 def getmissing(self, keys):
350 missing = []
349 missing = []
351 for name, node in keys:
350 for name, node in keys:
352 mfrevlog = self._revlog(name)
351 mfrevlog = self._revlog(name)
353 if node not in mfrevlog.nodemap:
352 if node not in mfrevlog.nodemap:
354 missing.append((name, node))
353 missing.append((name, node))
355
354
356 return missing
355 return missing
357
356
358 def setrepacklinkrevrange(self, startrev, endrev):
357 def setrepacklinkrevrange(self, startrev, endrev):
359 self._repackstartlinkrev = startrev
358 self._repackstartlinkrev = startrev
360 self._repackendlinkrev = endrev
359 self._repackendlinkrev = endrev
361
360
362 def markledger(self, ledger, options=None):
361 def markledger(self, ledger, options=None):
363 if options and options.get(constants.OPTION_PACKSONLY):
362 if options and options.get(constants.OPTION_PACKSONLY):
364 return
363 return
365 treename = b''
364 treename = b''
366 rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
365 rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
367 startlinkrev = self._repackstartlinkrev
366 startlinkrev = self._repackstartlinkrev
368 endlinkrev = self._repackendlinkrev
367 endlinkrev = self._repackendlinkrev
369 for rev in pycompat.xrange(len(rl) - 1, -1, -1):
368 for rev in range(len(rl) - 1, -1, -1):
370 linkrev = rl.linkrev(rev)
369 linkrev = rl.linkrev(rev)
371 if linkrev < startlinkrev:
370 if linkrev < startlinkrev:
372 break
371 break
373 if linkrev > endlinkrev:
372 if linkrev > endlinkrev:
374 continue
373 continue
375 node = rl.node(rev)
374 node = rl.node(rev)
376 ledger.markdataentry(self, treename, node)
375 ledger.markdataentry(self, treename, node)
377 ledger.markhistoryentry(self, treename, node)
376 ledger.markhistoryentry(self, treename, node)
378
377
379 for t, path, size in self._store.datafiles():
378 for t, path, size in self._store.datafiles():
380 if path[:5] != b'meta/' or path[-2:] != b'.i':
379 if path[:5] != b'meta/' or path[-2:] != b'.i':
381 continue
380 continue
382
381
383 treename = path[5 : -len(b'/00manifest')]
382 treename = path[5 : -len(b'/00manifest')]
384
383
385 rl = revlog.revlog(self._svfs, indexfile=path[:-2])
384 rl = revlog.revlog(self._svfs, indexfile=path[:-2])
386 for rev in pycompat.xrange(len(rl) - 1, -1, -1):
385 for rev in range(len(rl) - 1, -1, -1):
387 linkrev = rl.linkrev(rev)
386 linkrev = rl.linkrev(rev)
388 if linkrev < startlinkrev:
387 if linkrev < startlinkrev:
389 break
388 break
390 if linkrev > endlinkrev:
389 if linkrev > endlinkrev:
391 continue
390 continue
392 node = rl.node(rev)
391 node = rl.node(rev)
393 ledger.markdataentry(self, treename, node)
392 ledger.markdataentry(self, treename, node)
394 ledger.markhistoryentry(self, treename, node)
393 ledger.markhistoryentry(self, treename, node)
395
394
396 def cleanup(self, ledger):
395 def cleanup(self, ledger):
397 pass
396 pass
@@ -1,474 +1,473 b''
1 import struct
1 import struct
2 import zlib
2 import zlib
3
3
4 from mercurial.node import (
4 from mercurial.node import (
5 hex,
5 hex,
6 sha1nodeconstants,
6 sha1nodeconstants,
7 )
7 )
8 from mercurial.i18n import _
8 from mercurial.i18n import _
9 from mercurial import (
9 from mercurial import (
10 pycompat,
11 util,
10 util,
12 )
11 )
13 from . import (
12 from . import (
14 basepack,
13 basepack,
15 constants,
14 constants,
16 shallowutil,
15 shallowutil,
17 )
16 )
18
17
19 NODELENGTH = 20
18 NODELENGTH = 20
20
19
21 # The indicator value in the index for a fulltext entry.
20 # The indicator value in the index for a fulltext entry.
22 FULLTEXTINDEXMARK = -1
21 FULLTEXTINDEXMARK = -1
23 NOBASEINDEXMARK = -2
22 NOBASEINDEXMARK = -2
24
23
25 INDEXSUFFIX = b'.dataidx'
24 INDEXSUFFIX = b'.dataidx'
26 PACKSUFFIX = b'.datapack'
25 PACKSUFFIX = b'.datapack'
27
26
28
27
29 class datapackstore(basepack.basepackstore):
28 class datapackstore(basepack.basepackstore):
30 INDEXSUFFIX = INDEXSUFFIX
29 INDEXSUFFIX = INDEXSUFFIX
31 PACKSUFFIX = PACKSUFFIX
30 PACKSUFFIX = PACKSUFFIX
32
31
33 def __init__(self, ui, path):
32 def __init__(self, ui, path):
34 super(datapackstore, self).__init__(ui, path)
33 super(datapackstore, self).__init__(ui, path)
35
34
36 def getpack(self, path):
35 def getpack(self, path):
37 return datapack(path)
36 return datapack(path)
38
37
39 def get(self, name, node):
38 def get(self, name, node):
40 raise RuntimeError(b"must use getdeltachain with datapackstore")
39 raise RuntimeError(b"must use getdeltachain with datapackstore")
41
40
42 def getmeta(self, name, node):
41 def getmeta(self, name, node):
43 for pack in self.packs:
42 for pack in self.packs:
44 try:
43 try:
45 return pack.getmeta(name, node)
44 return pack.getmeta(name, node)
46 except KeyError:
45 except KeyError:
47 pass
46 pass
48
47
49 for pack in self.refresh():
48 for pack in self.refresh():
50 try:
49 try:
51 return pack.getmeta(name, node)
50 return pack.getmeta(name, node)
52 except KeyError:
51 except KeyError:
53 pass
52 pass
54
53
55 raise KeyError((name, hex(node)))
54 raise KeyError((name, hex(node)))
56
55
57 def getdelta(self, name, node):
56 def getdelta(self, name, node):
58 for pack in self.packs:
57 for pack in self.packs:
59 try:
58 try:
60 return pack.getdelta(name, node)
59 return pack.getdelta(name, node)
61 except KeyError:
60 except KeyError:
62 pass
61 pass
63
62
64 for pack in self.refresh():
63 for pack in self.refresh():
65 try:
64 try:
66 return pack.getdelta(name, node)
65 return pack.getdelta(name, node)
67 except KeyError:
66 except KeyError:
68 pass
67 pass
69
68
70 raise KeyError((name, hex(node)))
69 raise KeyError((name, hex(node)))
71
70
72 def getdeltachain(self, name, node):
71 def getdeltachain(self, name, node):
73 for pack in self.packs:
72 for pack in self.packs:
74 try:
73 try:
75 return pack.getdeltachain(name, node)
74 return pack.getdeltachain(name, node)
76 except KeyError:
75 except KeyError:
77 pass
76 pass
78
77
79 for pack in self.refresh():
78 for pack in self.refresh():
80 try:
79 try:
81 return pack.getdeltachain(name, node)
80 return pack.getdeltachain(name, node)
82 except KeyError:
81 except KeyError:
83 pass
82 pass
84
83
85 raise KeyError((name, hex(node)))
84 raise KeyError((name, hex(node)))
86
85
87 def add(self, name, node, data):
86 def add(self, name, node, data):
88 raise RuntimeError(b"cannot add to datapackstore")
87 raise RuntimeError(b"cannot add to datapackstore")
89
88
90
89
91 class datapack(basepack.basepack):
90 class datapack(basepack.basepack):
92 INDEXSUFFIX = INDEXSUFFIX
91 INDEXSUFFIX = INDEXSUFFIX
93 PACKSUFFIX = PACKSUFFIX
92 PACKSUFFIX = PACKSUFFIX
94
93
95 # Format is <node><delta offset><pack data offset><pack data size>
94 # Format is <node><delta offset><pack data offset><pack data size>
96 # See the mutabledatapack doccomment for more details.
95 # See the mutabledatapack doccomment for more details.
97 INDEXFORMAT = b'!20siQQ'
96 INDEXFORMAT = b'!20siQQ'
98 INDEXENTRYLENGTH = 40
97 INDEXENTRYLENGTH = 40
99
98
100 SUPPORTED_VERSIONS = [2]
99 SUPPORTED_VERSIONS = [2]
101
100
102 def getmissing(self, keys):
101 def getmissing(self, keys):
103 missing = []
102 missing = []
104 for name, node in keys:
103 for name, node in keys:
105 value = self._find(node)
104 value = self._find(node)
106 if not value:
105 if not value:
107 missing.append((name, node))
106 missing.append((name, node))
108
107
109 return missing
108 return missing
110
109
111 def get(self, name, node):
110 def get(self, name, node):
112 raise RuntimeError(
111 raise RuntimeError(
113 b"must use getdeltachain with datapack (%s:%s)" % (name, hex(node))
112 b"must use getdeltachain with datapack (%s:%s)" % (name, hex(node))
114 )
113 )
115
114
116 def getmeta(self, name, node):
115 def getmeta(self, name, node):
117 value = self._find(node)
116 value = self._find(node)
118 if value is None:
117 if value is None:
119 raise KeyError((name, hex(node)))
118 raise KeyError((name, hex(node)))
120
119
121 node, deltabaseoffset, offset, size = value
120 node, deltabaseoffset, offset, size = value
122 rawentry = self._data[offset : offset + size]
121 rawentry = self._data[offset : offset + size]
123
122
124 # see docstring of mutabledatapack for the format
123 # see docstring of mutabledatapack for the format
125 offset = 0
124 offset = 0
126 offset += struct.unpack_from(b'!H', rawentry, offset)[0] + 2 # filename
125 offset += struct.unpack_from(b'!H', rawentry, offset)[0] + 2 # filename
127 offset += 40 # node, deltabase node
126 offset += 40 # node, deltabase node
128 offset += struct.unpack_from(b'!Q', rawentry, offset)[0] + 8 # delta
127 offset += struct.unpack_from(b'!Q', rawentry, offset)[0] + 8 # delta
129
128
130 metalen = struct.unpack_from(b'!I', rawentry, offset)[0]
129 metalen = struct.unpack_from(b'!I', rawentry, offset)[0]
131 offset += 4
130 offset += 4
132
131
133 meta = shallowutil.parsepackmeta(rawentry[offset : offset + metalen])
132 meta = shallowutil.parsepackmeta(rawentry[offset : offset + metalen])
134
133
135 return meta
134 return meta
136
135
137 def getdelta(self, name, node):
136 def getdelta(self, name, node):
138 value = self._find(node)
137 value = self._find(node)
139 if value is None:
138 if value is None:
140 raise KeyError((name, hex(node)))
139 raise KeyError((name, hex(node)))
141
140
142 node, deltabaseoffset, offset, size = value
141 node, deltabaseoffset, offset, size = value
143 entry = self._readentry(offset, size, getmeta=True)
142 entry = self._readentry(offset, size, getmeta=True)
144 filename, node, deltabasenode, delta, meta = entry
143 filename, node, deltabasenode, delta, meta = entry
145
144
146 # If we've read a lot of data from the mmap, free some memory.
145 # If we've read a lot of data from the mmap, free some memory.
147 self.freememory()
146 self.freememory()
148
147
149 return delta, filename, deltabasenode, meta
148 return delta, filename, deltabasenode, meta
150
149
151 def getdeltachain(self, name, node):
150 def getdeltachain(self, name, node):
152 value = self._find(node)
151 value = self._find(node)
153 if value is None:
152 if value is None:
154 raise KeyError((name, hex(node)))
153 raise KeyError((name, hex(node)))
155
154
156 params = self.params
155 params = self.params
157
156
158 # Precompute chains
157 # Precompute chains
159 chain = [value]
158 chain = [value]
160 deltabaseoffset = value[1]
159 deltabaseoffset = value[1]
161 entrylen = self.INDEXENTRYLENGTH
160 entrylen = self.INDEXENTRYLENGTH
162 while (
161 while (
163 deltabaseoffset != FULLTEXTINDEXMARK
162 deltabaseoffset != FULLTEXTINDEXMARK
164 and deltabaseoffset != NOBASEINDEXMARK
163 and deltabaseoffset != NOBASEINDEXMARK
165 ):
164 ):
166 loc = params.indexstart + deltabaseoffset
165 loc = params.indexstart + deltabaseoffset
167 value = struct.unpack(
166 value = struct.unpack(
168 self.INDEXFORMAT, self._index[loc : loc + entrylen]
167 self.INDEXFORMAT, self._index[loc : loc + entrylen]
169 )
168 )
170 deltabaseoffset = value[1]
169 deltabaseoffset = value[1]
171 chain.append(value)
170 chain.append(value)
172
171
173 # Read chain data
172 # Read chain data
174 deltachain = []
173 deltachain = []
175 for node, deltabaseoffset, offset, size in chain:
174 for node, deltabaseoffset, offset, size in chain:
176 filename, node, deltabasenode, delta = self._readentry(offset, size)
175 filename, node, deltabasenode, delta = self._readentry(offset, size)
177 deltachain.append((filename, node, filename, deltabasenode, delta))
176 deltachain.append((filename, node, filename, deltabasenode, delta))
178
177
179 # If we've read a lot of data from the mmap, free some memory.
178 # If we've read a lot of data from the mmap, free some memory.
180 self.freememory()
179 self.freememory()
181
180
182 return deltachain
181 return deltachain
183
182
184 def _readentry(self, offset, size, getmeta=False):
183 def _readentry(self, offset, size, getmeta=False):
185 rawentry = self._data[offset : offset + size]
184 rawentry = self._data[offset : offset + size]
186 self._pagedin += len(rawentry)
185 self._pagedin += len(rawentry)
187
186
188 # <2 byte len> + <filename>
187 # <2 byte len> + <filename>
189 lengthsize = 2
188 lengthsize = 2
190 filenamelen = struct.unpack(b'!H', rawentry[:2])[0]
189 filenamelen = struct.unpack(b'!H', rawentry[:2])[0]
191 filename = rawentry[lengthsize : lengthsize + filenamelen]
190 filename = rawentry[lengthsize : lengthsize + filenamelen]
192
191
193 # <20 byte node> + <20 byte deltabase>
192 # <20 byte node> + <20 byte deltabase>
194 nodestart = lengthsize + filenamelen
193 nodestart = lengthsize + filenamelen
195 deltabasestart = nodestart + NODELENGTH
194 deltabasestart = nodestart + NODELENGTH
196 node = rawentry[nodestart:deltabasestart]
195 node = rawentry[nodestart:deltabasestart]
197 deltabasenode = rawentry[deltabasestart : deltabasestart + NODELENGTH]
196 deltabasenode = rawentry[deltabasestart : deltabasestart + NODELENGTH]
198
197
199 # <8 byte len> + <delta>
198 # <8 byte len> + <delta>
200 deltastart = deltabasestart + NODELENGTH
199 deltastart = deltabasestart + NODELENGTH
201 rawdeltalen = rawentry[deltastart : deltastart + 8]
200 rawdeltalen = rawentry[deltastart : deltastart + 8]
202 deltalen = struct.unpack(b'!Q', rawdeltalen)[0]
201 deltalen = struct.unpack(b'!Q', rawdeltalen)[0]
203
202
204 delta = rawentry[deltastart + 8 : deltastart + 8 + deltalen]
203 delta = rawentry[deltastart + 8 : deltastart + 8 + deltalen]
205 delta = self._decompress(delta)
204 delta = self._decompress(delta)
206
205
207 if getmeta:
206 if getmeta:
208 metastart = deltastart + 8 + deltalen
207 metastart = deltastart + 8 + deltalen
209 metalen = struct.unpack_from(b'!I', rawentry, metastart)[0]
208 metalen = struct.unpack_from(b'!I', rawentry, metastart)[0]
210
209
211 rawmeta = rawentry[metastart + 4 : metastart + 4 + metalen]
210 rawmeta = rawentry[metastart + 4 : metastart + 4 + metalen]
212 meta = shallowutil.parsepackmeta(rawmeta)
211 meta = shallowutil.parsepackmeta(rawmeta)
213 return filename, node, deltabasenode, delta, meta
212 return filename, node, deltabasenode, delta, meta
214 else:
213 else:
215 return filename, node, deltabasenode, delta
214 return filename, node, deltabasenode, delta
216
215
217 def _decompress(self, data):
216 def _decompress(self, data):
218 return zlib.decompress(data)
217 return zlib.decompress(data)
219
218
220 def add(self, name, node, data):
219 def add(self, name, node, data):
221 raise RuntimeError(b"cannot add to datapack (%s:%s)" % (name, node))
220 raise RuntimeError(b"cannot add to datapack (%s:%s)" % (name, node))
222
221
223 def _find(self, node):
222 def _find(self, node):
224 params = self.params
223 params = self.params
225 fanoutkey = struct.unpack(
224 fanoutkey = struct.unpack(
226 params.fanoutstruct, node[: params.fanoutprefix]
225 params.fanoutstruct, node[: params.fanoutprefix]
227 )[0]
226 )[0]
228 fanout = self._fanouttable
227 fanout = self._fanouttable
229
228
230 start = fanout[fanoutkey] + params.indexstart
229 start = fanout[fanoutkey] + params.indexstart
231 indexend = self._indexend
230 indexend = self._indexend
232
231
233 # Scan forward to find the first non-same entry, which is the upper
232 # Scan forward to find the first non-same entry, which is the upper
234 # bound.
233 # bound.
235 for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
234 for i in range(fanoutkey + 1, params.fanoutcount):
236 end = fanout[i] + params.indexstart
235 end = fanout[i] + params.indexstart
237 if end != start:
236 if end != start:
238 break
237 break
239 else:
238 else:
240 end = indexend
239 end = indexend
241
240
242 # Bisect between start and end to find node
241 # Bisect between start and end to find node
243 index = self._index
242 index = self._index
244 startnode = index[start : start + NODELENGTH]
243 startnode = index[start : start + NODELENGTH]
245 endnode = index[end : end + NODELENGTH]
244 endnode = index[end : end + NODELENGTH]
246 entrylen = self.INDEXENTRYLENGTH
245 entrylen = self.INDEXENTRYLENGTH
247 if startnode == node:
246 if startnode == node:
248 entry = index[start : start + entrylen]
247 entry = index[start : start + entrylen]
249 elif endnode == node:
248 elif endnode == node:
250 entry = index[end : end + entrylen]
249 entry = index[end : end + entrylen]
251 else:
250 else:
252 while start < end - entrylen:
251 while start < end - entrylen:
253 mid = start + (end - start) // 2
252 mid = start + (end - start) // 2
254 mid = mid - ((mid - params.indexstart) % entrylen)
253 mid = mid - ((mid - params.indexstart) % entrylen)
255 midnode = index[mid : mid + NODELENGTH]
254 midnode = index[mid : mid + NODELENGTH]
256 if midnode == node:
255 if midnode == node:
257 entry = index[mid : mid + entrylen]
256 entry = index[mid : mid + entrylen]
258 break
257 break
259 if node > midnode:
258 if node > midnode:
260 start = mid
259 start = mid
261 elif node < midnode:
260 elif node < midnode:
262 end = mid
261 end = mid
263 else:
262 else:
264 return None
263 return None
265
264
266 return struct.unpack(self.INDEXFORMAT, entry)
265 return struct.unpack(self.INDEXFORMAT, entry)
267
266
268 def markledger(self, ledger, options=None):
267 def markledger(self, ledger, options=None):
269 for filename, node in self:
268 for filename, node in self:
270 ledger.markdataentry(self, filename, node)
269 ledger.markdataentry(self, filename, node)
271
270
272 def cleanup(self, ledger):
271 def cleanup(self, ledger):
273 entries = ledger.sources.get(self, [])
272 entries = ledger.sources.get(self, [])
274 allkeys = set(self)
273 allkeys = set(self)
275 repackedkeys = {
274 repackedkeys = {
276 (e.filename, e.node) for e in entries if e.datarepacked or e.gced
275 (e.filename, e.node) for e in entries if e.datarepacked or e.gced
277 }
276 }
278
277
279 if len(allkeys - repackedkeys) == 0:
278 if len(allkeys - repackedkeys) == 0:
280 if self.path not in ledger.created:
279 if self.path not in ledger.created:
281 util.unlinkpath(self.indexpath, ignoremissing=True)
280 util.unlinkpath(self.indexpath, ignoremissing=True)
282 util.unlinkpath(self.packpath, ignoremissing=True)
281 util.unlinkpath(self.packpath, ignoremissing=True)
283
282
284 def __iter__(self):
283 def __iter__(self):
285 for f, n, deltabase, deltalen in self.iterentries():
284 for f, n, deltabase, deltalen in self.iterentries():
286 yield f, n
285 yield f, n
287
286
288 def iterentries(self):
287 def iterentries(self):
289 # Start at 1 to skip the header
288 # Start at 1 to skip the header
290 offset = 1
289 offset = 1
291 data = self._data
290 data = self._data
292 while offset < self.datasize:
291 while offset < self.datasize:
293 oldoffset = offset
292 oldoffset = offset
294
293
295 # <2 byte len> + <filename>
294 # <2 byte len> + <filename>
296 filenamelen = struct.unpack(b'!H', data[offset : offset + 2])[0]
295 filenamelen = struct.unpack(b'!H', data[offset : offset + 2])[0]
297 offset += 2
296 offset += 2
298 filename = data[offset : offset + filenamelen]
297 filename = data[offset : offset + filenamelen]
299 offset += filenamelen
298 offset += filenamelen
300
299
301 # <20 byte node>
300 # <20 byte node>
302 node = data[offset : offset + constants.NODESIZE]
301 node = data[offset : offset + constants.NODESIZE]
303 offset += constants.NODESIZE
302 offset += constants.NODESIZE
304 # <20 byte deltabase>
303 # <20 byte deltabase>
305 deltabase = data[offset : offset + constants.NODESIZE]
304 deltabase = data[offset : offset + constants.NODESIZE]
306 offset += constants.NODESIZE
305 offset += constants.NODESIZE
307
306
308 # <8 byte len> + <delta>
307 # <8 byte len> + <delta>
309 rawdeltalen = data[offset : offset + 8]
308 rawdeltalen = data[offset : offset + 8]
310 deltalen = struct.unpack(b'!Q', rawdeltalen)[0]
309 deltalen = struct.unpack(b'!Q', rawdeltalen)[0]
311 offset += 8
310 offset += 8
312
311
313 # TODO(augie): we should store a header that is the
312 # TODO(augie): we should store a header that is the
314 # uncompressed size.
313 # uncompressed size.
315 uncompressedlen = len(
314 uncompressedlen = len(
316 self._decompress(data[offset : offset + deltalen])
315 self._decompress(data[offset : offset + deltalen])
317 )
316 )
318 offset += deltalen
317 offset += deltalen
319
318
320 # <4 byte len> + <metadata-list>
319 # <4 byte len> + <metadata-list>
321 metalen = struct.unpack_from(b'!I', data, offset)[0]
320 metalen = struct.unpack_from(b'!I', data, offset)[0]
322 offset += 4 + metalen
321 offset += 4 + metalen
323
322
324 yield (filename, node, deltabase, uncompressedlen)
323 yield (filename, node, deltabase, uncompressedlen)
325
324
326 # If we've read a lot of data from the mmap, free some memory.
325 # If we've read a lot of data from the mmap, free some memory.
327 self._pagedin += offset - oldoffset
326 self._pagedin += offset - oldoffset
328 if self.freememory():
327 if self.freememory():
329 data = self._data
328 data = self._data
330
329
331
330
332 class mutabledatapack(basepack.mutablebasepack):
331 class mutabledatapack(basepack.mutablebasepack):
333 """A class for constructing and serializing a datapack file and index.
332 """A class for constructing and serializing a datapack file and index.
334
333
335 A datapack is a pair of files that contain the revision contents for various
334 A datapack is a pair of files that contain the revision contents for various
336 file revisions in Mercurial. It contains only revision contents (like file
335 file revisions in Mercurial. It contains only revision contents (like file
337 contents), not any history information.
336 contents), not any history information.
338
337
339 It consists of two files, with the following format. All bytes are in
338 It consists of two files, with the following format. All bytes are in
340 network byte order (big endian).
339 network byte order (big endian).
341
340
342 .datapack
341 .datapack
343 The pack itself is a series of revision deltas with some basic header
342 The pack itself is a series of revision deltas with some basic header
344 information on each. A revision delta may be a fulltext, represented by
343 information on each. A revision delta may be a fulltext, represented by
345 a deltabasenode equal to the nullid.
344 a deltabasenode equal to the nullid.
346
345
347 datapack = <version: 1 byte>
346 datapack = <version: 1 byte>
348 [<revision>,...]
347 [<revision>,...]
349 revision = <filename len: 2 byte unsigned int>
348 revision = <filename len: 2 byte unsigned int>
350 <filename>
349 <filename>
351 <node: 20 byte>
350 <node: 20 byte>
352 <deltabasenode: 20 byte>
351 <deltabasenode: 20 byte>
353 <delta len: 8 byte unsigned int>
352 <delta len: 8 byte unsigned int>
354 <delta>
353 <delta>
355 <metadata-list len: 4 byte unsigned int> [1]
354 <metadata-list len: 4 byte unsigned int> [1]
356 <metadata-list> [1]
355 <metadata-list> [1]
357 metadata-list = [<metadata-item>, ...]
356 metadata-list = [<metadata-item>, ...]
358 metadata-item = <metadata-key: 1 byte>
357 metadata-item = <metadata-key: 1 byte>
359 <metadata-value len: 2 byte unsigned>
358 <metadata-value len: 2 byte unsigned>
360 <metadata-value>
359 <metadata-value>
361
360
362 metadata-key could be METAKEYFLAG or METAKEYSIZE or other single byte
361 metadata-key could be METAKEYFLAG or METAKEYSIZE or other single byte
363 value in the future.
362 value in the future.
364
363
365 .dataidx
364 .dataidx
366 The index file consists of two parts, the fanout and the index.
365 The index file consists of two parts, the fanout and the index.
367
366
368 The index is a list of index entries, sorted by node (one per revision
367 The index is a list of index entries, sorted by node (one per revision
369 in the pack). Each entry has:
368 in the pack). Each entry has:
370
369
371 - node (The 20 byte node of the entry; i.e. the commit hash, file node
370 - node (The 20 byte node of the entry; i.e. the commit hash, file node
372 hash, etc)
371 hash, etc)
373 - deltabase index offset (The location in the index of the deltabase for
372 - deltabase index offset (The location in the index of the deltabase for
374 this entry. The deltabase is the next delta in
373 this entry. The deltabase is the next delta in
375 the chain, with the chain eventually
374 the chain, with the chain eventually
376 terminating in a full-text, represented by a
375 terminating in a full-text, represented by a
377 deltabase offset of -1. This lets us compute
376 deltabase offset of -1. This lets us compute
378 delta chains from the index, then do
377 delta chains from the index, then do
379 sequential reads from the pack if the revision
378 sequential reads from the pack if the revision
380 are nearby on disk.)
379 are nearby on disk.)
381 - pack entry offset (The location of this entry in the datapack)
380 - pack entry offset (The location of this entry in the datapack)
382 - pack content size (The on-disk length of this entry's pack data)
381 - pack content size (The on-disk length of this entry's pack data)
383
382
384 The fanout is a quick lookup table to reduce the number of steps for
383 The fanout is a quick lookup table to reduce the number of steps for
385 bisecting the index. It is a series of 4 byte pointers to positions
384 bisecting the index. It is a series of 4 byte pointers to positions
386 within the index. It has 2^16 entries, which corresponds to hash
385 within the index. It has 2^16 entries, which corresponds to hash
387 prefixes [0000, 0001,..., FFFE, FFFF]. Example: the pointer in slot
386 prefixes [0000, 0001,..., FFFE, FFFF]. Example: the pointer in slot
388 4F0A points to the index position of the first revision whose node
387 4F0A points to the index position of the first revision whose node
389 starts with 4F0A. This saves log(2^16)=16 bisect steps.
388 starts with 4F0A. This saves log(2^16)=16 bisect steps.
390
389
391 dataidx = <fanouttable>
390 dataidx = <fanouttable>
392 <index>
391 <index>
393 fanouttable = [<index offset: 4 byte unsigned int>,...] (2^16 entries)
392 fanouttable = [<index offset: 4 byte unsigned int>,...] (2^16 entries)
394 index = [<index entry>,...]
393 index = [<index entry>,...]
395 indexentry = <node: 20 byte>
394 indexentry = <node: 20 byte>
396 <deltabase location: 4 byte signed int>
395 <deltabase location: 4 byte signed int>
397 <pack entry offset: 8 byte unsigned int>
396 <pack entry offset: 8 byte unsigned int>
398 <pack entry size: 8 byte unsigned int>
397 <pack entry size: 8 byte unsigned int>
399
398
400 [1]: new in version 1.
399 [1]: new in version 1.
401 """
400 """
402
401
403 INDEXSUFFIX = INDEXSUFFIX
402 INDEXSUFFIX = INDEXSUFFIX
404 PACKSUFFIX = PACKSUFFIX
403 PACKSUFFIX = PACKSUFFIX
405
404
406 # v[01] index format: <node><delta offset><pack data offset><pack data size>
405 # v[01] index format: <node><delta offset><pack data offset><pack data size>
407 INDEXFORMAT = datapack.INDEXFORMAT
406 INDEXFORMAT = datapack.INDEXFORMAT
408 INDEXENTRYLENGTH = datapack.INDEXENTRYLENGTH
407 INDEXENTRYLENGTH = datapack.INDEXENTRYLENGTH
409
408
410 # v1 has metadata support
409 # v1 has metadata support
411 SUPPORTED_VERSIONS = [2]
410 SUPPORTED_VERSIONS = [2]
412
411
413 def _compress(self, data):
412 def _compress(self, data):
414 return zlib.compress(data)
413 return zlib.compress(data)
415
414
416 def add(self, name, node, deltabasenode, delta, metadata=None):
415 def add(self, name, node, deltabasenode, delta, metadata=None):
417 # metadata is a dict, ex. {METAKEYFLAG: flag}
416 # metadata is a dict, ex. {METAKEYFLAG: flag}
418 if len(name) > 2 ** 16:
417 if len(name) > 2 ** 16:
419 raise RuntimeError(_(b"name too long %s") % name)
418 raise RuntimeError(_(b"name too long %s") % name)
420 if len(node) != 20:
419 if len(node) != 20:
421 raise RuntimeError(_(b"node should be 20 bytes %s") % node)
420 raise RuntimeError(_(b"node should be 20 bytes %s") % node)
422
421
423 if node in self.entries:
422 if node in self.entries:
424 # The revision has already been added
423 # The revision has already been added
425 return
424 return
426
425
427 # TODO: allow configurable compression
426 # TODO: allow configurable compression
428 delta = self._compress(delta)
427 delta = self._compress(delta)
429
428
430 rawdata = b''.join(
429 rawdata = b''.join(
431 (
430 (
432 struct.pack(b'!H', len(name)), # unsigned 2 byte int
431 struct.pack(b'!H', len(name)), # unsigned 2 byte int
433 name,
432 name,
434 node,
433 node,
435 deltabasenode,
434 deltabasenode,
436 struct.pack(b'!Q', len(delta)), # unsigned 8 byte int
435 struct.pack(b'!Q', len(delta)), # unsigned 8 byte int
437 delta,
436 delta,
438 )
437 )
439 )
438 )
440
439
441 # v1 support metadata
440 # v1 support metadata
442 rawmeta = shallowutil.buildpackmeta(metadata)
441 rawmeta = shallowutil.buildpackmeta(metadata)
443 rawdata += struct.pack(b'!I', len(rawmeta)) # unsigned 4 byte
442 rawdata += struct.pack(b'!I', len(rawmeta)) # unsigned 4 byte
444 rawdata += rawmeta
443 rawdata += rawmeta
445
444
446 offset = self.packfp.tell()
445 offset = self.packfp.tell()
447
446
448 size = len(rawdata)
447 size = len(rawdata)
449
448
450 self.entries[node] = (deltabasenode, offset, size)
449 self.entries[node] = (deltabasenode, offset, size)
451
450
452 self.writeraw(rawdata)
451 self.writeraw(rawdata)
453
452
454 def createindex(self, nodelocations, indexoffset):
453 def createindex(self, nodelocations, indexoffset):
455 entries = sorted(
454 entries = sorted(
456 (n, db, o, s) for n, (db, o, s) in self.entries.items()
455 (n, db, o, s) for n, (db, o, s) in self.entries.items()
457 )
456 )
458
457
459 rawindex = b''
458 rawindex = b''
460 fmt = self.INDEXFORMAT
459 fmt = self.INDEXFORMAT
461 for node, deltabase, offset, size in entries:
460 for node, deltabase, offset, size in entries:
462 if deltabase == sha1nodeconstants.nullid:
461 if deltabase == sha1nodeconstants.nullid:
463 deltabaselocation = FULLTEXTINDEXMARK
462 deltabaselocation = FULLTEXTINDEXMARK
464 else:
463 else:
465 # Instead of storing the deltabase node in the index, let's
464 # Instead of storing the deltabase node in the index, let's
466 # store a pointer directly to the index entry for the deltabase.
465 # store a pointer directly to the index entry for the deltabase.
467 deltabaselocation = nodelocations.get(
466 deltabaselocation = nodelocations.get(
468 deltabase, NOBASEINDEXMARK
467 deltabase, NOBASEINDEXMARK
469 )
468 )
470
469
471 entry = struct.pack(fmt, node, deltabaselocation, offset, size)
470 entry = struct.pack(fmt, node, deltabaselocation, offset, size)
472 rawindex += entry
471 rawindex += entry
473
472
474 return rawindex
473 return rawindex
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now