py3: replace `pycompat.xrange` by `range`
Manuel Jacob
r50179:d44e3c45 default

Note: the requested changes are too big for this view; the diff below has been truncated.
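For context: `pycompat.xrange` was a compatibility alias Mercurial carried while it still supported Python 2, where the lazy integer sequence was spelled `xrange`. On Python 3 the alias simply pointed at the builtin `range`, so once Python 2 support was dropped the indirection became dead weight; this changeset rewrites the call sites mechanically. A minimal sketch of the retired shim (simplified; the exact historical definition in `mercurial/pycompat.py` may have differed):

```python
import sys

# Simplified sketch of the retired shim, not verbatim pycompat source.
if sys.version_info[0] >= 3:
    xrange = range    # Python 3: the builtin range is already lazy
else:
    xrange = xrange   # Python 2: keep the lazy builtin  # noqa: F821

# Old call sites:        for i in pycompat.xrange(n): ...
# After this changeset:  for i in range(n): ...
```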

@@ -1,1165 +1,1165 b''
1 1 # absorb.py
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """apply working directory changes to changesets (EXPERIMENTAL)
9 9
10 10 The absorb extension provides a command to use annotate information to
11 11 amend modified chunks into the corresponding non-public changesets.
12 12
13 13 ::
14 14
15 15 [absorb]
16 16 # only check 50 recent non-public changesets at most
17 17 max-stack-size = 50
18 18 # whether to add noise to new commits to avoid obsolescence cycle
19 19 add-noise = 1
20 20 # make `amend --correlated` a shortcut to the main command
21 21 amend-flag = correlated
22 22
23 23 [color]
24 24 absorb.description = yellow
25 25 absorb.node = blue bold
26 26 absorb.path = bold
27 27 """
28 28
29 29 # TODO:
30 30 # * Rename config items to [commands] namespace
31 31 # * Converge getdraftstack() with other code in core
32 32 # * move many attributes on fixupstate to be private
33 33
34 34
35 35 import collections
36 36
37 37 from mercurial.i18n import _
38 38 from mercurial.node import (
39 39 hex,
40 40 short,
41 41 )
42 42 from mercurial import (
43 43 cmdutil,
44 44 commands,
45 45 context,
46 46 crecord,
47 47 error,
48 48 linelog,
49 49 mdiff,
50 50 obsolete,
51 51 patch,
52 52 phases,
53 53 pycompat,
54 54 registrar,
55 55 rewriteutil,
56 56 scmutil,
57 57 util,
58 58 )
59 59 from mercurial.utils import stringutil
60 60
61 61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
62 62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
63 63 # be specifying the version(s) of Mercurial they are tested with, or
64 64 # leave the attribute unspecified.
65 65 testedwith = b'ships-with-hg-core'
66 66
67 67 cmdtable = {}
68 68 command = registrar.command(cmdtable)
69 69
70 70 configtable = {}
71 71 configitem = registrar.configitem(configtable)
72 72
73 73 configitem(b'absorb', b'add-noise', default=True)
74 74 configitem(b'absorb', b'amend-flag', default=None)
75 75 configitem(b'absorb', b'max-stack-size', default=50)
76 76
77 77 colortable = {
78 78 b'absorb.description': b'yellow',
79 79 b'absorb.node': b'blue bold',
80 80 b'absorb.path': b'bold',
81 81 }
82 82
83 83 defaultdict = collections.defaultdict
84 84
85 85
86 86 class nullui:
87 87 """blank ui object doing nothing"""
88 88
89 89 debugflag = False
90 90 verbose = False
91 91 quiet = True
92 92
93 93 def __getitem__(name):
94 94 def nullfunc(*args, **kwds):
95 95 return
96 96
97 97 return nullfunc
98 98
99 99
100 100 class emptyfilecontext:
101 101 """minimal filecontext representing an empty file"""
102 102
103 103 def __init__(self, repo):
104 104 self._repo = repo
105 105
106 106 def data(self):
107 107 return b''
108 108
109 109 def node(self):
110 110 return self._repo.nullid
111 111
112 112
113 113 def uniq(lst):
114 114 """list -> list. remove duplicated items without changing the order"""
115 115 seen = set()
116 116 result = []
117 117 for x in lst:
118 118 if x not in seen:
119 119 seen.add(x)
120 120 result.append(x)
121 121 return result
122 122
123 123
124 124 def getdraftstack(headctx, limit=None):
125 125 """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.
126 126
127 127 changesets are sorted in topo order, oldest first.
128 128 return at most limit items, if limit is a positive number.
129 129
130 130 merges are considered as non-draft as well. i.e. every commit
131 131 returned has and only has 1 parent.
132 132 """
133 133 ctx = headctx
134 134 result = []
135 135 while ctx.phase() != phases.public:
136 136 if limit and len(result) >= limit:
137 137 break
138 138 parents = ctx.parents()
139 139 if len(parents) != 1:
140 140 break
141 141 result.append(ctx)
142 142 ctx = parents[0]
143 143 result.reverse()
144 144 return result
145 145
146 146
147 147 def getfilestack(stack, path, seenfctxs=None):
148 148 """([ctx], str, set) -> [fctx], {ctx: fctx}
149 149
150 150 stack is a list of contexts, from old to new. usually they are what
151 151 "getdraftstack" returns.
152 152
153 153 follows renames, but not copies.
154 154
155 155 seenfctxs is a set of filecontexts that will be considered "immutable".
156 156 they are usually what this function returned in earlier calls, useful
157 157 to avoid issues that a file was "moved" to multiple places and was then
158 158 modified differently, like: "a" was copied to "b", "a" was also copied to
159 159 "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a"
160 160 and we enforce only one of them to be able to affect "a"'s content.
161 161
162 162 return an empty list and an empty dict, if the specified path does not
163 163 exist in stack[-1] (the top of the stack).
164 164
165 165 otherwise, return a list of de-duplicated filecontexts, and the map to
166 166 convert ctx in the stack to fctx, for possible mutable fctxs. the first item
167 167 of the list would be outside the stack and should be considered immutable.
168 168 the remaining items are within the stack.
169 169
170 170 for example, given the following changelog and corresponding filelog
171 171 revisions:
172 172
173 173 changelog: 3----4----5----6----7
174 174 filelog: x 0----1----1----2 (x: no such file yet)
175 175
176 176 - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
177 177 - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
178 178 dummy empty filecontext.
179 179 - if stack = [2], returns ([], {})
180 180 - if stack = [7], returns ([1, 2], {7: 2})
181 181 - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
182 182 removed, since 1 is immutable.
183 183 """
184 184 if seenfctxs is None:
185 185 seenfctxs = set()
186 186 assert stack
187 187
188 188 if path not in stack[-1]:
189 189 return [], {}
190 190
191 191 fctxs = []
192 192 fctxmap = {}
193 193
194 194 pctx = stack[0].p1() # the public (immutable) ctx we stop at
195 195 for ctx in reversed(stack):
196 196 if path not in ctx: # the file is added in the next commit
197 197 pctx = ctx
198 198 break
199 199 fctx = ctx[path]
200 200 fctxs.append(fctx)
201 201 if fctx in seenfctxs: # treat fctx as the immutable one
202 202 pctx = None # do not add another immutable fctx
203 203 break
204 204 fctxmap[ctx] = fctx # only for mutable fctxs
205 205 copy = fctx.copysource()
206 206 if copy:
207 207 path = copy # follow rename
208 208 if path in ctx: # but do not follow copy
209 209 pctx = ctx.p1()
210 210 break
211 211
212 212 if pctx is not None: # need an extra immutable fctx
213 213 if path in pctx:
214 214 fctxs.append(pctx[path])
215 215 else:
216 216 fctxs.append(emptyfilecontext(pctx.repo()))
217 217
218 218 fctxs.reverse()
219 219 # note: we rely on a property of hg: filerev is not reused for linear
220 220 # history. i.e. it's impossible to have:
221 221 # changelog: 4----5----6 (linear, no merges)
222 222 # filelog: 1----2----1
223 223 # ^ reuse filerev (impossible)
224 224 # because parents are part of the hash. if that's not true, we need to
225 225 # remove uniq and find a different way to identify fctxs.
226 226 return uniq(fctxs), fctxmap
227 227
228 228
229 229 class overlaystore(patch.filestore):
230 230 """read-only, hybrid store based on a dict and ctx.
231 231 memworkingcopy: {path: content}, overrides file contents.
232 232 """
233 233
234 234 def __init__(self, basectx, memworkingcopy):
235 235 self.basectx = basectx
236 236 self.memworkingcopy = memworkingcopy
237 237
238 238 def getfile(self, path):
239 239 """comply with mercurial.patch.filestore.getfile"""
240 240 if path not in self.basectx:
241 241 return None, None, None
242 242 fctx = self.basectx[path]
243 243 if path in self.memworkingcopy:
244 244 content = self.memworkingcopy[path]
245 245 else:
246 246 content = fctx.data()
247 247 mode = (fctx.islink(), fctx.isexec())
248 248 copy = fctx.copysource()
249 249 return content, mode, copy
250 250
251 251
252 252 def overlaycontext(memworkingcopy, ctx, parents=None, extra=None, desc=None):
253 253 """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
254 254 memworkingcopy overrides file contents.
255 255 """
256 256 # parents must contain 2 items: (node1, node2)
257 257 if parents is None:
258 258 parents = ctx.repo().changelog.parents(ctx.node())
259 259 if extra is None:
260 260 extra = ctx.extra()
261 261 if desc is None:
262 262 desc = ctx.description()
263 263 date = ctx.date()
264 264 user = ctx.user()
265 265 files = set(ctx.files()).union(memworkingcopy)
266 266 store = overlaystore(ctx, memworkingcopy)
267 267 return context.memctx(
268 268 repo=ctx.repo(),
269 269 parents=parents,
270 270 text=desc,
271 271 files=files,
272 272 filectxfn=store,
273 273 user=user,
274 274 date=date,
275 275 branch=None,
276 276 extra=extra,
277 277 )
278 278
279 279
280 280 class filefixupstate:
281 281 """state needed to apply fixups to a single file
282 282
283 283 internally, it keeps file contents of several revisions and a linelog.
284 284
285 285 the linelog uses odd revision numbers for original contents (fctxs passed
286 286 to __init__), and even revision numbers for fixups, like:
287 287
288 288 linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
289 289 linelog rev 2: fixups made to self.fctxs[0]
290 290 linelog rev 3: self.fctxs[1] (a child of fctxs[0])
291 291 linelog rev 4: fixups made to self.fctxs[1]
292 292 ...
293 293
294 294 a typical use is like:
295 295
296 296 1. call diffwith, to calculate self.fixups
297 297 2. (optionally), present self.fixups to the user, or change it
298 298 3. call apply, to apply changes
299 299 4. read results from "finalcontents", or call getfinalcontent
300 300 """
301 301
302 302 def __init__(self, fctxs, path, ui=None, opts=None):
303 303 """([fctx], ui or None) -> None
304 304
305 305 fctxs should be linear, and sorted by topo order - oldest first.
306 306 fctxs[0] will be considered as "immutable" and will not be changed.
307 307 """
308 308 self.fctxs = fctxs
309 309 self.path = path
310 310 self.ui = ui or nullui()
311 311 self.opts = opts or {}
312 312
313 313 # following fields are built from fctxs. they exist for perf reason
314 314 self.contents = [f.data() for f in fctxs]
315 315 self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
316 316 self.linelog = self._buildlinelog()
317 317 if self.ui.debugflag:
318 318 assert self._checkoutlinelog() == self.contents
319 319
320 320 # following fields will be filled later
321 321 self.chunkstats = [0, 0] # [adopted, total : int]
322 322 self.targetlines = [] # [str]
323 323 self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
324 324 self.finalcontents = [] # [str]
325 325 self.ctxaffected = set()
326 326
327 327 def diffwith(self, targetfctx, fm=None):
328 328 """calculate fixups needed by examining the differences between
329 329 self.fctxs[-1] and targetfctx, chunk by chunk.
330 330
331 331 targetfctx is the target state we move towards. we may or may not be
332 332 able to get there because not all modified chunks can be amended into
333 333 a non-public fctx unambiguously.
334 334
335 335 call this only once, before apply().
336 336
337 337 update self.fixups, self.chunkstats, and self.targetlines.
338 338 """
339 339 a = self.contents[-1]
340 340 alines = self.contentlines[-1]
341 341 b = targetfctx.data()
342 342 blines = mdiff.splitnewlines(b)
343 343 self.targetlines = blines
344 344
345 345 self.linelog.annotate(self.linelog.maxrev)
346 346 annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
347 347 assert len(annotated) == len(alines)
348 348 # add a dummy end line to make insertion at the end easier
349 349 if annotated:
350 350 dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
351 351 annotated.append(dummyendline)
352 352
353 353 # analyse diff blocks
354 354 for chunk in self._alldiffchunks(a, b, alines, blines):
355 355 newfixups = self._analysediffchunk(chunk, annotated)
356 356 self.chunkstats[0] += bool(newfixups) # 1 or 0
357 357 self.chunkstats[1] += 1
358 358 self.fixups += newfixups
359 359 if fm is not None:
360 360 self._showchanges(fm, alines, blines, chunk, newfixups)
361 361
362 362 def apply(self):
363 363 """apply self.fixups. update self.linelog, self.finalcontents.
364 364
365 365 call this only once, before getfinalcontent(), after diffwith().
366 366 """
367 367 # the following is unnecessary, as it's done by "diffwith":
368 368 # self.linelog.annotate(self.linelog.maxrev)
369 369 for rev, a1, a2, b1, b2 in reversed(self.fixups):
370 370 blines = self.targetlines[b1:b2]
371 371 if self.ui.debugflag:
372 372 idx = (max(rev - 1, 0)) // 2
373 373 self.ui.write(
374 374 _(b'%s: chunk %d:%d -> %d lines\n')
375 375 % (short(self.fctxs[idx].node()), a1, a2, len(blines))
376 376 )
377 377 self.linelog.replacelines(rev, a1, a2, b1, b2)
378 378 if self.opts.get(b'edit_lines', False):
379 379 self.finalcontents = self._checkoutlinelogwithedits()
380 380 else:
381 381 self.finalcontents = self._checkoutlinelog()
382 382
383 383 def getfinalcontent(self, fctx):
384 384 """(fctx) -> str. get modified file content for a given filecontext"""
385 385 idx = self.fctxs.index(fctx)
386 386 return self.finalcontents[idx]
387 387
388 388 def _analysediffchunk(self, chunk, annotated):
389 389 """analyse a different chunk and return new fixups found
390 390
391 391 return [] if no lines from the chunk can be safely applied.
392 392
393 393 the chunk (or lines) cannot be safely applied, if, for example:
394 394 - the modified (deleted) lines belong to a public changeset
395 395 (self.fctxs[0])
396 396 - the chunk is a pure insertion and the adjacent lines (at most 2
397 397 lines) belong to different non-public changesets, or do not belong
398 398 to any non-public changesets.
399 399 - the chunk is modifying lines from different changesets.
400 400 in this case, if the number of lines deleted equals to the number
401 401 of lines added, assume it's a simple 1:1 map (could be wrong).
402 402 otherwise, give up.
403 403 - the chunk is modifying lines from a single non-public changeset,
404 404 but other revisions touch the area as well. i.e. the lines are
405 405 not continuous as seen from the linelog.
406 406 """
407 407 a1, a2, b1, b2 = chunk
408 408 # find involved indexes from annotate result
409 409 involved = annotated[a1:a2]
410 410 if not involved and annotated: # a1 == a2 and a is not empty
411 411 # pure insertion, check nearby lines. ignore lines belong
412 412 # to the public (first) changeset (i.e. annotated[i][0] == 1)
413 413 nearbylinenums = {a2, max(0, a1 - 1)}
414 414 involved = [
415 415 annotated[i] for i in nearbylinenums if annotated[i][0] != 1
416 416 ]
417 417 involvedrevs = list({r for r, l in involved})
418 418 newfixups = []
419 419 if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
420 420 # chunk belongs to a single revision
421 421 rev = involvedrevs[0]
422 422 if rev > 1:
423 423 fixuprev = rev + 1
424 424 newfixups.append((fixuprev, a1, a2, b1, b2))
425 425 elif a2 - a1 == b2 - b1 or b1 == b2:
426 426 # 1:1 line mapping, or chunk was deleted
427 for i in pycompat.xrange(a1, a2):
427 for i in range(a1, a2):
428 428 rev, linenum = annotated[i]
429 429 if rev > 1:
430 430 if b1 == b2: # deletion, simply remove that single line
431 431 nb1 = nb2 = 0
432 432 else: # 1:1 line mapping, change the corresponding rev
433 433 nb1 = b1 + i - a1
434 434 nb2 = nb1 + 1
435 435 fixuprev = rev + 1
436 436 newfixups.append((fixuprev, i, i + 1, nb1, nb2))
437 437 return self._optimizefixups(newfixups)
438 438
439 439 @staticmethod
440 440 def _alldiffchunks(a, b, alines, blines):
441 441 """like mdiff.allblocks, but only care about differences"""
442 442 blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
443 443 for chunk, btype in blocks:
444 444 if btype != b'!':
445 445 continue
446 446 yield chunk
447 447
448 448 def _buildlinelog(self):
449 449 """calculate the initial linelog based on self.content{,line}s.
450 450 this is similar to running a partial "annotate".
451 451 """
452 452 llog = linelog.linelog()
453 453 a, alines = b'', []
454 for i in pycompat.xrange(len(self.contents)):
454 for i in range(len(self.contents)):
455 455 b, blines = self.contents[i], self.contentlines[i]
456 456 llrev = i * 2 + 1
457 457 chunks = self._alldiffchunks(a, b, alines, blines)
458 458 for a1, a2, b1, b2 in reversed(list(chunks)):
459 459 llog.replacelines(llrev, a1, a2, b1, b2)
460 460 a, alines = b, blines
461 461 return llog
462 462
463 463 def _checkoutlinelog(self):
464 464 """() -> [str]. check out file contents from linelog"""
465 465 contents = []
466 for i in pycompat.xrange(len(self.contents)):
466 for i in range(len(self.contents)):
467 467 rev = (i + 1) * 2
468 468 self.linelog.annotate(rev)
469 469 content = b''.join(map(self._getline, self.linelog.annotateresult))
470 470 contents.append(content)
471 471 return contents
472 472
473 473 def _checkoutlinelogwithedits(self):
474 474 """() -> [str]. prompt all lines for edit"""
475 475 alllines = self.linelog.getalllines()
476 476 # header
477 477 editortext = (
478 478 _(
479 479 b'HG: editing %s\nHG: "y" means the line to the right '
480 480 b'exists in the changeset to the top\nHG:\n'
481 481 )
482 482 % self.fctxs[-1].path()
483 483 )
484 484 # [(idx, fctx)]. hide the dummy emptyfilecontext
485 485 visiblefctxs = [
486 486 (i, f)
487 487 for i, f in enumerate(self.fctxs)
488 488 if not isinstance(f, emptyfilecontext)
489 489 ]
490 490 for i, (j, f) in enumerate(visiblefctxs):
491 491 editortext += _(b'HG: %s/%s %s %s\n') % (
492 492 b'|' * i,
493 493 b'-' * (len(visiblefctxs) - i + 1),
494 494 short(f.node()),
495 495 f.description().split(b'\n', 1)[0],
496 496 )
497 497 editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs))
498 498 # figure out the lifetime of a line, this is relatively inefficient,
499 499 # but probably fine
500 500 lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}}
501 501 for i, f in visiblefctxs:
502 502 self.linelog.annotate((i + 1) * 2)
503 503 for l in self.linelog.annotateresult:
504 504 lineset[l].add(i)
505 505 # append lines
506 506 for l in alllines:
507 507 editortext += b' %s : %s' % (
508 508 b''.join(
509 509 [
510 510 (b'y' if i in lineset[l] else b' ')
511 511 for i, _f in visiblefctxs
512 512 ]
513 513 ),
514 514 self._getline(l),
515 515 )
516 516 # run editor
517 517 editedtext = self.ui.edit(editortext, b'', action=b'absorb')
518 518 if not editedtext:
519 519 raise error.InputError(_(b'empty editor text'))
520 520 # parse edited result
521 521 contents = [b''] * len(self.fctxs)
522 522 leftpadpos = 4
523 523 colonpos = leftpadpos + len(visiblefctxs) + 1
524 524 for l in mdiff.splitnewlines(editedtext):
525 525 if l.startswith(b'HG:'):
526 526 continue
527 527 if l[colonpos - 1 : colonpos + 2] != b' : ':
528 528 raise error.InputError(_(b'malformed line: %s') % l)
529 529 linecontent = l[colonpos + 2 :]
530 530 for i, ch in enumerate(
531 531 pycompat.bytestr(l[leftpadpos : colonpos - 1])
532 532 ):
533 533 if ch == b'y':
534 534 contents[visiblefctxs[i][0]] += linecontent
535 535 # chunkstats is hard to calculate if anything changes, therefore
536 536 # set them to just a simple value (1, 1).
537 537 if editedtext != editortext:
538 538 self.chunkstats = [1, 1]
539 539 return contents
540 540
541 541 def _getline(self, lineinfo):
542 542 """((rev, linenum)) -> str. convert rev+line number to line content"""
543 543 rev, linenum = lineinfo
544 544 if rev & 1: # odd: original line taken from fctxs
545 545 return self.contentlines[rev // 2][linenum]
546 546 else: # even: fixup line from targetfctx
547 547 return self.targetlines[linenum]
548 548
549 549 def _iscontinuous(self, a1, a2, closedinterval=False):
550 550 """(a1, a2 : int) -> bool
551 551
552 552 check if these lines are continuous. i.e. no other insertions or
553 553 deletions (from other revisions) among these lines.
554 554
555 555 closedinterval decides whether a2 should be included or not. i.e. is
556 556 it [a1, a2), or [a1, a2] ?
557 557 """
558 558 if a1 >= a2:
559 559 return True
560 560 llog = self.linelog
561 561 offset1 = llog.getoffset(a1)
562 562 offset2 = llog.getoffset(a2) + int(closedinterval)
563 563 linesinbetween = llog.getalllines(offset1, offset2)
564 564 return len(linesinbetween) == a2 - a1 + int(closedinterval)
565 565
566 566 def _optimizefixups(self, fixups):
567 567 """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
568 568 merge adjacent fixups to make them less fragmented.
569 569 """
570 570 result = []
571 571 pcurrentchunk = [[-1, -1, -1, -1, -1]]
572 572
573 573 def pushchunk():
574 574 if pcurrentchunk[0][0] != -1:
575 575 result.append(tuple(pcurrentchunk[0]))
576 576
577 577 for i, chunk in enumerate(fixups):
578 578 rev, a1, a2, b1, b2 = chunk
579 579 lastrev = pcurrentchunk[0][0]
580 580 lasta2 = pcurrentchunk[0][2]
581 581 lastb2 = pcurrentchunk[0][4]
582 582 if (
583 583 a1 == lasta2
584 584 and b1 == lastb2
585 585 and rev == lastrev
586 586 and self._iscontinuous(max(a1 - 1, 0), a1)
587 587 ):
588 588 # merge into currentchunk
589 589 pcurrentchunk[0][2] = a2
590 590 pcurrentchunk[0][4] = b2
591 591 else:
592 592 pushchunk()
593 593 pcurrentchunk[0] = list(chunk)
594 594 pushchunk()
595 595 return result
596 596
597 597 def _showchanges(self, fm, alines, blines, chunk, fixups):
598 598 def trim(line):
599 599 if line.endswith(b'\n'):
600 600 line = line[:-1]
601 601 return line
602 602
603 603 # this is not optimized for perf but _showchanges only gets executed
604 604 # with an extra command-line flag.
605 605 a1, a2, b1, b2 = chunk
606 606 aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
607 607 for idx, fa1, fa2, fb1, fb2 in fixups:
608 for i in pycompat.xrange(fa1, fa2):
608 for i in range(fa1, fa2):
609 609 aidxs[i - a1] = (max(idx, 1) - 1) // 2
610 for i in pycompat.xrange(fb1, fb2):
610 for i in range(fb1, fb2):
611 611 bidxs[i - b1] = (max(idx, 1) - 1) // 2
612 612
613 613 fm.startitem()
614 614 fm.write(
615 615 b'hunk',
616 616 b' %s\n',
617 617 b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1),
618 618 label=b'diff.hunk',
619 619 )
620 620 fm.data(path=self.path, linetype=b'hunk')
621 621
622 622 def writeline(idx, diffchar, line, linetype, linelabel):
623 623 fm.startitem()
624 624 node = b''
625 625 if idx:
626 626 ctx = self.fctxs[idx]
627 627 fm.context(fctx=ctx)
628 628 node = ctx.hex()
629 629 self.ctxaffected.add(ctx.changectx())
630 630 fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node')
631 631 fm.write(
632 632 b'diffchar ' + linetype,
633 633 b'%s%s\n',
634 634 diffchar,
635 635 line,
636 636 label=linelabel,
637 637 )
638 638 fm.data(path=self.path, linetype=linetype)
639 639
640 for i in pycompat.xrange(a1, a2):
640 for i in range(a1, a2):
641 641 writeline(
642 642 aidxs[i - a1],
643 643 b'-',
644 644 trim(alines[i]),
645 645 b'deleted',
646 646 b'diff.deleted',
647 647 )
648 for i in pycompat.xrange(b1, b2):
648 for i in range(b1, b2):
649 649 writeline(
650 650 bidxs[i - b1],
651 651 b'+',
652 652 trim(blines[i]),
653 653 b'inserted',
654 654 b'diff.inserted',
655 655 )
656 656
657 657
658 658 class fixupstate:
659 659 """state needed to run absorb
660 660
661 661 internally, it keeps paths and filefixupstates.
662 662
663 663 a typical use is like filefixupstates:
664 664
665 665 1. call diffwith, to calculate fixups
666 666 2. (optionally), present fixups to the user, or edit fixups
667 667 3. call apply, to apply changes to memory
668 668 4. call commit, to commit changes to hg database
669 669 """
670 670
671 671 def __init__(self, stack, ui=None, opts=None):
672 672 """([ctx], ui or None) -> None
673 673
674 674 stack: should be linear, and sorted by topo order - oldest first.
675 675 all commits in stack are considered mutable.
676 676 """
677 677 assert stack
678 678 self.ui = ui or nullui()
679 679 self.opts = opts or {}
680 680 self.stack = stack
681 681 self.repo = stack[-1].repo().unfiltered()
682 682
683 683 # following fields will be filled later
684 684 self.paths = [] # [str]
685 685 self.status = None # ctx.status output
686 686 self.fctxmap = {} # {path: {ctx: fctx}}
687 687 self.fixupmap = {} # {path: filefixupstate}
688 688 self.replacemap = {} # {oldnode: newnode or None}
689 689 self.finalnode = None # head after all fixups
690 690 self.ctxaffected = set() # ctx that will be absorbed into
691 691
692 692 def diffwith(self, targetctx, match=None, fm=None):
693 693 """diff and prepare fixups. update self.fixupmap, self.paths"""
694 694 # only care about modified files
695 695 self.status = self.stack[-1].status(targetctx, match)
696 696 self.paths = []
697 697 # but if --edit-lines is used, the user may want to edit files
698 698 # even if they are not modified
699 699 editopt = self.opts.get(b'edit_lines')
700 700 if not self.status.modified and editopt and match:
701 701 interestingpaths = match.files()
702 702 else:
703 703 interestingpaths = self.status.modified
704 704 # prepare the filefixupstate
705 705 seenfctxs = set()
706 706 # sorting is necessary to eliminate ambiguity for the "double move"
707 707 # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
708 708 for path in sorted(interestingpaths):
709 709 self.ui.debug(b'calculating fixups for %s\n' % path)
710 710 targetfctx = targetctx[path]
711 711 fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
712 712 # ignore symbolic links or binary, or unchanged files
713 713 if any(
714 714 f.islink() or stringutil.binary(f.data())
715 715 for f in [targetfctx] + fctxs
716 716 if not isinstance(f, emptyfilecontext)
717 717 ):
718 718 continue
719 719 if targetfctx.data() == fctxs[-1].data() and not editopt:
720 720 continue
721 721 seenfctxs.update(fctxs[1:])
722 722 self.fctxmap[path] = ctx2fctx
723 723 fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
724 724 if fm is not None:
725 725 fm.startitem()
726 726 fm.plain(b'showing changes for ')
727 727 fm.write(b'path', b'%s\n', path, label=b'absorb.path')
728 728 fm.data(linetype=b'path')
729 729 fstate.diffwith(targetfctx, fm)
730 730 self.fixupmap[path] = fstate
731 731 self.paths.append(path)
732 732 self.ctxaffected.update(fstate.ctxaffected)
733 733
734 734 def apply(self):
735 735 """apply fixups to individual filefixupstates"""
736 736 for path, state in self.fixupmap.items():
737 737 if self.ui.debugflag:
738 738 self.ui.write(_(b'applying fixups to %s\n') % path)
739 739 state.apply()
740 740
741 741 @property
742 742 def chunkstats(self):
743 743 """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
744 744 return {path: state.chunkstats for path, state in self.fixupmap.items()}
745 745
746 746 def commit(self):
747 747 """commit changes. update self.finalnode, self.replacemap"""
748 748 with self.repo.transaction(b'absorb') as tr:
749 749 self._commitstack()
750 750 self._movebookmarks(tr)
751 751 if self.repo[b'.'].node() in self.replacemap:
752 752 self._moveworkingdirectoryparent()
753 753 self._cleanupoldcommits()
754 754 return self.finalnode
755 755
756 756 def printchunkstats(self):
757 757 """print things like '1 of 2 chunk(s) applied'"""
758 758 ui = self.ui
759 759 chunkstats = self.chunkstats
760 760 if ui.verbose:
761 761 # chunkstats for each file
762 762 for path, stat in chunkstats.items():
763 763 if stat[0]:
764 764 ui.write(
765 765 _(b'%s: %d of %d chunk(s) applied\n')
766 766 % (path, stat[0], stat[1])
767 767 )
768 768 elif not ui.quiet:
769 769 # a summary for all files
770 770 stats = chunkstats.values()
771 771 applied, total = (sum(s[i] for s in stats) for i in (0, 1))
772 772 ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total))
773 773
774 774 def _commitstack(self):
775 775 """make new commits. update self.finalnode, self.replacemap.
776 776 it is splitted from "commit" to avoid too much indentation.
777 777 """
778 778 # last node (20-char) committed by us
779 779 lastcommitted = None
780 780 # p1 which overrides the parent of the next commit, "None" means use
781 781 # the original parent unchanged
782 782 nextp1 = None
783 783 for ctx in self.stack:
784 784 memworkingcopy = self._getnewfilecontents(ctx)
785 785 if not memworkingcopy and not lastcommitted:
786 786 # nothing changed, nothing commited
787 787 nextp1 = ctx
788 788 continue
789 789 willbecomenoop = ctx.files() and self._willbecomenoop(
790 790 memworkingcopy, ctx, nextp1
791 791 )
792 792 if self.skip_empty_successor and willbecomenoop:
793 793 # changeset is no longer necessary
794 794 self.replacemap[ctx.node()] = None
795 795 msg = _(b'became empty and was dropped')
796 796 else:
797 797 # changeset needs re-commit
798 798 nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
799 799 lastcommitted = self.repo[nodestr]
800 800 nextp1 = lastcommitted
801 801 self.replacemap[ctx.node()] = lastcommitted.node()
802 802 if memworkingcopy:
803 803 if willbecomenoop:
804 804 msg = _(b'%d file(s) changed, became empty as %s')
805 805 else:
806 806 msg = _(b'%d file(s) changed, became %s')
807 807 msg = msg % (
808 808 len(memworkingcopy),
809 809 self._ctx2str(lastcommitted),
810 810 )
811 811 else:
812 812 msg = _(b'became %s') % self._ctx2str(lastcommitted)
813 813 if self.ui.verbose and msg:
814 814 self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg))
815 815 self.finalnode = lastcommitted and lastcommitted.node()
816 816
817 817 def _ctx2str(self, ctx):
818 818 if self.ui.debugflag:
819 819 return b'%d:%s' % (ctx.rev(), ctx.hex())
820 820 else:
821 821 return b'%d:%s' % (ctx.rev(), short(ctx.node()))
822 822
823 823 def _getnewfilecontents(self, ctx):
824 824 """(ctx) -> {path: str}
825 825
826 826 fetch file contents from filefixupstates.
827 827 return the working copy overrides - files different from ctx.
828 828 """
829 829 result = {}
830 830 for path in self.paths:
831 831 ctx2fctx = self.fctxmap[path] # {ctx: fctx}
832 832 if ctx not in ctx2fctx:
833 833 continue
834 834 fctx = ctx2fctx[ctx]
835 835 content = fctx.data()
836 836 newcontent = self.fixupmap[path].getfinalcontent(fctx)
837 837 if content != newcontent:
838 838 result[fctx.path()] = newcontent
839 839 return result
840 840
841 841 def _movebookmarks(self, tr):
842 842 repo = self.repo
843 843 needupdate = [
844 844 (name, self.replacemap[hsh])
845 845 for name, hsh in repo._bookmarks.items()
846 846 if hsh in self.replacemap
847 847 ]
848 848 changes = []
849 849 for name, hsh in needupdate:
850 850 if hsh:
851 851 changes.append((name, hsh))
852 852 if self.ui.verbose:
853 853 self.ui.write(
854 854 _(b'moving bookmark %s to %s\n') % (name, hex(hsh))
855 855 )
856 856 else:
857 857 changes.append((name, None))
858 858 if self.ui.verbose:
859 859 self.ui.write(_(b'deleting bookmark %s\n') % name)
860 860 repo._bookmarks.applychanges(repo, tr, changes)
861 861
862 862 def _moveworkingdirectoryparent(self):
863 863 if not self.finalnode:
864 864 # Find the latest not-{obsoleted,stripped} parent.
865 865 revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys())
866 866 ctx = self.repo[revs.first()]
867 867 self.finalnode = ctx.node()
868 868 else:
869 869 ctx = self.repo[self.finalnode]
870 870
871 871 dirstate = self.repo.dirstate
872 872 # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
873 873 # be slow. in absorb's case, no need to invalidate fsmonitorstate.
874 874 noop = lambda: 0
875 875 restore = noop
876 876 if util.safehasattr(dirstate, '_fsmonitorstate'):
877 877 bak = dirstate._fsmonitorstate.invalidate
878 878
879 879 def restore():
880 880 dirstate._fsmonitorstate.invalidate = bak
881 881
882 882 dirstate._fsmonitorstate.invalidate = noop
883 883 try:
884 884 with dirstate.parentchange():
885 885 dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
886 886 finally:
887 887 restore()
888 888
889 889 @staticmethod
890 890 def _willbecomenoop(memworkingcopy, ctx, pctx=None):
891 891 """({path: content}, ctx, ctx) -> bool. test if a commit will be noop
892 892
893 893 if it will become an empty commit (does not change anything, after the
894 894 memworkingcopy overrides), return True. otherwise return False.
895 895 """
896 896 if not pctx:
897 897 parents = ctx.parents()
898 898 if len(parents) != 1:
899 899 return False
900 900 pctx = parents[0]
901 901 if ctx.branch() != pctx.branch():
902 902 return False
903 903 if ctx.extra().get(b'close'):
904 904 return False
905 905 # ctx changes more files (not a subset of memworkingcopy)
906 906 if not set(ctx.files()).issubset(set(memworkingcopy)):
907 907 return False
908 908 for path, content in memworkingcopy.items():
909 909 if path not in pctx or path not in ctx:
910 910 return False
911 911 fctx = ctx[path]
912 912 pfctx = pctx[path]
913 913 if pfctx.flags() != fctx.flags():
914 914 return False
915 915 if pfctx.data() != content:
916 916 return False
917 917 return True
918 918
919 919 def _commitsingle(self, memworkingcopy, ctx, p1=None):
920 920 """(ctx, {path: content}, node) -> node. make a single commit
921 921
922 922 the commit is a clone from ctx, with a (optionally) different p1, and
923 923 different file contents replaced by memworkingcopy.
924 924 """
925 925 parents = p1 and (p1, self.repo.nullid)
926 926 extra = ctx.extra()
927 927 if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
928 928 extra[b'absorb_source'] = ctx.hex()
929 929
930 930 desc = rewriteutil.update_hash_refs(
931 931 ctx.repo(),
932 932 ctx.description(),
933 933 {
934 934 oldnode: [newnode]
935 935 for oldnode, newnode in self.replacemap.items()
936 936 },
937 937 )
938 938 mctx = overlaycontext(
939 939 memworkingcopy, ctx, parents, extra=extra, desc=desc
940 940 )
941 941 return mctx.commit()
942 942
943 943 @util.propertycache
944 944 def _useobsolete(self):
945 945 """() -> bool"""
946 946 return obsolete.isenabled(self.repo, obsolete.createmarkersopt)
947 947
948 948 def _cleanupoldcommits(self):
949 949 replacements = {
950 950 k: ([v] if v is not None else [])
951 951 for k, v in self.replacemap.items()
952 952 }
953 953 if replacements:
954 954 scmutil.cleanupnodes(
955 955 self.repo, replacements, operation=b'absorb', fixphase=True
956 956 )
957 957
958 958 @util.propertycache
959 959 def skip_empty_successor(self):
960 960 return rewriteutil.skip_empty_successor(self.ui, b'absorb')
961 961
962 962
963 963 def _parsechunk(hunk):
964 964 """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))"""
965 965 if type(hunk) not in (crecord.uihunk, patch.recordhunk):
966 966 return None, None
967 967 path = hunk.header.filename()
968 968 a1 = hunk.fromline + len(hunk.before) - 1
969 969 # remove before and after context
970 970 hunk.before = hunk.after = []
971 971 buf = util.stringio()
972 972 hunk.write(buf)
973 973 patchlines = mdiff.splitnewlines(buf.getvalue())
974 974 # hunk.prettystr() will update hunk.removed
975 975 a2 = a1 + hunk.removed
976 976 blines = [l[1:] for l in patchlines[1:] if not l.startswith(b'-')]
977 977 return path, (a1, a2, blines)
978 978
979 979
980 980 def overlaydiffcontext(ctx, chunks):
981 981 """(ctx, [crecord.uihunk]) -> memctx
982 982
983 983 return a memctx with some [1] patches (chunks) applied to ctx.
984 984 [1]: modifications are handled. renames, mode changes, etc. are ignored.
985 985 """
986 986 # sadly the applying-patch logic is hardly reusable, and messy:
987 987 # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it
988 988 # needs a file stream of a patch and will re-parse it, while we have
989 989 # structured hunk objects at hand.
990 990 # 2. a lot of different implementations about "chunk" (patch.hunk,
991 991 # patch.recordhunk, crecord.uihunk)
992 992 # as we only care about applying changes to modified files, no mode
993 993 # change, no binary diff, and no renames, it's probably okay to
994 994 # re-invent the logic using much simpler code here.
995 995 memworkingcopy = {} # {path: content}
996 996 patchmap = defaultdict(lambda: []) # {path: [(a1, a2, [bline])]}
997 997 for path, info in map(_parsechunk, chunks):
998 998 if not path or not info:
999 999 continue
1000 1000 patchmap[path].append(info)
1001 1001 for path, patches in patchmap.items():
1002 1002 if path not in ctx or not patches:
1003 1003 continue
1004 1004 patches.sort(reverse=True)
1005 1005 lines = mdiff.splitnewlines(ctx[path].data())
1006 1006 for a1, a2, blines in patches:
1007 1007 lines[a1:a2] = blines
1008 1008 memworkingcopy[path] = b''.join(lines)
1009 1009 return overlaycontext(memworkingcopy, ctx)
1010 1010
1011 1011
1012 1012 def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
1013 1013 """pick fixup chunks from targetctx, apply them to stack.
1014 1014
1015 1015 if targetctx is None, the working copy context will be used.
1016 1016 if stack is None, the current draft stack will be used.
1017 1017 return fixupstate.
1018 1018 """
1019 1019 if stack is None:
1020 1020 limit = ui.configint(b'absorb', b'max-stack-size')
1021 1021 headctx = repo[b'.']
1022 1022 if len(headctx.parents()) > 1:
1023 1023 raise error.InputError(_(b'cannot absorb into a merge'))
1024 1024 stack = getdraftstack(headctx, limit)
1025 1025 if limit and len(stack) >= limit:
1026 1026 ui.warn(
1027 1027 _(
1028 1028 b'absorb: only the recent %d changesets will '
1029 1029 b'be analysed\n'
1030 1030 )
1031 1031 % limit
1032 1032 )
1033 1033 if not stack:
1034 1034 raise error.InputError(_(b'no mutable changeset to change'))
1035 1035 if targetctx is None: # default to working copy
1036 1036 targetctx = repo[None]
1037 1037 if pats is None:
1038 1038 pats = ()
1039 1039 if opts is None:
1040 1040 opts = {}
1041 1041 state = fixupstate(stack, ui=ui, opts=opts)
1042 1042 matcher = scmutil.match(targetctx, pats, opts)
1043 1043 if opts.get(b'interactive'):
1044 1044 diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
1045 1045 origchunks = patch.parsepatch(diff)
1046 1046 chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0]
1047 1047 targetctx = overlaydiffcontext(stack[-1], chunks)
1048 1048 if opts.get(b'edit_lines'):
1049 1049 # If we're going to open the editor, don't ask the user to confirm
1050 1050 # first
1051 1051 opts[b'apply_changes'] = True
1052 1052 fm = None
1053 1053 if opts.get(b'print_changes') or not opts.get(b'apply_changes'):
1054 1054 fm = ui.formatter(b'absorb', opts)
1055 1055 state.diffwith(targetctx, matcher, fm)
1056 1056 if fm is not None:
1057 1057 fm.startitem()
1058 1058 fm.write(
1059 1059 b"count", b"\n%d changesets affected\n", len(state.ctxaffected)
1060 1060 )
1061 1061 fm.data(linetype=b'summary')
1062 1062 for ctx in reversed(stack):
1063 1063 if ctx not in state.ctxaffected:
1064 1064 continue
1065 1065 fm.startitem()
1066 1066 fm.context(ctx=ctx)
1067 1067 fm.data(linetype=b'changeset')
1068 1068 fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node')
1069 1069 descfirstline = stringutil.firstline(ctx.description())
1070 1070 fm.write(
1071 1071 b'descfirstline',
1072 1072 b'%s\n',
1073 1073 descfirstline,
1074 1074 label=b'absorb.description',
1075 1075 )
1076 1076 fm.end()
1077 1077 if not opts.get(b'dry_run'):
1078 1078 if (
1079 1079 not opts.get(b'apply_changes')
1080 1080 and state.ctxaffected
1081 1081 and ui.promptchoice(
1082 1082 b"apply changes (y/N)? $$ &Yes $$ &No", default=1
1083 1083 )
1084 1084 ):
1085 1085 raise error.CanceledError(_(b'absorb cancelled\n'))
1086 1086
1087 1087 state.apply()
1088 1088 if state.commit():
1089 1089 state.printchunkstats()
1090 1090 elif not ui.quiet:
1091 1091 ui.write(_(b'nothing applied\n'))
1092 1092 return state
1093 1093
1094 1094
1095 1095 @command(
1096 1096 b'absorb',
1097 1097 [
1098 1098 (
1099 1099 b'a',
1100 1100 b'apply-changes',
1101 1101 None,
1102 1102 _(b'apply changes without prompting for confirmation'),
1103 1103 ),
1104 1104 (
1105 1105 b'p',
1106 1106 b'print-changes',
1107 1107 None,
1108 1108 _(b'always print which changesets are modified by which changes'),
1109 1109 ),
1110 1110 (
1111 1111 b'i',
1112 1112 b'interactive',
1113 1113 None,
1114 1114 _(b'interactively select which chunks to apply'),
1115 1115 ),
1116 1116 (
1117 1117 b'e',
1118 1118 b'edit-lines',
1119 1119 None,
1120 1120 _(
1121 1121 b'edit what lines belong to which changesets before commit '
1122 1122 b'(EXPERIMENTAL)'
1123 1123 ),
1124 1124 ),
1125 1125 ]
1126 1126 + commands.dryrunopts
1127 1127 + commands.templateopts
1128 1128 + commands.walkopts,
1129 1129 _(b'hg absorb [OPTION] [FILE]...'),
1130 1130 helpcategory=command.CATEGORY_COMMITTING,
1131 1131 helpbasic=True,
1132 1132 )
1133 1133 def absorbcmd(ui, repo, *pats, **opts):
1134 1134 """incorporate corrections into the stack of draft changesets
1135 1135
1136 1136 absorb analyzes each change in your working directory and attempts to
1137 1137 amend the changed lines into the changesets in your stack that first
1138 1138 introduced those lines.
1139 1139
1140 1140 If absorb cannot find an unambiguous changeset to amend for a change,
1141 1141 that change will be left in the working directory, untouched. They can be
1142 1142 observed by :hg:`status` or :hg:`diff` afterwards. In other words,
1143 1143 absorb does not write to the working directory.
1144 1144
1145 1145 Changesets outside the revset `::. and not public() and not merge()` will
1146 1146 not be changed.
1147 1147
1148 1148 Changesets that become empty after applying the changes will be deleted.
1149 1149
1150 1150 By default, absorb will show what it plans to do and prompt for
1151 1151 confirmation. If you are confident that the changes will be absorbed
1152 1152 to the correct place, run :hg:`absorb -a` to apply the changes
1153 1153 immediately.
1154 1154
1155 1155 Returns 0 on success, 1 if all chunks were ignored and nothing amended.
1156 1156 """
1157 1157 opts = pycompat.byteskwargs(opts)
1158 1158
1159 1159 with repo.wlock(), repo.lock():
1160 1160 if not opts[b'dry_run']:
1161 1161 cmdutil.checkunfinished(repo)
1162 1162
1163 1163 state = absorb(ui, repo, pats=pats, opts=opts)
1164 1164 if sum(s[0] for s in state.chunkstats.values()) == 0:
1165 1165 return 1
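Most of the rewritten loops above live in `filefixupstate`, whose docstring explains that the linelog stores the original file revisions at odd revision numbers and the fixups made to them at even ones (`_buildlinelog` writes revision `i * 2 + 1`, `_checkoutlinelog` reads `(i + 1) * 2`, and `apply()` recovers the fctx index with `(max(rev - 1, 0)) // 2`). A small self-contained sketch of that numbering, in plain Python rather than Mercurial code:

```python
# Illustrative sketch of the odd/even linelog revision numbering used by
# filefixupstate; not Mercurial code.
def rev_for_original(i):
    # fctxs[i] is stored at odd linelog revision 1, 3, 5, ...
    return i * 2 + 1

def rev_for_fixup(i):
    # fixups to fctxs[i] land at the following even revision 2, 4, 6, ...
    return i * 2 + 2

def fctx_index(rev):
    # inverse mapping, as used by apply() when reporting chunks
    return max(rev - 1, 0) // 2

for i in range(4):
    assert fctx_index(rev_for_original(i)) == i
    assert fctx_index(rev_for_fixup(i)) == i
```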
@@ -1,489 +1,488 b''
1 1 # acl.py - changeset access control for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''hooks for controlling repository access
9 9
10 10 This hook makes it possible to allow or deny write access to given
11 11 branches and paths of a repository when receiving incoming changesets
12 12 via pretxnchangegroup and pretxncommit.
13 13
14 14 The authorization is matched based on the local user name on the
15 15 system where the hook runs, and not the committer of the original
16 16 changeset (since the latter is merely informative).
17 17
18 18 The acl hook is best used along with a restricted shell like hgsh,
19 19 preventing authenticating users from doing anything other than pushing
20 20 or pulling. The hook is not safe to use if users have interactive
21 21 shell access, as they can then disable the hook. Nor is it safe if
22 22 remote users share an account, because then there is no way to
23 23 distinguish them.
24 24
25 25 The order in which access checks are performed is:
26 26
27 27 1) Deny list for branches (section ``acl.deny.branches``)
28 28 2) Allow list for branches (section ``acl.allow.branches``)
29 29 3) Deny list for paths (section ``acl.deny``)
30 30 4) Allow list for paths (section ``acl.allow``)
31 31
32 32 The allow and deny sections take key-value pairs.
33 33
34 34 Branch-based Access Control
35 35 ---------------------------
36 36
37 37 Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
38 38 have branch-based access control. Keys in these sections can be
39 39 either:
40 40
41 41 - a branch name, or
42 42 - an asterisk, to match any branch;
43 43
44 44 The corresponding values can be either:
45 45
46 46 - a comma-separated list containing users and groups, or
47 47 - an asterisk, to match anyone;
48 48
49 49 You can add the "!" prefix to a user or group name to invert the sense
50 50 of the match.
51 51
52 52 Path-based Access Control
53 53 -------------------------
54 54
55 55 Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
56 56 access control. Keys in these sections accept a subtree pattern (with
57 57 a glob syntax by default). The corresponding values follow the same
58 58 syntax as the other sections above.
59 59
60 60 Bookmark-based Access Control
61 61 -----------------------------
62 62 Use the ``acl.deny.bookmarks`` and ``acl.allow.bookmarks`` sections to
63 63 have bookmark-based access control. Keys in these sections can be
64 64 either:
65 65
66 66 - a bookmark name, or
67 67 - an asterisk, to match any bookmark;
68 68
69 69 The corresponding values can be either:
70 70
71 71 - a comma-separated list containing users and groups, or
72 72 - an asterisk, to match anyone;
73 73
74 74 You can add the "!" prefix to a user or group name to invert the sense
75 75 of the match.
76 76
77 77 Note: for interactions between clients and servers using Mercurial 3.6+
78 78 a rejection will generally reject the entire push, for interactions
79 79 involving older clients, the commit transactions will already be accepted,
80 80 and only the bookmark movement will be rejected.
81 81
82 82 Groups
83 83 ------
84 84
85 85 Group names must be prefixed with an ``@`` symbol. Specifying a group
86 86 name has the same effect as specifying all the users in that group.
87 87
88 88 You can define group members in the ``acl.groups`` section.
89 89 If a group name is not defined there, and Mercurial is running under
90 90 a Unix-like system, the list of users will be taken from the OS.
91 91 Otherwise, an exception will be raised.
92 92
93 93 Example Configuration
94 94 ---------------------
95 95
96 96 ::
97 97
98 98 [hooks]
99 99
100 100 # Use this if you want to check access restrictions at commit time
101 101 pretxncommit.acl = python:hgext.acl.hook
102 102
103 103 # Use this if you want to check access restrictions for pull, push,
104 104 # bundle and serve.
105 105 pretxnchangegroup.acl = python:hgext.acl.hook
106 106
107 107 [acl]
108 108 # Allow or deny access for incoming changes only if their source is
109 109 # listed here, let them pass otherwise. Source is "serve" for all
110 110 # remote access (http or ssh), "push", "pull" or "bundle" when the
111 111 # related commands are run locally.
112 112 # Default: serve
113 113 sources = serve
114 114
115 115 [acl.deny.branches]
116 116
117 117 # Everyone is denied to the frozen branch:
118 118 frozen-branch = *
119 119
120 120 # A bad user is denied on all branches:
121 121 * = bad-user
122 122
123 123 [acl.allow.branches]
124 124
125 125 # A few users are allowed on branch-a:
126 126 branch-a = user-1, user-2, user-3
127 127
128 128 # Only one user is allowed on branch-b:
129 129 branch-b = user-1
130 130
131 131 # The super user is allowed on any branch:
132 132 * = super-user
133 133
134 134 # Everyone is allowed on branch-for-tests:
135 135 branch-for-tests = *
136 136
137 137 [acl.deny]
138 138 # This list is checked first. If a match is found, acl.allow is not
139 139 # checked. All users are granted access if acl.deny is not present.
140 140 # Format for both lists: glob pattern = user, ..., @group, ...
141 141
142 142 # To match everyone, use an asterisk for the user:
143 143 # my/glob/pattern = *
144 144
145 145 # user6 will not have write access to any file:
146 146 ** = user6
147 147
148 148 # Group "hg-denied" will not have write access to any file:
149 149 ** = @hg-denied
150 150
151 151 # Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
152 152 # everyone being able to change all other files. See below.
153 153 src/main/resources/DONT-TOUCH-THIS.txt = *
154 154
155 155 [acl.allow]
156 156 # if acl.allow is not present, all users are allowed by default
157 157 # empty acl.allow = no users allowed
158 158
159 159 # User "doc_writer" has write access to any file under the "docs"
160 160 # folder:
161 161 docs/** = doc_writer
162 162
163 163 # User "jack" and group "designers" have write access to any file
164 164 # under the "images" folder:
165 165 images/** = jack, @designers
166 166
167 167 # Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
168 168 # will have write access to any file under the "resources" folder
169 169 # (except for 1 file. See acl.deny):
170 170 src/main/resources/** = *
171 171
172 172 .hgtags = release_engineer
173 173
174 174 Examples using the "!" prefix
175 175 .............................
176 176
177 177 Suppose there's a branch that only a given user (or group) should be able to
178 178 push to, and you don't want to restrict access to any other branch that may
179 179 be created.
180 180
181 181 The "!" prefix allows you to prevent anyone except a given user or group to
182 182 push changesets in a given branch or path.
183 183
184 184 In the examples below, we will:
185 185 1) Deny access to branch "ring" to anyone but user "gollum"
186 186 2) Deny access to branch "lake" to anyone but members of the group "hobbit"
187 187 3) Deny access to a file to anyone but user "gollum"
188 188
189 189 ::
190 190
191 191 [acl.allow.branches]
192 192 # Empty
193 193
194 194 [acl.deny.branches]
195 195
196 196 # 1) only 'gollum' can commit to branch 'ring';
197 197 # 'gollum' and anyone else can still commit to any other branch.
198 198 ring = !gollum
199 199
200 200 # 2) only members of the group 'hobbit' can commit to branch 'lake';
201 201 # 'hobbit' members and anyone else can still commit to any other branch.
202 202 lake = !@hobbit
203 203
204 204 # You can also deny access based on file paths:
205 205
206 206 [acl.allow]
207 207 # Empty
208 208
209 209 [acl.deny]
210 210 # 3) only 'gollum' can change the file below;
211 211 # 'gollum' and anyone else can still change any other file.
212 212 /misty/mountains/cave/ring = !gollum
213 213
214 214 '''
215 215
216 216
217 217 from mercurial.i18n import _
218 218 from mercurial import (
219 219 error,
220 220 extensions,
221 221 match,
222 pycompat,
223 222 registrar,
224 223 util,
225 224 )
226 225 from mercurial.utils import procutil
227 226
228 227 urlreq = util.urlreq
229 228
230 229 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
231 230 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
232 231 # be specifying the version(s) of Mercurial they are tested with, or
233 232 # leave the attribute unspecified.
234 233 testedwith = b'ships-with-hg-core'
235 234
236 235 configtable = {}
237 236 configitem = registrar.configitem(configtable)
238 237
239 238 # deprecated config: acl.config
240 239 configitem(
241 240 b'acl',
242 241 b'config',
243 242 default=None,
244 243 )
245 244 configitem(
246 245 b'acl.groups',
247 246 b'.*',
248 247 default=None,
249 248 generic=True,
250 249 )
251 250 configitem(
252 251 b'acl.deny.branches',
253 252 b'.*',
254 253 default=None,
255 254 generic=True,
256 255 )
257 256 configitem(
258 257 b'acl.allow.branches',
259 258 b'.*',
260 259 default=None,
261 260 generic=True,
262 261 )
263 262 configitem(
264 263 b'acl.deny',
265 264 b'.*',
266 265 default=None,
267 266 generic=True,
268 267 )
269 268 configitem(
270 269 b'acl.allow',
271 270 b'.*',
272 271 default=None,
273 272 generic=True,
274 273 )
275 274 configitem(
276 275 b'acl',
277 276 b'sources',
278 277 default=lambda: [b'serve'],
279 278 )
280 279
281 280
282 281 def _getusers(ui, group):
283 282
284 283 # First, try to use group definition from section [acl.groups]
285 284 hgrcusers = ui.configlist(b'acl.groups', group)
286 285 if hgrcusers:
287 286 return hgrcusers
288 287
289 288 ui.debug(b'acl: "%s" not defined in [acl.groups]\n' % group)
290 289 # If no users found in group definition, get users from OS-level group
291 290 try:
292 291 return util.groupmembers(group)
293 292 except KeyError:
294 293 raise error.Abort(_(b"group '%s' is undefined") % group)
295 294
296 295
297 296 def _usermatch(ui, user, usersorgroups):
298 297
299 298 if usersorgroups == b'*':
300 299 return True
301 300
302 301 for ug in usersorgroups.replace(b',', b' ').split():
303 302
304 303 if ug.startswith(b'!'):
305 304 # Test for excluded user or group. Format:
306 305 # if ug is a user name: !username
307 306 # if ug is a group name: !@groupname
308 307 ug = ug[1:]
309 308 if (
310 309 not ug.startswith(b'@')
311 310 and user != ug
312 311 or ug.startswith(b'@')
313 312 and user not in _getusers(ui, ug[1:])
314 313 ):
315 314 return True
316 315
317 316 # Test for user or group. Format:
318 317 # if ug is a user name: username
319 318 # if ug is a group name: @groupname
320 319 elif (
321 320 user == ug or ug.startswith(b'@') and user in _getusers(ui, ug[1:])
322 321 ):
323 322 return True
324 323
325 324 return False
326 325
327 326
328 327 def buildmatch(ui, repo, user, key):
329 328 '''return tuple of (match function, list enabled).'''
330 329 if not ui.has_section(key):
331 330 ui.debug(b'acl: %s not enabled\n' % key)
332 331 return None
333 332
334 333 pats = [
335 334 pat for pat, users in ui.configitems(key) if _usermatch(ui, user, users)
336 335 ]
337 336 ui.debug(
338 337 b'acl: %s enabled, %d entries for user %s\n' % (key, len(pats), user)
339 338 )
340 339
341 340 # Branch-based ACL
342 341 if not repo:
343 342 if pats:
344 343 # If there's an asterisk (meaning "any branch"), always return True;
345 344 # Otherwise, test if b is in pats
346 345 if b'*' in pats:
347 346 return util.always
348 347 return lambda b: b in pats
349 348 return util.never
350 349
351 350 # Path-based ACL
352 351 if pats:
353 352 return match.match(repo.root, b'', pats)
354 353 return util.never
355 354
356 355
357 356 def ensureenabled(ui):
358 357 """make sure the extension is enabled when used as hook
359 358
360 359 When acl is used through hooks, the extension is never formally loaded and
361 360 enabled. This has some side effect, for example the config declaration is
362 361 never loaded. This function ensure the extension is enabled when running
363 362 hooks.
364 363 """
365 364 if b'acl' in ui._knownconfig:
366 365 return
367 366 ui.setconfig(b'extensions', b'acl', b'', source=b'internal')
368 367 extensions.loadall(ui, [b'acl'])
369 368
370 369
371 370 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
372 371
373 372 ensureenabled(ui)
374 373
375 374 if hooktype not in [b'pretxnchangegroup', b'pretxncommit', b'prepushkey']:
376 375 raise error.Abort(
377 376 _(
378 377 b'config error - hook type "%s" cannot stop '
379 378 b'incoming changesets, commits, nor bookmarks'
380 379 )
381 380 % hooktype
382 381 )
383 382 if hooktype == b'pretxnchangegroup' and source not in ui.configlist(
384 383 b'acl', b'sources'
385 384 ):
386 385 ui.debug(b'acl: changes have source "%s" - skipping\n' % source)
387 386 return
388 387
389 388 user = None
390 389 if source == b'serve' and 'url' in kwargs:
391 390 url = kwargs['url'].split(b':')
392 391 if url[0] == b'remote' and url[1].startswith(b'http'):
393 392 user = urlreq.unquote(url[3])
394 393
395 394 if user is None:
396 395 user = procutil.getuser()
397 396
398 397 ui.debug(b'acl: checking access for user "%s"\n' % user)
399 398
400 399 if hooktype == b'prepushkey':
401 400 _pkhook(ui, repo, hooktype, node, source, user, **kwargs)
402 401 else:
403 402 _txnhook(ui, repo, hooktype, node, source, user, **kwargs)
404 403
405 404
406 405 def _pkhook(ui, repo, hooktype, node, source, user, **kwargs):
407 406 if kwargs['namespace'] == b'bookmarks':
408 407 bookmark = kwargs['key']
409 408 ctx = kwargs['new']
410 409 allowbookmarks = buildmatch(ui, None, user, b'acl.allow.bookmarks')
411 410 denybookmarks = buildmatch(ui, None, user, b'acl.deny.bookmarks')
412 411
413 412 if denybookmarks and denybookmarks(bookmark):
414 413 raise error.Abort(
415 414 _(
416 415 b'acl: user "%s" denied on bookmark "%s"'
417 416 b' (changeset "%s")'
418 417 )
419 418 % (user, bookmark, ctx)
420 419 )
421 420 if allowbookmarks and not allowbookmarks(bookmark):
422 421 raise error.Abort(
423 422 _(
424 423 b'acl: user "%s" not allowed on bookmark "%s"'
425 424 b' (changeset "%s")'
426 425 )
427 426 % (user, bookmark, ctx)
428 427 )
429 428 ui.debug(
430 429 b'acl: bookmark access granted: "%s" on bookmark "%s"\n'
431 430 % (ctx, bookmark)
432 431 )
433 432
434 433
435 434 def _txnhook(ui, repo, hooktype, node, source, user, **kwargs):
436 435 # deprecated config: acl.config
437 436 cfg = ui.config(b'acl', b'config')
438 437 if cfg:
439 438 ui.readconfig(
440 439 cfg,
441 440 sections=[
442 441 b'acl.groups',
443 442 b'acl.allow.branches',
444 443 b'acl.deny.branches',
445 444 b'acl.allow',
446 445 b'acl.deny',
447 446 ],
448 447 )
449 448
450 449 allowbranches = buildmatch(ui, None, user, b'acl.allow.branches')
451 450 denybranches = buildmatch(ui, None, user, b'acl.deny.branches')
452 451 allow = buildmatch(ui, repo, user, b'acl.allow')
453 452 deny = buildmatch(ui, repo, user, b'acl.deny')
454 453
455 for rev in pycompat.xrange(repo[node].rev(), len(repo)):
454 for rev in range(repo[node].rev(), len(repo)):
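        # repo[node] is the first changeset added by the transaction; all
        # revisions from there to the tip arrived with it, so check each one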
456 455 ctx = repo[rev]
457 456 branch = ctx.branch()
458 457 if denybranches and denybranches(branch):
459 458 raise error.Abort(
460 459 _(b'acl: user "%s" denied on branch "%s" (changeset "%s")')
461 460 % (user, branch, ctx)
462 461 )
463 462 if allowbranches and not allowbranches(branch):
464 463 raise error.Abort(
465 464 _(
466 465 b'acl: user "%s" not allowed on branch "%s"'
467 466 b' (changeset "%s")'
468 467 )
469 468 % (user, branch, ctx)
470 469 )
471 470 ui.debug(
472 471 b'acl: branch access granted: "%s" on branch "%s"\n' % (ctx, branch)
473 472 )
474 473
475 474 for f in ctx.files():
476 475 if deny and deny(f):
477 476 raise error.Abort(
478 477 _(b'acl: user "%s" denied on "%s" (changeset "%s")')
479 478 % (user, f, ctx)
480 479 )
481 480 if allow and not allow(f):
482 481 raise error.Abort(
483 482 _(
484 483 b'acl: user "%s" not allowed on "%s"'
485 484 b' (changeset "%s")'
486 485 )
487 486 % (user, f, ctx)
488 487 )
489 488 ui.debug(b'acl: path access granted: "%s"\n' % ctx)
@@ -1,108 +1,107 b''
1 1 # -*- coding: UTF-8 -*-
2 2 # beautifygraph.py - improve graph output by using Unicode characters
3 3 #
4 4 # Copyright 2018 John Stiles <johnstiles@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''beautify log -G output by using Unicode characters (EXPERIMENTAL)
10 10
11 11 A terminal with UTF-8 support and monospace narrow text is required.
12 12 '''
13 13
14 14
15 15 from mercurial.i18n import _
16 16 from mercurial import (
17 17 encoding,
18 18 extensions,
19 19 graphmod,
20 pycompat,
21 20 templatekw,
22 21 )
23 22
24 23 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
25 24 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
26 25 # be specifying the version(s) of Mercurial they are tested with, or
27 26 # leave the attribute unspecified.
28 27 testedwith = b'ships-with-hg-core'
29 28
30 29
31 30 def prettyedge(before, edge, after):
32 31 if edge == b'~':
33 32 return b'\xE2\x95\xA7' # U+2567 ╧
34 33 if edge == b'/':
35 34 return b'\xE2\x95\xB1' # U+2571 ╱
36 35 if edge == b'-':
37 36 return b'\xE2\x94\x80' # U+2500 ─
38 37 if edge == b'|':
39 38 return b'\xE2\x94\x82' # U+2502 │
40 39 if edge == b':':
41 40 return b'\xE2\x94\x86' # U+2506 ┆
42 41 if edge == b'\\':
43 42 return b'\xE2\x95\xB2' # U+2572 ╲
44 43 if edge == b'+':
45 44 if before == b' ' and not after == b' ':
46 45 return b'\xE2\x94\x9C' # U+251C ├
47 46 if after == b' ' and not before == b' ':
48 47 return b'\xE2\x94\xA4' # U+2524 ┤
49 48 return b'\xE2\x94\xBC' # U+253C ┼
50 49 return edge
51 50
52 51
53 52 def convertedges(line):
54 53 line = b' %s ' % line
55 54 pretty = []
56 for idx in pycompat.xrange(len(line) - 2):
55 for idx in range(len(line) - 2):
57 56 pretty.append(
58 57 prettyedge(
59 58 line[idx : idx + 1],
60 59 line[idx + 1 : idx + 2],
61 60 line[idx + 2 : idx + 3],
62 61 )
63 62 )
64 63 return b''.join(pretty)
65 64
66 65
67 66 def getprettygraphnode(orig, *args, **kwargs):
68 67 node = orig(*args, **kwargs)
69 68 if node == b'o':
70 69 return b'\xE2\x97\x8B' # U+25CB ○
71 70 if node == b'@':
72 71 return b'\xE2\x97\x89' # U+25C9 ◉
73 72 if node == b'%':
74 73 return b'\xE2\x97\x8D' # U+25CE ◎
75 74 if node == b'*':
76 75 return b'\xE2\x88\x97' # U+2217 ∗
77 76 if node == b'x':
78 77 return b'\xE2\x97\x8C' # U+25CC ◌
79 78 if node == b'_':
80 79 return b'\xE2\x95\xA4' # U+2564 ╤
81 80 return node
82 81
83 82
84 83 def outputprettygraph(orig, ui, graph, *args, **kwargs):
85 84 (edges, text) = zip(*graph)
86 85 graph = zip([convertedges(e) for e in edges], text)
87 86 return orig(ui, graph, *args, **kwargs)
88 87
89 88
90 89 def extsetup(ui):
91 90 if ui.plain(b'graph'):
92 91 return
93 92
94 93 if encoding.encoding != b'UTF-8':
95 94 ui.warn(_(b'beautifygraph: unsupported encoding, UTF-8 required\n'))
96 95 return
97 96
98 97 if 'A' in encoding._wide:
99 98 ui.warn(
100 99 _(
101 100 b'beautifygraph: unsupported terminal settings, '
102 101 b'monospace narrow text required\n'
103 102 )
104 103 )
105 104 return
106 105
107 106 extensions.wrapfunction(graphmod, b'outputgraph', outputprettygraph)
108 107 extensions.wrapfunction(templatekw, b'getgraphnode', getprettygraphnode)
@@ -1,1068 +1,1068 b''
1 1 # Mercurial built-in replacement for cvsps.
2 2 #
3 3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import functools
9 9 import os
10 10 import pickle
11 11 import re
12 12
13 13 from mercurial.i18n import _
14 14 from mercurial.pycompat import open
15 15 from mercurial import (
16 16 encoding,
17 17 error,
18 18 hook,
19 19 pycompat,
20 20 util,
21 21 )
22 22 from mercurial.utils import (
23 23 dateutil,
24 24 procutil,
25 25 stringutil,
26 26 )
27 27
28 28
29 29 class logentry:
30 30 """Class logentry has the following attributes:
31 31 .author - author name as CVS knows it
32 32 .branch - name of branch this revision is on
33 33 .branches - revision tuple of branches starting at this revision
34 34 .comment - commit message
35 35 .commitid - CVS commitid or None
36 36 .date - the commit date as a (time, tz) tuple
37 37 .dead - true if file revision is dead
38 38 .file - Name of file
39 39 .lines - a tuple (+lines, -lines) or None
40 40 .parent - Previous revision of this entry
41 41 .rcs - name of file as returned from CVS
42 42 .revision - revision number as tuple
43 43 .tags - list of tags on the file
44 44 .synthetic - is this a synthetic "file ... added on ..." revision?
45 45 .mergepoint - the branch that has been merged from (if present in
46 46 rlog output) or None
47 47 .branchpoints - the branches that start at the current entry or empty
48 48 """
49 49
50 50 def __init__(self, **entries):
51 51 self.synthetic = False
52 52 self.__dict__.update(entries)
53 53
54 54 def __repr__(self):
55 55 items = ("%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__))
56 56 return "%s(%s)" % (type(self).__name__, ", ".join(items))
57 57
58 58
59 59 class logerror(Exception):
60 60 pass
61 61
62 62
63 63 def getrepopath(cvspath):
64 64 """Return the repository path from a CVS path.
65 65
66 66 >>> getrepopath(b'/foo/bar')
67 67 '/foo/bar'
68 68 >>> getrepopath(b'c:/foo/bar')
69 69 '/foo/bar'
70 70 >>> getrepopath(b':pserver:10/foo/bar')
71 71 '/foo/bar'
72 72 >>> getrepopath(b':pserver:10c:/foo/bar')
73 73 '/foo/bar'
74 74 >>> getrepopath(b':pserver:/foo/bar')
75 75 '/foo/bar'
76 76 >>> getrepopath(b':pserver:c:/foo/bar')
77 77 '/foo/bar'
78 78 >>> getrepopath(b':pserver:truc@foo.bar:/foo/bar')
79 79 '/foo/bar'
80 80 >>> getrepopath(b':pserver:truc@foo.bar:c:/foo/bar')
81 81 '/foo/bar'
82 82 >>> getrepopath(b'user@server/path/to/repository')
83 83 '/path/to/repository'
84 84 """
85 85     # According to the CVS manual, CVS paths are expressed like:
86 86 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
87 87 #
88 88     # The CVS path is split into parts, and then the position of the first
89 89     # occurrence of the '/' char after the '@' is located. The result is the
90 90     # rest of the string from that '/' character onward, including it.
91 91
92 92 parts = cvspath.split(b':')
93 93 atposition = parts[-1].find(b'@')
94 94 start = 0
95 95
96 96 if atposition != -1:
97 97 start = atposition
98 98
99 99 repopath = parts[-1][parts[-1].find(b'/', start) :]
100 100 return repopath
101 101
102 102
103 103 def createlog(ui, directory=None, root=b"", rlog=True, cache=None):
104 104 '''Collect the CVS rlog'''
105 105
106 106 # Because we store many duplicate commit log messages, reusing strings
107 107 # saves a lot of memory and pickle storage space.
108 108 _scache = {}
109 109
110 110 def scache(s):
111 111 """return a shared version of a string"""
112 112 return _scache.setdefault(s, s)
113 113
114 114 ui.status(_(b'collecting CVS rlog\n'))
115 115
116 116 log = [] # list of logentry objects containing the CVS state
117 117
118 118 # patterns to match in CVS (r)log output, by state of use
119 119 re_00 = re.compile(b'RCS file: (.+)$')
120 120 re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$')
121 121 re_02 = re.compile(b'cvs (r?log|server): (.+)\n$')
122 122 re_03 = re.compile(
123 123 b"(Cannot access.+CVSROOT)|(can't create temporary directory.+)$"
124 124 )
125 125 re_10 = re.compile(b'Working file: (.+)$')
126 126 re_20 = re.compile(b'symbolic names:')
127 127 re_30 = re.compile(b'\t(.+): ([\\d.]+)$')
128 128 re_31 = re.compile(b'----------------------------$')
129 129 re_32 = re.compile(
130 130 b'======================================='
131 131 b'======================================$'
132 132 )
133 133 re_50 = re.compile(br'revision ([\d.]+)(\s+locked by:\s+.+;)?$')
134 134 re_60 = re.compile(
135 135 br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
136 136 br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
137 137 br'(\s+commitid:\s+([^;]+);)?'
138 138 br'(.*mergepoint:\s+([^;]+);)?'
139 139 )
140 140 re_70 = re.compile(b'branches: (.+);$')
141 141
142 142 file_added_re = re.compile(br'file [^/]+ was (initially )?added on branch')
143 143
144 144 prefix = b'' # leading path to strip of what we get from CVS
145 145
146 146 if directory is None:
147 147 # Current working directory
148 148
149 149 # Get the real directory in the repository
150 150 try:
151 151 with open(os.path.join(b'CVS', b'Repository'), b'rb') as f:
152 152 prefix = f.read().strip()
153 153 directory = prefix
154 154 if prefix == b".":
155 155 prefix = b""
156 156 except IOError:
157 157 raise logerror(_(b'not a CVS sandbox'))
158 158
159 159 if prefix and not prefix.endswith(pycompat.ossep):
160 160 prefix += pycompat.ossep
161 161
162 162 # Use the Root file in the sandbox, if it exists
163 163 try:
164 164 root = open(os.path.join(b'CVS', b'Root'), b'rb').read().strip()
165 165 except IOError:
166 166 pass
167 167
168 168 if not root:
169 169 root = encoding.environ.get(b'CVSROOT', b'')
170 170
171 171 # read log cache if one exists
172 172 oldlog = []
173 173 date = None
174 174
175 175 if cache:
176 176 cachedir = os.path.expanduser(b'~/.hg.cvsps')
177 177 if not os.path.exists(cachedir):
178 178 os.mkdir(cachedir)
179 179
180 180 # The cvsps cache pickle needs a uniquified name, based on the
181 181 # repository location. The address may have all sort of nasties
182 182 # in it, slashes, colons and such. So here we take just the
183 183 # alphanumeric characters, concatenated in a way that does not
184 184 # mix up the various components, so that
185 185 # :pserver:user@server:/path
186 186 # and
187 187 # /pserver/user/server/path
188 188 # are mapped to different cache file names.
189 189 cachefile = root.split(b":") + [directory, b"cache"]
190 190 cachefile = [b'-'.join(re.findall(br'\w+', s)) for s in cachefile if s]
191 191 cachefile = os.path.join(
192 192 cachedir, b'.'.join([s for s in cachefile if s])
193 193 )
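        # e.g. a hypothetical root b':pserver:user@server:/path' with
        # directory b'proj' yields b'pserver.user-server.path.proj.cache'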
194 194
195 195 if cache == b'update':
196 196 try:
197 197 ui.note(_(b'reading cvs log cache %s\n') % cachefile)
198 198 oldlog = pickle.load(open(cachefile, b'rb'))
199 199 for e in oldlog:
200 200 if not (
201 201 util.safehasattr(e, b'branchpoints')
202 202 and util.safehasattr(e, b'commitid')
203 203 and util.safehasattr(e, b'mergepoint')
204 204 ):
205 205 ui.status(_(b'ignoring old cache\n'))
206 206 oldlog = []
207 207 break
208 208
209 209 ui.note(_(b'cache has %d log entries\n') % len(oldlog))
210 210 except Exception as e:
211 211 ui.note(_(b'error reading cache: %r\n') % e)
212 212
213 213 if oldlog:
214 214 date = oldlog[-1].date # last commit date as a (time,tz) tuple
215 215 date = dateutil.datestr(date, b'%Y/%m/%d %H:%M:%S %1%2')
216 216
217 217 # build the CVS commandline
218 218 cmd = [b'cvs', b'-q']
219 219 if root:
220 220 cmd.append(b'-d%s' % root)
221 221 p = util.normpath(getrepopath(root))
222 222 if not p.endswith(b'/'):
223 223 p += b'/'
224 224 if prefix:
225 225 # looks like normpath replaces "" by "."
226 226 prefix = p + util.normpath(prefix)
227 227 else:
228 228 prefix = p
229 229 cmd.append([b'log', b'rlog'][rlog])
230 230 if date:
231 231 # no space between option and date string
232 232 cmd.append(b'-d>%s' % date)
233 233 cmd.append(directory)
234 234
235 235 # state machine begins here
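    # states: 0 = expect 'RCS file'; 1 = expect 'Working file' (log only);
    # 2 = expect 'symbolic names'; 3 = read tags; 4 = expect '----' separator;
    # 5 = read revision number; 6 = read date/author/state line;
    # 7 = read 'branches:' line or start of comment; 8 = read commit log message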
236 236 tags = {} # dictionary of revisions on current file with their tags
237 237 branchmap = {} # mapping between branch names and revision numbers
238 238 rcsmap = {}
239 239 state = 0
240 240 store = False # set when a new record can be appended
241 241
242 242 cmd = [procutil.shellquote(arg) for arg in cmd]
243 243 ui.note(_(b"running %s\n") % (b' '.join(cmd)))
244 244 ui.debug(b"prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
245 245
246 246 pfp = procutil.popen(b' '.join(cmd), b'rb')
247 247 peek = util.fromnativeeol(pfp.readline())
248 248 while True:
249 249 line = peek
250 250 if line == b'':
251 251 break
252 252 peek = util.fromnativeeol(pfp.readline())
253 253 if line.endswith(b'\n'):
254 254 line = line[:-1]
255 255 # ui.debug('state=%d line=%r\n' % (state, line))
256 256
257 257 if state == 0:
258 258 # initial state, consume input until we see 'RCS file'
259 259 match = re_00.match(line)
260 260 if match:
261 261 rcs = match.group(1)
262 262 tags = {}
263 263 if rlog:
264 264 filename = util.normpath(rcs[:-2])
265 265 if filename.startswith(prefix):
266 266 filename = filename[len(prefix) :]
267 267 if filename.startswith(b'/'):
268 268 filename = filename[1:]
269 269 if filename.startswith(b'Attic/'):
270 270 filename = filename[6:]
271 271 else:
272 272 filename = filename.replace(b'/Attic/', b'/')
273 273 state = 2
274 274 continue
275 275 state = 1
276 276 continue
277 277 match = re_01.match(line)
278 278 if match:
279 279 raise logerror(match.group(1))
280 280 match = re_02.match(line)
281 281 if match:
282 282 raise logerror(match.group(2))
283 283 if re_03.match(line):
284 284 raise logerror(line)
285 285
286 286 elif state == 1:
287 287 # expect 'Working file' (only when using log instead of rlog)
288 288 match = re_10.match(line)
289 289 assert match, _(b'RCS file must be followed by working file')
290 290 filename = util.normpath(match.group(1))
291 291 state = 2
292 292
293 293 elif state == 2:
294 294 # expect 'symbolic names'
295 295 if re_20.match(line):
296 296 branchmap = {}
297 297 state = 3
298 298
299 299 elif state == 3:
300 300 # read the symbolic names and store as tags
301 301 match = re_30.match(line)
302 302 if match:
303 303 rev = [int(x) for x in match.group(2).split(b'.')]
304 304
305 305 # Convert magic branch number to an odd-numbered one
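                # e.g. CVS records branch 1.2.4 as the magic number 1.2.0.4;
                # dropping the penultimate 0 recovers the real branch number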
306 306 revn = len(rev)
307 307 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
308 308 rev = rev[:-2] + rev[-1:]
309 309 rev = tuple(rev)
310 310
311 311 if rev not in tags:
312 312 tags[rev] = []
313 313 tags[rev].append(match.group(1))
314 314 branchmap[match.group(1)] = match.group(2)
315 315
316 316 elif re_31.match(line):
317 317 state = 5
318 318 elif re_32.match(line):
319 319 state = 0
320 320
321 321 elif state == 4:
322 322 # expecting '------' separator before first revision
323 323 if re_31.match(line):
324 324 state = 5
325 325 else:
326 326 assert not re_32.match(line), _(
327 327 b'must have at least some revisions'
328 328 )
329 329
330 330 elif state == 5:
331 331 # expecting revision number and possibly (ignored) lock indication
332 332 # we create the logentry here from values stored in states 0 to 4,
333 333 # as this state is re-entered for subsequent revisions of a file.
334 334 match = re_50.match(line)
335 335 assert match, _(b'expected revision number')
336 336 e = logentry(
337 337 rcs=scache(rcs),
338 338 file=scache(filename),
339 339 revision=tuple([int(x) for x in match.group(1).split(b'.')]),
340 340 branches=[],
341 341 parent=None,
342 342 commitid=None,
343 343 mergepoint=None,
344 344 branchpoints=set(),
345 345 )
346 346
347 347 state = 6
348 348
349 349 elif state == 6:
350 350 # expecting date, author, state, lines changed
351 351 match = re_60.match(line)
352 352 assert match, _(b'revision must be followed by date line')
353 353 d = match.group(1)
354 354 if d[2] == b'/':
355 355 # Y2K
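                # a two-digit year, e.g. b'97/05/31 ...', becomes b'1997/05/31 ...'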
356 356 d = b'19' + d
357 357
358 358 if len(d.split()) != 3:
359 359 # cvs log dates always in GMT
360 360 d = d + b' UTC'
361 361 e.date = dateutil.parsedate(
362 362 d,
363 363 [
364 364 b'%y/%m/%d %H:%M:%S',
365 365 b'%Y/%m/%d %H:%M:%S',
366 366 b'%Y-%m-%d %H:%M:%S',
367 367 ],
368 368 )
369 369 e.author = scache(match.group(2))
370 370 e.dead = match.group(3).lower() == b'dead'
371 371
372 372 if match.group(5):
373 373 if match.group(6):
374 374 e.lines = (int(match.group(5)), int(match.group(6)))
375 375 else:
376 376 e.lines = (int(match.group(5)), 0)
377 377 elif match.group(6):
378 378 e.lines = (0, int(match.group(6)))
379 379 else:
380 380 e.lines = None
381 381
382 382 if match.group(7): # cvs 1.12 commitid
383 383 e.commitid = match.group(8)
384 384
385 385 if match.group(9): # cvsnt mergepoint
386 386 myrev = match.group(10).split(b'.')
387 387 if len(myrev) == 2: # head
388 388 e.mergepoint = b'HEAD'
389 389 else:
390 390 myrev = b'.'.join(myrev[:-2] + [b'0', myrev[-2]])
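                    # e.g. mergepoint revision 1.2.2.1 lies on branch 1.2.2,
                    # which branchmap stores as the magic number 1.2.0.2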
391 391 branches = [b for b in branchmap if branchmap[b] == myrev]
392 392 assert len(branches) == 1, (
393 393 b'unknown branch: %s' % e.mergepoint
394 394 )
395 395 e.mergepoint = branches[0]
396 396
397 397 e.comment = []
398 398 state = 7
399 399
400 400 elif state == 7:
401 401 # read the revision numbers of branches that start at this revision
402 402 # or store the commit log message otherwise
403 403 m = re_70.match(line)
404 404 if m:
405 405 e.branches = [
406 406 tuple([int(y) for y in x.strip().split(b'.')])
407 407 for x in m.group(1).split(b';')
408 408 ]
409 409 state = 8
410 410 elif re_31.match(line) and re_50.match(peek):
411 411 state = 5
412 412 store = True
413 413 elif re_32.match(line):
414 414 state = 0
415 415 store = True
416 416 else:
417 417 e.comment.append(line)
418 418
419 419 elif state == 8:
420 420 # store commit log message
421 421 if re_31.match(line):
422 422 cpeek = peek
423 423 if cpeek.endswith(b'\n'):
424 424 cpeek = cpeek[:-1]
425 425 if re_50.match(cpeek):
426 426 state = 5
427 427 store = True
428 428 else:
429 429 e.comment.append(line)
430 430 elif re_32.match(line):
431 431 state = 0
432 432 store = True
433 433 else:
434 434 e.comment.append(line)
435 435
436 436 # When a file is added on a branch B1, CVS creates a synthetic
437 437 # dead trunk revision 1.1 so that the branch has a root.
438 438 # Likewise, if you merge such a file to a later branch B2 (one
439 439 # that already existed when the file was added on B1), CVS
440 440 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
441 441 # these revisions now, but mark them synthetic so
442 442 # createchangeset() can take care of them.
443 443 if (
444 444 store
445 445 and e.dead
446 446 and e.revision[-1] == 1
447 447 and len(e.comment) == 1 # 1.1 or 1.1.x.1
448 448 and file_added_re.match(e.comment[0])
449 449 ):
450 450 ui.debug(
451 451 b'found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0])
452 452 )
453 453 e.synthetic = True
454 454
455 455 if store:
456 456 # clean up the results and save in the log.
457 457 store = False
458 458 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
459 459 e.comment = scache(b'\n'.join(e.comment))
460 460
461 461 revn = len(e.revision)
462 462 if revn > 3 and (revn % 2) == 0:
463 463 e.branch = tags.get(e.revision[:-1], [None])[0]
464 464 else:
465 465 e.branch = None
466 466
467 467 # find the branches starting from this revision
468 468 branchpoints = set()
469 469 for branch, revision in branchmap.items():
470 470 revparts = tuple([int(i) for i in revision.split(b'.')])
471 471 if len(revparts) < 2: # bad tags
472 472 continue
473 473 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
474 474 # normal branch
475 475 if revparts[:-2] == e.revision:
476 476 branchpoints.add(branch)
477 477 elif revparts == (1, 1, 1): # vendor branch
478 478 if revparts in e.branches:
479 479 branchpoints.add(branch)
480 480 e.branchpoints = branchpoints
481 481
482 482 log.append(e)
483 483
484 484 rcsmap[e.rcs.replace(b'/Attic/', b'/')] = e.rcs
485 485
486 486 if len(log) % 100 == 0:
487 487 ui.status(
488 488 stringutil.ellipsis(b'%d %s' % (len(log), e.file), 80)
489 489 + b'\n'
490 490 )
491 491
492 492 log.sort(key=lambda x: (x.rcs, x.revision))
493 493
494 494 # find parent revisions of individual files
495 495 versions = {}
496 496 for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
497 497 rcs = e.rcs.replace(b'/Attic/', b'/')
498 498 if rcs in rcsmap:
499 499 e.rcs = rcsmap[rcs]
500 500 branch = e.revision[:-1]
501 501 versions[(e.rcs, branch)] = e.revision
502 502
503 503 for e in log:
504 504 branch = e.revision[:-1]
505 505 p = versions.get((e.rcs, branch), None)
506 506 if p is None:
507 507 p = e.revision[:-2]
508 508 e.parent = p
509 509 versions[(e.rcs, branch)] = e.revision
510 510
511 511 # update the log cache
512 512 if cache:
513 513 if log:
514 514 # join up the old and new logs
515 515 log.sort(key=lambda x: x.date)
516 516
517 517 if oldlog and oldlog[-1].date >= log[0].date:
518 518 raise logerror(
519 519 _(
520 520 b'log cache overlaps with new log entries,'
521 521 b' re-run without cache.'
522 522 )
523 523 )
524 524
525 525 log = oldlog + log
526 526
527 527 # write the new cachefile
528 528 ui.note(_(b'writing cvs log cache %s\n') % cachefile)
529 529 pickle.dump(log, open(cachefile, b'wb'))
530 530 else:
531 531 log = oldlog
532 532
533 533 ui.status(_(b'%d log entries\n') % len(log))
534 534
535 535 encodings = ui.configlist(b'convert', b'cvsps.logencoding')
536 536 if encodings:
537 537
538 538 def revstr(r):
539 539             # this is needed because logentry.revision is a tuple of "int"
540 540 # (e.g. (1, 2) for "1.2")
541 541 return b'.'.join(pycompat.maplist(pycompat.bytestr, r))
542 542
543 543 for entry in log:
544 544 comment = entry.comment
545 545 for e in encodings:
546 546 try:
547 547 entry.comment = comment.decode(pycompat.sysstr(e)).encode(
548 548 'utf-8'
549 549 )
550 550 if ui.debugflag:
551 551 ui.debug(
552 552 b"transcoding by %s: %s of %s\n"
553 553 % (e, revstr(entry.revision), entry.file)
554 554 )
555 555 break
556 556 except UnicodeDecodeError:
557 557 pass # try next encoding
558 558 except LookupError as inst: # unknown encoding, maybe
559 559 raise error.Abort(
560 560 pycompat.bytestr(inst),
561 561 hint=_(
562 562 b'check convert.cvsps.logencoding configuration'
563 563 ),
564 564 )
565 565 else:
566 566 raise error.Abort(
567 567 _(
568 568 b"no encoding can transcode"
569 569 b" CVS log message for %s of %s"
570 570 )
571 571 % (revstr(entry.revision), entry.file),
572 572 hint=_(b'check convert.cvsps.logencoding configuration'),
573 573 )
574 574
575 575 hook.hook(ui, None, b"cvslog", True, log=log)
576 576
577 577 return log
578 578
579 579
580 580 class changeset:
581 581 """Class changeset has the following attributes:
582 582 .id - integer identifying this changeset (list index)
583 583 .author - author name as CVS knows it
584 584 .branch - name of branch this changeset is on, or None
585 585 .comment - commit message
586 586 .commitid - CVS commitid or None
587 587 .date - the commit date as a (time,tz) tuple
588 588 .entries - list of logentry objects in this changeset
589 589 .parents - list of one or two parent changesets
590 590 .tags - list of tags on this changeset
591 591 .synthetic - from synthetic revision "file ... added on branch ..."
592 592     .mergepoint - the branch that has been merged from or None
593 593     .branchpoints - the branches that start at the current entry or empty
594 594 """
595 595
596 596 def __init__(self, **entries):
597 597 self.id = None
598 598 self.synthetic = False
599 599 self.__dict__.update(entries)
600 600
601 601 def __repr__(self):
602 602 items = (
603 603 b"%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)
604 604 )
605 605 return b"%s(%s)" % (type(self).__name__, b", ".join(items))
606 606
607 607
608 608 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
609 609 '''Convert log into changesets.'''
610 610
611 611 ui.status(_(b'creating changesets\n'))
612 612
613 613 # try to order commitids by date
614 614 mindate = {}
615 615 for e in log:
616 616 if e.commitid:
617 617 if e.commitid not in mindate:
618 618 mindate[e.commitid] = e.date
619 619 else:
620 620 mindate[e.commitid] = min(e.date, mindate[e.commitid])
621 621
622 622 # Merge changesets
623 623 log.sort(
624 624 key=lambda x: (
625 625 mindate.get(x.commitid, (-1, 0)),
626 626 x.commitid or b'',
627 627 x.comment,
628 628 x.author,
629 629 x.branch or b'',
630 630 x.date,
631 631 x.branchpoints,
632 632 )
633 633 )
634 634
635 635 changesets = []
636 636 files = set()
637 637 c = None
638 638 for i, e in enumerate(log):
639 639
640 640 # Check if log entry belongs to the current changeset or not.
641 641
642 642 # Since CVS is file-centric, two different file revisions with
643 643 # different branchpoints should be treated as belonging to two
644 644 # different changesets (and the ordering is important and not
645 645 # honoured by cvsps at this point).
646 646 #
647 647 # Consider the following case:
648 648 # foo 1.1 branchpoints: [MYBRANCH]
649 649 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
650 650 #
651 651 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
652 652 # later version of foo may be in MYBRANCH2, so foo should be the
653 653 # first changeset and bar the next and MYBRANCH and MYBRANCH2
654 654 # should both start off of the bar changeset. No provisions are
655 655 # made to ensure that this is, in fact, what happens.
656 656 if not (
657 657 c
658 658 and e.branchpoints == c.branchpoints
659 659 and ( # cvs commitids
660 660 (e.commitid is not None and e.commitid == c.commitid)
661 661 or ( # no commitids, use fuzzy commit detection
662 662 (e.commitid is None or c.commitid is None)
663 663 and e.comment == c.comment
664 664 and e.author == c.author
665 665 and e.branch == c.branch
666 666 and (
667 667 (c.date[0] + c.date[1])
668 668 <= (e.date[0] + e.date[1])
669 669 <= (c.date[0] + c.date[1]) + fuzz
670 670 )
671 671 and e.file not in files
672 672 )
673 673 )
674 674 ):
675 675 c = changeset(
676 676 comment=e.comment,
677 677 author=e.author,
678 678 branch=e.branch,
679 679 date=e.date,
680 680 entries=[],
681 681 mergepoint=e.mergepoint,
682 682 branchpoints=e.branchpoints,
683 683 commitid=e.commitid,
684 684 )
685 685 changesets.append(c)
686 686
687 687 files = set()
688 688 if len(changesets) % 100 == 0:
689 689 t = b'%d %s' % (len(changesets), repr(e.comment)[1:-1])
690 690 ui.status(stringutil.ellipsis(t, 80) + b'\n')
691 691
692 692 c.entries.append(e)
693 693 files.add(e.file)
694 694 c.date = e.date # changeset date is date of latest commit in it
695 695
696 696 # Mark synthetic changesets
697 697
698 698 for c in changesets:
699 699 # Synthetic revisions always get their own changeset, because
700 700 # the log message includes the filename. E.g. if you add file3
701 701 # and file4 on a branch, you get four log entries and three
702 702 # changesets:
703 703 # "File file3 was added on branch ..." (synthetic, 1 entry)
704 704 # "File file4 was added on branch ..." (synthetic, 1 entry)
705 705 # "Add file3 and file4 to fix ..." (real, 2 entries)
706 706 # Hence the check for 1 entry here.
707 707 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
708 708
709 709 # Sort files in each changeset
710 710
711 711 def entitycompare(l, r):
712 712 """Mimic cvsps sorting order"""
713 713 l = l.file.split(b'/')
714 714 r = r.file.split(b'/')
715 715 nl = len(l)
716 716 nr = len(r)
717 717 n = min(nl, nr)
718 718 for i in range(n):
719 719 if i + 1 == nl and nl < nr:
720 720 return -1
721 721 elif i + 1 == nr and nl > nr:
722 722 return +1
723 723 elif l[i] < r[i]:
724 724 return -1
725 725 elif l[i] > r[i]:
726 726 return +1
727 727 return 0
728 728
729 729 for c in changesets:
730 730 c.entries.sort(key=functools.cmp_to_key(entitycompare))
731 731
732 732 # Sort changesets by date
733 733
734 734 odd = set()
735 735
736 736 def cscmp(l, r):
737 737 d = sum(l.date) - sum(r.date)
738 738 if d:
739 739 return d
740 740
741 741 # detect vendor branches and initial commits on a branch
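        # if some entry in l has, as its parent, the revision that r carries
        # for the same RCS file, then l builds on r and must sort after it;
        # the symmetric check below orders r after l (or flags both as odd)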
742 742 le = {}
743 743 for e in l.entries:
744 744 le[e.rcs] = e.revision
745 745 re = {}
746 746 for e in r.entries:
747 747 re[e.rcs] = e.revision
748 748
749 749 d = 0
750 750 for e in l.entries:
751 751 if re.get(e.rcs, None) == e.parent:
752 752 assert not d
753 753 d = 1
754 754 break
755 755
756 756 for e in r.entries:
757 757 if le.get(e.rcs, None) == e.parent:
758 758 if d:
759 759 odd.add((l, r))
760 760 d = -1
761 761 break
762 762 # By this point, the changesets are sufficiently compared that
763 763 # we don't really care about ordering. However, this leaves
764 764 # some race conditions in the tests, so we compare on the
765 765 # number of files modified, the files contained in each
766 766 # changeset, and the branchpoints in the change to ensure test
767 767 # output remains stable.
768 768
769 769 # recommended replacement for cmp from
770 770 # https://docs.python.org/3.0/whatsnew/3.0.html
771 771 c = lambda x, y: (x > y) - (x < y)
772 772 # Sort bigger changes first.
773 773 if not d:
774 774 d = c(len(l.entries), len(r.entries))
775 775 # Try sorting by filename in the change.
776 776 if not d:
777 777 d = c([e.file for e in l.entries], [e.file for e in r.entries])
778 778 # Try and put changes without a branch point before ones with
779 779 # a branch point.
780 780 if not d:
781 781 d = c(len(l.branchpoints), len(r.branchpoints))
782 782 return d
783 783
784 784 changesets.sort(key=functools.cmp_to_key(cscmp))
785 785
786 786 # Collect tags
787 787
788 788 globaltags = {}
789 789 for c in changesets:
790 790 for e in c.entries:
791 791 for tag in e.tags:
792 792 # remember which is the latest changeset to have this tag
793 793 globaltags[tag] = c
794 794
795 795 for c in changesets:
796 796 tags = set()
797 797 for e in c.entries:
798 798 tags.update(e.tags)
799 799 # remember tags only if this is the latest changeset to have it
800 800 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
801 801
802 802 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
803 803 # by inserting dummy changesets with two parents, and handle
804 804 # {{mergefrombranch BRANCHNAME}} by setting two parents.
805 805
806 806 if mergeto is None:
807 807 mergeto = br'{{mergetobranch ([-\w]+)}}'
808 808 if mergeto:
809 809 mergeto = re.compile(mergeto)
810 810
811 811 if mergefrom is None:
812 812 mergefrom = br'{{mergefrombranch ([-\w]+)}}'
813 813 if mergefrom:
814 814 mergefrom = re.compile(mergefrom)
815 815
816 816 versions = {} # changeset index where we saw any particular file version
817 817 branches = {} # changeset index where we saw a branch
818 818 n = len(changesets)
819 819 i = 0
820 820 while i < n:
821 821 c = changesets[i]
822 822
823 823 for f in c.entries:
824 824 versions[(f.rcs, f.revision)] = i
825 825
826 826 p = None
827 827 if c.branch in branches:
828 828 p = branches[c.branch]
829 829 else:
830 830 # first changeset on a new branch
831 831 # the parent is a changeset with the branch in its
832 832 # branchpoints such that it is the latest possible
833 833 # commit without any intervening, unrelated commits.
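            # scan older changesets in order, remembering the newest one whose
            # branchpoints contain c.branch, and stop at the first unrelated
            # changeset once such a candidate has been seen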
834 834
835 for candidate in pycompat.xrange(i):
835 for candidate in range(i):
836 836 if c.branch not in changesets[candidate].branchpoints:
837 837 if p is not None:
838 838 break
839 839 continue
840 840 p = candidate
841 841
842 842 c.parents = []
843 843 if p is not None:
844 844 p = changesets[p]
845 845
846 846 # Ensure no changeset has a synthetic changeset as a parent.
847 847 while p.synthetic:
848 848 assert len(p.parents) <= 1, _(
849 849 b'synthetic changeset cannot have multiple parents'
850 850 )
851 851 if p.parents:
852 852 p = p.parents[0]
853 853 else:
854 854 p = None
855 855 break
856 856
857 857 if p is not None:
858 858 c.parents.append(p)
859 859
860 860 if c.mergepoint:
861 861 if c.mergepoint == b'HEAD':
862 862 c.mergepoint = None
863 863 c.parents.append(changesets[branches[c.mergepoint]])
864 864
865 865 if mergefrom:
866 866 m = mergefrom.search(c.comment)
867 867 if m:
868 868 m = m.group(1)
869 869 if m == b'HEAD':
870 870 m = None
871 871 try:
872 872 candidate = changesets[branches[m]]
873 873 except KeyError:
874 874 ui.warn(
875 875 _(
876 876 b"warning: CVS commit message references "
877 877 b"non-existent branch %r:\n%s\n"
878 878 )
879 879 % (pycompat.bytestr(m), c.comment)
880 880 )
881 881 if m in branches and c.branch != m and not candidate.synthetic:
882 882 c.parents.append(candidate)
883 883
884 884 if mergeto:
885 885 m = mergeto.search(c.comment)
886 886 if m:
887 887 if m.groups():
888 888 m = m.group(1)
889 889 if m == b'HEAD':
890 890 m = None
891 891 else:
892 892 m = None # if no group found then merge to HEAD
893 893 if m in branches and c.branch != m:
894 894 # insert empty changeset for merge
895 895 cc = changeset(
896 896 author=c.author,
897 897 branch=m,
898 898 date=c.date,
899 899 comment=b'convert-repo: CVS merge from branch %s'
900 900 % c.branch,
901 901 entries=[],
902 902 tags=[],
903 903 parents=[changesets[branches[m]], c],
904 904 )
905 905 changesets.insert(i + 1, cc)
906 906 branches[m] = i + 1
907 907
908 908                 # adjust our loop counters now that we have inserted a new entry
909 909 n += 1
910 910 i += 2
911 911 continue
912 912
913 913 branches[c.branch] = i
914 914 i += 1
915 915
916 916 # Drop synthetic changesets (safe now that we have ensured no other
917 917 # changesets can have them as parents).
918 918 i = 0
919 919 while i < len(changesets):
920 920 if changesets[i].synthetic:
921 921 del changesets[i]
922 922 else:
923 923 i += 1
924 924
925 925 # Number changesets
926 926
927 927 for i, c in enumerate(changesets):
928 928 c.id = i + 1
929 929
930 930 if odd:
931 931 for l, r in odd:
932 932 if l.id is not None and r.id is not None:
933 933 ui.warn(
934 934 _(b'changeset %d is both before and after %d\n')
935 935 % (l.id, r.id)
936 936 )
937 937
938 938 ui.status(_(b'%d changeset entries\n') % len(changesets))
939 939
940 940 hook.hook(ui, None, b"cvschangesets", True, changesets=changesets)
941 941
942 942 return changesets
943 943
944 944
945 945 def debugcvsps(ui, *args, **opts):
946 946 """Read CVS rlog for current directory or named path in
947 947 repository, and convert the log to changesets based on matching
948 948 commit log entries and dates.
949 949 """
950 950 opts = pycompat.byteskwargs(opts)
951 951 if opts[b"new_cache"]:
952 952 cache = b"write"
953 953 elif opts[b"update_cache"]:
954 954 cache = b"update"
955 955 else:
956 956 cache = None
957 957
958 958 revisions = opts[b"revisions"]
959 959
960 960 try:
961 961 if args:
962 962 log = []
963 963 for d in args:
964 964 log += createlog(ui, d, root=opts[b"root"], cache=cache)
965 965 else:
966 966 log = createlog(ui, root=opts[b"root"], cache=cache)
967 967 except logerror as e:
968 968 ui.write(b"%r\n" % e)
969 969 return
970 970
971 971 changesets = createchangeset(ui, log, opts[b"fuzz"])
972 972 del log
973 973
974 974 # Print changesets (optionally filtered)
975 975
976 976 off = len(revisions)
977 977 branches = {} # latest version number in each branch
978 978 ancestors = {} # parent branch
979 979 for cs in changesets:
980 980
981 981 if opts[b"ancestors"]:
982 982 if cs.branch not in branches and cs.parents and cs.parents[0].id:
983 983 ancestors[cs.branch] = (
984 984 changesets[cs.parents[0].id - 1].branch,
985 985 cs.parents[0].id,
986 986 )
987 987 branches[cs.branch] = cs.id
988 988
989 989 # limit by branches
990 990 if (
991 991 opts[b"branches"]
992 992 and (cs.branch or b'HEAD') not in opts[b"branches"]
993 993 ):
994 994 continue
995 995
996 996 if not off:
997 997 # Note: trailing spaces on several lines here are needed to have
998 998 # bug-for-bug compatibility with cvsps.
999 999 ui.write(b'---------------------\n')
1000 1000 ui.write((b'PatchSet %d \n' % cs.id))
1001 1001 ui.write(
1002 1002 (
1003 1003 b'Date: %s\n'
1004 1004 % dateutil.datestr(cs.date, b'%Y/%m/%d %H:%M:%S %1%2')
1005 1005 )
1006 1006 )
1007 1007 ui.write((b'Author: %s\n' % cs.author))
1008 1008 ui.write((b'Branch: %s\n' % (cs.branch or b'HEAD')))
1009 1009 ui.write(
1010 1010 (
1011 1011 b'Tag%s: %s \n'
1012 1012 % (
1013 1013 [b'', b's'][len(cs.tags) > 1],
1014 1014 b','.join(cs.tags) or b'(none)',
1015 1015 )
1016 1016 )
1017 1017 )
1018 1018 if cs.branchpoints:
1019 1019 ui.writenoi18n(
1020 1020 b'Branchpoints: %s \n' % b', '.join(sorted(cs.branchpoints))
1021 1021 )
1022 1022 if opts[b"parents"] and cs.parents:
1023 1023 if len(cs.parents) > 1:
1024 1024 ui.write(
1025 1025 (
1026 1026 b'Parents: %s\n'
1027 1027 % (b','.join([(b"%d" % p.id) for p in cs.parents]))
1028 1028 )
1029 1029 )
1030 1030 else:
1031 1031 ui.write((b'Parent: %d\n' % cs.parents[0].id))
1032 1032
1033 1033 if opts[b"ancestors"]:
1034 1034 b = cs.branch
1035 1035 r = []
1036 1036 while b:
1037 1037 b, c = ancestors[b]
1038 1038 r.append(b'%s:%d:%d' % (b or b"HEAD", c, branches[b]))
1039 1039 if r:
1040 1040 ui.write((b'Ancestors: %s\n' % (b','.join(r))))
1041 1041
1042 1042 ui.writenoi18n(b'Log:\n')
1043 1043 ui.write(b'%s\n\n' % cs.comment)
1044 1044 ui.writenoi18n(b'Members: \n')
1045 1045 for f in cs.entries:
1046 1046 fn = f.file
1047 1047 if fn.startswith(opts[b"prefix"]):
1048 1048 fn = fn[len(opts[b"prefix"]) :]
1049 1049 ui.write(
1050 1050 b'\t%s:%s->%s%s \n'
1051 1051 % (
1052 1052 fn,
1053 1053 b'.'.join([b"%d" % x for x in f.parent]) or b'INITIAL',
1054 1054 b'.'.join([(b"%d" % x) for x in f.revision]),
1055 1055 [b'', b'(DEAD)'][f.dead],
1056 1056 )
1057 1057 )
1058 1058 ui.write(b'\n')
1059 1059
1060 1060 # have we seen the start tag?
1061 1061 if revisions and off:
1062 1062 if revisions[0] == (b"%d" % cs.id) or revisions[0] in cs.tags:
1063 1063 off = False
1064 1064
1065 1065 # see if we reached the end tag
1066 1066 if len(revisions) > 1 and not off:
1067 1067 if revisions[1] == (b"%d" % cs.id) or revisions[1] in cs.tags:
1068 1068 break
@@ -1,479 +1,479 b''
1 1 """automatically manage newlines in repository files
2 2
3 3 This extension allows you to manage the type of line endings (CRLF or
4 4 LF) that are used in the repository and in the local working
5 5 directory. That way you can get CRLF line endings on Windows and LF on
6 6 Unix/Mac, thereby letting everybody use their OS native line endings.
7 7
8 8 The extension reads its configuration from a versioned ``.hgeol``
9 9 configuration file found in the root of the working directory. The
10 10 ``.hgeol`` file uses the same syntax as all other Mercurial
11 11 configuration files. It uses two sections, ``[patterns]`` and
12 12 ``[repository]``.
13 13
14 14 The ``[patterns]`` section specifies how line endings should be
15 15 converted between the working directory and the repository. The format is
16 16 specified by a file pattern. The first match is used, so put more
17 17 specific patterns first. The available line endings are ``LF``,
18 18 ``CRLF``, and ``BIN``.
19 19
20 20 Files with the declared format of ``CRLF`` or ``LF`` are always
21 21 checked out and stored in the repository in that format and files
22 22 declared to be binary (``BIN``) are left unchanged. Additionally,
23 23 ``native`` is an alias for checking out in the platform's default line
24 24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
25 25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
26 26 default behavior; it is only needed if you need to override a later,
27 27 more general pattern.
28 28
29 29 The optional ``[repository]`` section specifies the line endings to
30 30 use for files stored in the repository. It has a single setting,
31 31 ``native``, which determines the storage line endings for files
32 32 declared as ``native`` in the ``[patterns]`` section. It can be set to
33 33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
34 34 that on Windows, files configured as ``native`` (``CRLF`` by default)
35 35 will be converted to ``LF`` when stored in the repository. Files
36 36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
37 37 are always stored as-is in the repository.
38 38
39 39 Example versioned ``.hgeol`` file::
40 40
41 41 [patterns]
42 42 **.py = native
43 43 **.vcproj = CRLF
44 44 **.txt = native
45 45 Makefile = LF
46 46 **.jpg = BIN
47 47
48 48 [repository]
49 49 native = LF
50 50
51 51 .. note::
52 52
53 53 The rules will first apply when files are touched in the working
54 54 directory, e.g. by updating to null and back to tip to touch all files.
55 55
56 56 The extension uses an optional ``[eol]`` section read from both the
57 57 normal Mercurial configuration files and the ``.hgeol`` file, with the
58 58 latter overriding the former. You can use that section to control the
59 59 overall behavior. There are three settings:
60 60
61 61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
62 62 ``CRLF`` to override the default interpretation of ``native`` for
63 63 checkout. This can be used with :hg:`archive` on Unix, say, to
64 64 generate an archive where files have line endings for Windows.
65 65
66 66 - ``eol.only-consistent`` (default True) can be set to False to make
67 67 the extension convert files with inconsistent EOLs. Inconsistent
68 68   means that both ``CRLF`` and ``LF`` are present in the file.
69 69 Such files are normally not touched under the assumption that they
70 70 have mixed EOLs on purpose.
71 71
72 72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
73 73   ensure that converted files end with an EOL character (either ``\\n``
74 74 or ``\\r\\n`` as per the configured patterns).
75 75
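For example, a hypothetical configuration that forces LF checkouts and also
converts files with mixed EOLs would be::

  [eol]
  native = LF
  only-consistent = False
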
76 76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
77 77 like the deprecated win32text extension does. This means that you can
78 78 disable win32text and enable eol, and your filters will still work. You
79 79 only need these filters until you have prepared a ``.hgeol`` file.
80 80
81 81 The ``win32text.forbid*`` hooks provided by the win32text extension
82 82 have been unified into a single hook named ``eol.checkheadshook``. The
83 83 hook will look up the expected line endings from the ``.hgeol`` file,
84 84 which means you must migrate to a ``.hgeol`` file before using
85 85 the hook. ``eol.checkheadshook`` only checks heads; intermediate
86 86 invalid revisions will be pushed. To forbid them completely, use the
87 87 ``eol.checkallhook`` hook. These hooks are best used as
88 88 ``pretxnchangegroup`` hooks.
89 89
90 90 See :hg:`help patterns` for more information about the glob patterns
91 91 used.
92 92 """
93 93
94 94
95 95 import os
96 96 import re
97 97 from mercurial.i18n import _
98 98 from mercurial import (
99 99 config,
100 100 error as errormod,
101 101 extensions,
102 102 match,
103 103 pycompat,
104 104 registrar,
105 105 scmutil,
106 106 util,
107 107 )
108 108 from mercurial.utils import stringutil
109 109
110 110 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
111 111 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
112 112 # be specifying the version(s) of Mercurial they are tested with, or
113 113 # leave the attribute unspecified.
114 114 testedwith = b'ships-with-hg-core'
115 115
116 116 configtable = {}
117 117 configitem = registrar.configitem(configtable)
118 118
119 119 configitem(
120 120 b'eol',
121 121 b'fix-trailing-newline',
122 122 default=False,
123 123 )
124 124 configitem(
125 125 b'eol',
126 126 b'native',
127 127 default=pycompat.oslinesep,
128 128 )
129 129 configitem(
130 130 b'eol',
131 131 b'only-consistent',
132 132 default=True,
133 133 )
134 134
135 135 # Matches a lone LF, i.e., one that is not part of CRLF.
136 136 singlelf = re.compile(b'(^|[^\r])\n')
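# e.g. it matches in b'a\r\nb\n' (the final LF is lone) but not in
# b'a\r\nb\r\n', where every LF is preceded by a CR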
137 137
138 138
139 139 def inconsistenteol(data):
140 140 return b'\r\n' in data and singlelf.search(data)
141 141
142 142
143 143 def tolf(s, params, ui, **kwargs):
144 144 """Filter to convert to LF EOLs."""
145 145 if stringutil.binary(s):
146 146 return s
147 147 if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
148 148 return s
149 149 if (
150 150 ui.configbool(b'eol', b'fix-trailing-newline')
151 151 and s
152 152 and not s.endswith(b'\n')
153 153 ):
154 154 s = s + b'\n'
155 155 return util.tolf(s)
156 156
157 157
158 158 def tocrlf(s, params, ui, **kwargs):
159 159 """Filter to convert to CRLF EOLs."""
160 160 if stringutil.binary(s):
161 161 return s
162 162 if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
163 163 return s
164 164 if (
165 165 ui.configbool(b'eol', b'fix-trailing-newline')
166 166 and s
167 167 and not s.endswith(b'\n')
168 168 ):
169 169 s = s + b'\n'
170 170 return util.tocrlf(s)
171 171
172 172
173 173 def isbinary(s, params, ui, **kwargs):
174 174 """Filter to do nothing with the file."""
175 175 return s
176 176
177 177
178 178 filters = {
179 179 b'to-lf': tolf,
180 180 b'to-crlf': tocrlf,
181 181 b'is-binary': isbinary,
182 182 # The following provide backwards compatibility with win32text
183 183 b'cleverencode:': tolf,
184 184 b'cleverdecode:': tocrlf,
185 185 }
186 186
187 187
188 188 class eolfile:
189 189 def __init__(self, ui, root, data):
190 190 self._decode = {
191 191 b'LF': b'to-lf',
192 192 b'CRLF': b'to-crlf',
193 193 b'BIN': b'is-binary',
194 194 }
195 195 self._encode = {
196 196 b'LF': b'to-lf',
197 197 b'CRLF': b'to-crlf',
198 198 b'BIN': b'is-binary',
199 199 }
200 200
201 201 self.cfg = config.config()
202 202 # Our files should not be touched. The pattern must be
203 203         # inserted first to override a '** = native' pattern.
204 204 self.cfg.set(b'patterns', b'.hg*', b'BIN', b'eol')
205 205 # We can then parse the user's patterns.
206 206 self.cfg.parse(b'.hgeol', data)
207 207
208 208 isrepolf = self.cfg.get(b'repository', b'native') != b'CRLF'
209 209 self._encode[b'NATIVE'] = isrepolf and b'to-lf' or b'to-crlf'
210 210 iswdlf = ui.config(b'eol', b'native') in (b'LF', b'\n')
211 211 self._decode[b'NATIVE'] = iswdlf and b'to-lf' or b'to-crlf'
212 212
213 213 include = []
214 214 exclude = []
215 215 self.patterns = []
216 216 for pattern, style in self.cfg.items(b'patterns'):
217 217 key = style.upper()
218 218 if key == b'BIN':
219 219 exclude.append(pattern)
220 220 else:
221 221 include.append(pattern)
222 222 m = match.match(root, b'', [pattern])
223 223 self.patterns.append((pattern, key, m))
224 224 # This will match the files for which we need to care
225 225 # about inconsistent newlines.
226 226 self.match = match.match(root, b'', [], include, exclude)
227 227
228 228 def copytoui(self, ui):
229 229 newpatterns = {pattern for pattern, key, m in self.patterns}
230 230 for section in (b'decode', b'encode'):
231 231 for oldpattern, _filter in ui.configitems(section):
232 232 if oldpattern not in newpatterns:
233 233 if ui.configsource(section, oldpattern) == b'eol':
234 234 ui.setconfig(section, oldpattern, b'!', b'eol')
235 235 for pattern, key, m in self.patterns:
236 236 try:
237 237 ui.setconfig(b'decode', pattern, self._decode[key], b'eol')
238 238 ui.setconfig(b'encode', pattern, self._encode[key], b'eol')
239 239 except KeyError:
240 240 ui.warn(
241 241 _(b"ignoring unknown EOL style '%s' from %s\n")
242 242 % (key, self.cfg.source(b'patterns', pattern))
243 243 )
244 244 # eol.only-consistent can be specified in ~/.hgrc or .hgeol
245 245 for k, v in self.cfg.items(b'eol'):
246 246 ui.setconfig(b'eol', k, v, b'eol')
247 247
248 248 def checkrev(self, repo, ctx, files):
249 249 failed = []
250 250 for f in files or ctx.files():
251 251 if f not in ctx:
252 252 continue
253 253 for pattern, key, m in self.patterns:
254 254 if not m(f):
255 255 continue
256 256 target = self._encode[key]
257 257 data = ctx[f].data()
258 258 if (
259 259 target == b"to-lf"
260 260 and b"\r\n" in data
261 261 or target == b"to-crlf"
262 262 and singlelf.search(data)
263 263 ):
264 264 failed.append((f, target, bytes(ctx)))
265 265 break
266 266 return failed
267 267
268 268
269 269 def parseeol(ui, repo, nodes):
270 270 try:
271 271 for node in nodes:
272 272 try:
273 273 if node is None:
274 274 # Cannot use workingctx.data() since it would load
275 275 # and cache the filters before we configure them.
276 276 data = repo.wvfs(b'.hgeol').read()
277 277 else:
278 278 data = repo[node][b'.hgeol'].data()
279 279 return eolfile(ui, repo.root, data)
280 280 except (IOError, LookupError):
281 281 pass
282 282 except errormod.ConfigError as inst:
283 283 ui.warn(
284 284 _(
285 285 b"warning: ignoring .hgeol file due to parse error "
286 286 b"at %s: %s\n"
287 287 )
288 288 % (inst.location, inst.message)
289 289 )
290 290 return None
291 291
292 292
293 293 def ensureenabled(ui):
294 294     """make sure the extension is enabled when used as a hook
295 295
296 296 When eol is used through hooks, the extension is never formally loaded and
297 297     enabled. This has some side effects; for example, the config declaration
298 298     is never loaded. This function ensures the extension is enabled when
299 299     running hooks.
300 300 """
301 301 if b'eol' in ui._knownconfig:
302 302 return
303 303 ui.setconfig(b'extensions', b'eol', b'', source=b'internal')
304 304 extensions.loadall(ui, [b'eol'])
305 305
306 306
307 307 def _checkhook(ui, repo, node, headsonly):
308 308 # Get revisions to check and touched files at the same time
309 309 ensureenabled(ui)
310 310 files = set()
311 311 revs = set()
312 for rev in pycompat.xrange(repo[node].rev(), len(repo)):
312 for rev in range(repo[node].rev(), len(repo)):
313 313 revs.add(rev)
314 314 if headsonly:
315 315 ctx = repo[rev]
316 316 files.update(ctx.files())
317 317 for pctx in ctx.parents():
318 318 revs.discard(pctx.rev())
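    # with headsonly, every revision that is a parent of another incoming
    # revision has been discarded above, so revs now holds only the heads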
319 319 failed = []
320 320 for rev in revs:
321 321 ctx = repo[rev]
322 322 eol = parseeol(ui, repo, [ctx.node()])
323 323 if eol:
324 324 failed.extend(eol.checkrev(repo, ctx, files))
325 325
326 326 if failed:
327 327 eols = {b'to-lf': b'CRLF', b'to-crlf': b'LF'}
328 328 msgs = []
329 329 for f, target, node in sorted(failed):
330 330 msgs.append(
331 331 _(b" %s in %s should not have %s line endings")
332 332 % (f, node, eols[target])
333 333 )
334 334 raise errormod.Abort(
335 335 _(b"end-of-line check failed:\n") + b"\n".join(msgs)
336 336 )
337 337
338 338
339 339 def checkallhook(ui, repo, node, hooktype, **kwargs):
340 340 """verify that files have expected EOLs"""
341 341 _checkhook(ui, repo, node, False)
342 342
343 343
344 344 def checkheadshook(ui, repo, node, hooktype, **kwargs):
345 345 """verify that files have expected EOLs"""
346 346 _checkhook(ui, repo, node, True)
347 347
348 348
349 349 # "checkheadshook" used to be called "hook"
350 350 hook = checkheadshook
351 351
352 352
353 353 def preupdate(ui, repo, hooktype, parent1, parent2):
354 354 p1node = scmutil.resolvehexnodeidprefix(repo, parent1)
355 355 repo.loadeol([p1node])
356 356 return False
357 357
358 358
359 359 def uisetup(ui):
360 360 ui.setconfig(b'hooks', b'preupdate.eol', preupdate, b'eol')
361 361
362 362
363 363 def extsetup(ui):
364 364 try:
365 365 extensions.find(b'win32text')
366 366 ui.warn(
367 367 _(
368 368 b"the eol extension is incompatible with the "
369 369 b"win32text extension\n"
370 370 )
371 371 )
372 372 except KeyError:
373 373 pass
374 374
375 375
376 376 def reposetup(ui, repo):
377 377 uisetup(repo.ui)
378 378
379 379 if not repo.local():
380 380 return
381 381 for name, fn in filters.items():
382 382 repo.adddatafilter(name, fn)
383 383
384 384 ui.setconfig(b'patch', b'eol', b'auto', b'eol')
385 385
386 386 class eolrepo(repo.__class__):
387 387 def loadeol(self, nodes):
388 388 eol = parseeol(self.ui, self, nodes)
389 389 if eol is None:
390 390 return None
391 391 eol.copytoui(self.ui)
392 392 return eol.match
393 393
394 394 def _hgcleardirstate(self):
395 395 self._eolmatch = self.loadeol([None])
396 396 if not self._eolmatch:
397 397 self._eolmatch = util.never
398 398 return
399 399
400 400 oldeol = None
401 401 try:
402 402 cachemtime = os.path.getmtime(self.vfs.join(b"eol.cache"))
403 403 except OSError:
404 404 cachemtime = 0
405 405 else:
406 406 olddata = self.vfs.read(b"eol.cache")
407 407 if olddata:
408 408 oldeol = eolfile(self.ui, self.root, olddata)
409 409
410 410 try:
411 411 eolmtime = os.path.getmtime(self.wjoin(b".hgeol"))
412 412 except OSError:
413 413 eolmtime = 0
414 414
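            # if .hgeol is newer than the cached copy, files whose pattern
            # changed may need different filters, so mark them possibly dirty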
415 415 if eolmtime >= cachemtime and eolmtime > 0:
416 416 self.ui.debug(b"eol: detected change in .hgeol\n")
417 417
418 418 hgeoldata = self.wvfs.read(b'.hgeol')
419 419 neweol = eolfile(self.ui, self.root, hgeoldata)
420 420
421 421 wlock = None
422 422 try:
423 423 wlock = self.wlock()
424 424 for f in self.dirstate:
425 425 if not self.dirstate.get_entry(f).maybe_clean:
426 426 continue
427 427 if oldeol is not None:
428 428 if not oldeol.match(f) and not neweol.match(f):
429 429 continue
430 430 oldkey = None
431 431 for pattern, key, m in oldeol.patterns:
432 432 if m(f):
433 433 oldkey = key
434 434 break
435 435 newkey = None
436 436 for pattern, key, m in neweol.patterns:
437 437 if m(f):
438 438 newkey = key
439 439 break
440 440 if oldkey == newkey:
441 441 continue
442 442 # all normal files need to be looked at again since
443 443                     # the new .hgeol file specifies a different filter
444 444 self.dirstate.set_possibly_dirty(f)
445 445 # Write the cache to update mtime and cache .hgeol
446 446 with self.vfs(b"eol.cache", b"w") as f:
447 447 f.write(hgeoldata)
448 448 except errormod.LockUnavailable:
449 449 # If we cannot lock the repository and clear the
450 450 # dirstate, then a commit might not see all files
451 451 # as modified. But if we cannot lock the
452 452 # repository, then we can also not make a commit,
453 453 # so ignore the error.
454 454 pass
455 455 finally:
456 456 if wlock is not None:
457 457 wlock.release()
458 458
459 459 def commitctx(self, ctx, error=False, origctx=None):
460 460 for f in sorted(ctx.added() + ctx.modified()):
461 461 if not self._eolmatch(f):
462 462 continue
463 463 fctx = ctx[f]
464 464 if fctx is None:
465 465 continue
466 466 data = fctx.data()
467 467 if stringutil.binary(data):
468 468 # We should not abort here, since the user should
469 469 # be able to say "** = native" to automatically
470 470 # have all non-binary files taken care of.
471 471 continue
472 472 if inconsistenteol(data):
473 473 raise errormod.Abort(
474 474 _(b"inconsistent newline style in %s\n") % f
475 475 )
476 476 return super(eolrepo, self).commitctx(ctx, error, origctx)
477 477
478 478 repo.__class__ = eolrepo
479 479 repo._hgcleardirstate()
@@ -1,858 +1,858 b''
1 1 # Copyright 2016-present Facebook. All Rights Reserved.
2 2 #
3 3 # context: context needed to annotate a file
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import contextlib
11 11 import os
12 12
13 13 from mercurial.i18n import _
14 14 from mercurial.pycompat import (
15 15 getattr,
16 16 open,
17 17 setattr,
18 18 )
19 19 from mercurial.node import (
20 20 bin,
21 21 hex,
22 22 short,
23 23 )
24 24 from mercurial import (
25 25 error,
26 26 linelog as linelogmod,
27 27 lock as lockmod,
28 28 mdiff,
29 29 pycompat,
30 30 scmutil,
31 31 util,
32 32 )
33 33 from mercurial.utils import (
34 34 hashutil,
35 35 stringutil,
36 36 )
37 37
38 38 from . import (
39 39 error as faerror,
40 40 revmap as revmapmod,
41 41 )
42 42
43 43 # given path, get filelog, cached
44 44 @util.lrucachefunc
45 45 def _getflog(repo, path):
46 46 return repo.file(path)
47 47
48 48
49 49 # extracted from mercurial.context.basefilectx.annotate
50 50 def _parents(f, follow=True):
51 51 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
52 52 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
53 53 # from the topmost introrev (= srcrev) down to p.linkrev() if it
54 54 # isn't an ancestor of the srcrev.
55 55 f._changeid
56 56 pl = f.parents()
57 57
58 58 # Don't return renamed parents if we aren't following.
59 59 if not follow:
60 60 pl = [p for p in pl if p.path() == f.path()]
61 61
62 62 # renamed filectx won't have a filelog yet, so set it
63 63 # from the cache to save time
64 64 for p in pl:
65 65 if '_filelog' not in p.__dict__:
66 66 p._filelog = _getflog(f._repo, p.path())
67 67
68 68 return pl
69 69
70 70
71 71 # extracted from mercurial.context.basefilectx.annotate. slightly modified
72 72 # so it takes a fctx instead of a pair of text and fctx.
73 73 def _decorate(fctx):
74 74 text = fctx.data()
75 75 linecount = text.count(b'\n')
76 76 if text and not text.endswith(b'\n'):
77 77 linecount += 1
78 return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
78 return ([(fctx, i) for i in range(linecount)], text)
79 79
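This hunk shows the pattern repeated throughout this changeset: on Python 3, pycompat.xrange was simply an alias for the built-in range, so the substitution preserves behavior. The line counting above is also worth a note: a final line without a trailing newline still counts as one line. Illustrative values::

    >>> text = b'foo\nbar'   # two lines, no trailing newline
    >>> n = text.count(b'\n')
    >>> if text and not text.endswith(b'\n'):
    ...     n += 1
    >>> n
    2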
80 80
81 81 # extracted from mercurial.context.basefilectx.annotate. slightly modified
82 82 # so it takes an extra "blocks" parameter calculated elsewhere, instead of
83 83 # calculating diff here.
84 84 def _pair(parent, child, blocks):
85 85 for (a1, a2, b1, b2), t in blocks:
86 86 # Changed blocks ('!') or blocks made only of blank lines ('~')
87 87 # belong to the child.
88 88 if t == b'=':
89 89 child[0][b1:b2] = parent[0][a1:a2]
90 90 return child
91 91
92 92
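_pair consumes blocks from mdiff.allblocks, which yields ((a1, a2, b1, b2), type) tuples; only the b'=' (unchanged) ranges inherit the parent's annotations, so everything else implicitly stays attributed to the child. A quick illustration (assuming default diffopts)::

    from mercurial import mdiff

    old = b'one\ntwo\nthree\n'
    new = b'one\nTWO\nthree\n'
    for (a1, a2, b1, b2), t in mdiff.allblocks(old, new):
        # t is b'=' for equal runs, b'!' for changed runs
        print(t, (a1, a2, b1, b2))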
93 93 # like scmutil.revsingle, but with lru cache, so their states (like manifests)
94 94 # could be reused
95 95 _revsingle = util.lrucachefunc(scmutil.revsingle)
96 96
97 97
98 98 def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
99 99 """(repo, str, str) -> fctx
100 100
101 101 get the filectx object from repo, rev, path, in an efficient way.
102 102
103 103 if resolverev is True, "rev" is a revision specified by the revset
104 104 language, otherwise "rev" is a nodeid, or a revision number that can
105 105 be consumed by repo.__getitem__.
106 106
107 107 if adjustctx is not None, the returned fctx will point to a changeset
108 108 that introduces the change (last modified the file). if adjustctx
109 109 is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
110 110 faster for big repos but is incorrect for some cases.
111 111 """
112 112 if resolverev and not isinstance(rev, int) and rev is not None:
113 113 ctx = _revsingle(repo, rev)
114 114 else:
115 115 ctx = repo[rev]
116 116
117 117 # If we don't need to adjust the linkrev, create the filectx using the
118 118 # changectx instead of using ctx[path]. This means it already has the
119 119 # changectx information, so blame -u will be able to look directly at the
120 120 # commitctx object instead of having to resolve it by going through the
121 121 # manifest. In a lazy-manifest world this can prevent us from downloading a
122 122 # lot of data.
123 123 if adjustctx is None:
124 124 # ctx.rev() is None means it's the working copy, which is a special
125 125 # case.
126 126 if ctx.rev() is None:
127 127 fctx = ctx[path]
128 128 else:
129 129 fctx = repo.filectx(path, changeid=ctx.rev())
130 130 else:
131 131 fctx = ctx[path]
132 132 if adjustctx == b'linkrev':
133 133 introrev = fctx.linkrev()
134 134 else:
135 135 introrev = fctx.introrev()
136 136 if introrev != ctx.rev():
137 137 fctx._changeid = introrev
138 138 fctx._changectx = repo[introrev]
139 139 return fctx
140 140
141 141
142 142 # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
143 143 def encodedir(path):
144 144 return (
145 145 path.replace(b'.hg/', b'.hg.hg/')
146 146 .replace(b'.l/', b'.l.hg/')
147 147 .replace(b'.m/', b'.m.hg/')
148 148 .replace(b'.lock/', b'.lock.hg/')
149 149 )
150 150
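Only directory components that could collide with the cache's own file suffixes need escaping, e.g.::

    >>> encodedir(b'dir.l/file')
    b'dir.l.hg/file'
    >>> encodedir(b'plain/file')
    b'plain/file'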
151 151
152 152 def hashdiffopts(diffopts):
153 153 diffoptstr = stringutil.pprint(
154 154 sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults)
155 155 )
156 156 return hex(hashutil.sha1(diffoptstr).digest())[:6]
157 157
158 158
159 159 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
160 160
161 161
162 162 class annotateopts:
163 163 """like mercurial.mdiff.diffopts, but is for annotate
164 164
165 165 followrename: follow renames, like "hg annotate -f"
166 166 followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
167 167 """
168 168
169 169 defaults = {
170 170 b'diffopts': None,
171 171 b'followrename': True,
172 172 b'followmerge': True,
173 173 }
174 174
175 175 def __init__(self, **opts):
176 176 opts = pycompat.byteskwargs(opts)
177 177 for k, v in self.defaults.items():
178 178 setattr(self, k, opts.get(k, v))
179 179
180 180 @util.propertycache
181 181 def shortstr(self):
182 182 """represent opts in a short string, suitable for a directory name"""
183 183 result = b''
184 184 if not self.followrename:
185 185 result += b'r0'
186 186 if not self.followmerge:
187 187 result += b'm0'
188 188 if self.diffopts is not None:
189 189 assert isinstance(self.diffopts, mdiff.diffopts)
190 190 diffopthash = hashdiffopts(self.diffopts)
191 191 if diffopthash != _defaultdiffopthash:
192 192 result += b'i' + diffopthash
193 193 return result or b'default'
194 194
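Since shortstr names the on-disk cache directory, distinct option combinations get distinct caches; these values follow directly from the code above::

    >>> annotateopts().shortstr
    b'default'
    >>> annotateopts(followrename=False, followmerge=False).shortstr
    b'r0m0'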
195 195
196 196 defaultopts = annotateopts()
197 197
198 198
199 199 class _annotatecontext:
200 200 """do not use this class directly as it does not use lock to protect
201 201 writes. use "with annotatecontext(...)" instead.
202 202 """
203 203
204 204 def __init__(self, repo, path, linelogpath, revmappath, opts):
205 205 self.repo = repo
206 206 self.ui = repo.ui
207 207 self.path = path
208 208 self.opts = opts
209 209 self.linelogpath = linelogpath
210 210 self.revmappath = revmappath
211 211 self._linelog = None
212 212 self._revmap = None
213 213 self._node2path = {} # {str: str}
214 214
215 215 @property
216 216 def linelog(self):
217 217 if self._linelog is None:
218 218 if os.path.exists(self.linelogpath):
219 219 with open(self.linelogpath, b'rb') as f:
220 220 try:
221 221 self._linelog = linelogmod.linelog.fromdata(f.read())
222 222 except linelogmod.LineLogError:
223 223 self._linelog = linelogmod.linelog()
224 224 else:
225 225 self._linelog = linelogmod.linelog()
226 226 return self._linelog
227 227
228 228 @property
229 229 def revmap(self):
230 230 if self._revmap is None:
231 231 self._revmap = revmapmod.revmap(self.revmappath)
232 232 return self._revmap
233 233
234 234 def close(self):
235 235 if self._revmap is not None:
236 236 self._revmap.flush()
237 237 self._revmap = None
238 238 if self._linelog is not None:
239 239 with open(self.linelogpath, b'wb') as f:
240 240 f.write(self._linelog.encode())
241 241 self._linelog = None
242 242
243 243 __del__ = close
244 244
245 245 def rebuild(self):
246 246 """delete linelog and revmap, useful for rebuilding"""
247 247 self.close()
248 248 self._node2path.clear()
249 249 _unlinkpaths([self.revmappath, self.linelogpath])
250 250
251 251 @property
252 252 def lastnode(self):
253 253 """return last node in revmap, or None if revmap is empty"""
254 254 if self._revmap is None:
255 255 # fast path, read revmap without loading its full content
256 256 return revmapmod.getlastnode(self.revmappath)
257 257 else:
258 258 return self._revmap.rev2hsh(self._revmap.maxrev)
259 259
260 260 def isuptodate(self, master, strict=True):
261 261 """return True if the revmap / linelog is up-to-date, or the file
262 262 does not exist in the master revision. False otherwise.
263 263
264 264 it tries to be fast and could return false negatives, because of the
265 265 use of linkrev instead of introrev.
266 266
267 267 useful for both server and client to decide whether to update
268 268 fastannotate cache or not.
269 269
270 270 if strict is True, isuptodate returns False even if fctx exists in
271 271 the revmap but is not the last node. this is good for performance - no
272 272 expensive check is done.
273 273
274 274 if strict is False and fctx exists in the revmap, this function may
275 275 return True. this is useful for the client to skip downloading the
276 276 cache if the client's master is behind the server's.
277 277 """
278 278 lastnode = self.lastnode
279 279 try:
280 280 f = self._resolvefctx(master, resolverev=True)
281 281 # choose linkrev instead of introrev as the check is meant to be
282 282 # *fast*.
283 283 linknode = self.repo.changelog.node(f.linkrev())
284 284 if not strict and lastnode and linknode != lastnode:
285 285 # check if f.node() is in the revmap. note: this loads the
286 286 # revmap and can be slow.
287 287 return self.revmap.hsh2rev(linknode) is not None
288 288 # avoid resolving old manifests or slow adjustlinkrev so this stays fast;
289 289 # false negatives are acceptable in this case.
290 290 return linknode == lastnode
291 291 except LookupError:
292 292 # master does not have the file, or the revmap is ahead
293 293 return True
294 294
295 295 def annotate(self, rev, master=None, showpath=False, showlines=False):
296 296 """incrementally update the cache so it includes revisions in the main
297 297 branch till 'master'. and run annotate on 'rev', which may or may not be
298 298 included in the main branch.
299 299
300 300 if master is None, do not update linelog.
301 301
302 302 the first value returned is the annotate result, it is [(node, linenum)]
303 303 by default. [(node, linenum, path)] if showpath is True.
304 304
305 305 if showlines is True, a second value will be returned, it is a list of
306 306 corresponding line contents.
307 307 """
308 308
309 309 # the fast path test requires commit hash, convert rev number to hash,
310 310 # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
311 311 # command could give us a revision number even if the user passes a
312 312 # commit hash.
313 313 if isinstance(rev, int):
314 314 rev = hex(self.repo.changelog.node(rev))
315 315
316 316 # fast path: if rev is in the main branch already
317 317 directly, revfctx = self.canannotatedirectly(rev)
318 318 if directly:
319 319 if self.ui.debugflag:
320 320 self.ui.debug(
321 321 b'fastannotate: %s: using fast path '
322 322 b'(resolved fctx: %s)\n'
323 323 % (
324 324 self.path,
325 325 stringutil.pprint(util.safehasattr(revfctx, b'node')),
326 326 )
327 327 )
328 328 return self.annotatedirectly(revfctx, showpath, showlines)
329 329
330 330 # resolve master
331 331 masterfctx = None
332 332 if master:
333 333 try:
334 334 masterfctx = self._resolvefctx(
335 335 master, resolverev=True, adjustctx=True
336 336 )
337 337 except LookupError: # master does not have the file
338 338 pass
339 339 else:
340 340 if masterfctx in self.revmap: # no need to update linelog
341 341 masterfctx = None
342 342
343 343 # ... - @ <- rev (can be an arbitrary changeset,
344 344 # / not necessarily a descendant
345 345 # master -> o of master)
346 346 # |
347 347 # a merge -> o 'o': new changesets in the main branch
348 348 # |\ '#': revisions in the main branch that
349 349 # o * exist in linelog / revmap
350 350 # | . '*': changesets in side branches, or
351 351 # last master -> # . descendants of master
352 352 # | .
353 353 # # * joint: '#', and is a parent of a '*'
354 354 # |/
355 355 # a joint -> # ^^^^ --- side branches
356 356 # |
357 357 # ^ --- main branch (in linelog)
358 358
359 359 # these DFSes are similar to the traditional annotate algorithm.
360 360 # we cannot really reuse the code for perf reason.
361 361
362 362 # 1st DFS calculates merges, joint points, and needed.
363 363 # "needed" is a simple reference counting dict to free items in
364 364 # "hist", reducing its memory usage otherwise could be huge.
365 365 initvisit = [revfctx]
366 366 if masterfctx:
367 367 if masterfctx.rev() is None:
368 368 raise error.Abort(
369 369 _(b'cannot update linelog to wdir()'),
370 370 hint=_(b'set fastannotate.mainbranch'),
371 371 )
372 372 initvisit.append(masterfctx)
373 373 visit = initvisit[:]
374 374 pcache = {}
375 375 needed = {revfctx: 1}
376 376 hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
377 377 while visit:
378 378 f = visit.pop()
379 379 if f in pcache or f in hist:
380 380 continue
381 381 if f in self.revmap: # in the old main branch, it's a joint
382 382 llrev = self.revmap.hsh2rev(f.node())
383 383 self.linelog.annotate(llrev)
384 384 result = self.linelog.annotateresult
385 385 hist[f] = (result, f.data())
386 386 continue
387 387 pl = self._parentfunc(f)
388 388 pcache[f] = pl
389 389 for p in pl:
390 390 needed[p] = needed.get(p, 0) + 1
391 391 if p not in pcache:
392 392 visit.append(p)
393 393
394 394 # 2nd (simple) DFS calculates new changesets in the main branch
395 395 # ('o' nodes in the above graph), so we know when to update linelog.
396 396 newmainbranch = set()
397 397 f = masterfctx
398 398 while f and f not in self.revmap:
399 399 newmainbranch.add(f)
400 400 pl = pcache[f]
401 401 if pl:
402 402 f = pl[0]
403 403 else:
404 404 f = None
405 405 break
406 406
407 407 # f, if present, is the position where the last build stopped at, and
408 408 # should be the "master" last time. check to see if we can continue
409 409 # building the linelog incrementally. (we cannot if diverged)
410 410 if masterfctx is not None:
411 411 self._checklastmasterhead(f)
412 412
413 413 if self.ui.debugflag:
414 414 if newmainbranch:
415 415 self.ui.debug(
416 416 b'fastannotate: %s: %d new changesets in the main'
417 417 b' branch\n' % (self.path, len(newmainbranch))
418 418 )
419 419 elif not hist: # no joints, no updates
420 420 self.ui.debug(
421 421 b'fastannotate: %s: linelog cannot help in '
422 422 b'annotating this revision\n' % self.path
423 423 )
424 424
425 425 # prepare annotateresult so we can update linelog incrementally
426 426 self.linelog.annotate(self.linelog.maxrev)
427 427
428 428 # 3rd DFS does the actual annotate
429 429 visit = initvisit[:]
430 430 progress = self.ui.makeprogress(
431 431 b'building cache', total=len(newmainbranch)
432 432 )
433 433 while visit:
434 434 f = visit[-1]
435 435 if f in hist:
436 436 visit.pop()
437 437 continue
438 438
439 439 ready = True
440 440 pl = pcache[f]
441 441 for p in pl:
442 442 if p not in hist:
443 443 ready = False
444 444 visit.append(p)
445 445 if not ready:
446 446 continue
447 447
448 448 visit.pop()
449 449 blocks = None # mdiff blocks, used for appending linelog
450 450 ismainbranch = f in newmainbranch
451 451 # curr is the same as the traditional annotate algorithm,
452 452 # if we only care about linear history (do not follow merge),
453 453 # then curr is not actually used.
454 454 assert f not in hist
455 455 curr = _decorate(f)
456 456 for i, p in enumerate(pl):
457 457 bs = list(self._diffblocks(hist[p][1], curr[1]))
458 458 if i == 0 and ismainbranch:
459 459 blocks = bs
460 460 curr = _pair(hist[p], curr, bs)
461 461 if needed[p] == 1:
462 462 del hist[p]
463 463 del needed[p]
464 464 else:
465 465 needed[p] -= 1
466 466
467 467 hist[f] = curr
468 468 del pcache[f]
469 469
470 470 if ismainbranch: # need to write to linelog
471 471 progress.increment()
472 472 bannotated = None
473 473 if len(pl) == 2 and self.opts.followmerge: # merge
474 474 bannotated = curr[0]
475 475 if blocks is None: # no parents, add an empty one
476 476 blocks = list(self._diffblocks(b'', curr[1]))
477 477 self._appendrev(f, blocks, bannotated)
478 478 elif showpath: # not append linelog, but we need to record path
479 479 self._node2path[f.node()] = f.path()
480 480
481 481 progress.complete()
482 482
483 483 result = [
484 484 ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
485 485 for fr, l in hist[revfctx][0]
486 486 ] # [(node, linenumber)]
487 487 return self._refineannotateresult(result, revfctx, showpath, showlines)
488 488
489 489 def canannotatedirectly(self, rev):
490 490 """(str) -> bool, fctx or node.
491 491 return (True, f) if we can annotate without updating the linelog, pass
492 492 f to annotatedirectly.
493 493 return (False, f) if we need extra calculation. f is the fctx resolved
494 494 from rev.
495 495 """
496 496 result = True
497 497 f = None
498 498 if not isinstance(rev, int) and rev is not None:
499 499 hsh = {20: bytes, 40: bin}.get(len(rev), lambda x: None)(rev)
500 500 if hsh is not None and (hsh, self.path) in self.revmap:
501 501 f = hsh
502 502 if f is None:
503 503 adjustctx = b'linkrev' if self._perfhack else True
504 504 f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
505 505 result = f in self.revmap
506 506 if not result and self._perfhack:
507 507 # redo the resolution without perfhack - as we are going to
508 508 # do write operations, we need a correct fctx.
509 509 f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
510 510 return result, f
511 511
512 512 def annotatealllines(self, rev, showpath=False, showlines=False):
513 513 """(rev : str) -> [(node : str, linenum : int, path : str)]
514 514
515 515 the result has the same format as annotate, but includes all (including
516 516 deleted) lines up to rev. call this after calling annotate(rev, ...) for
517 517 better performance and accuracy.
518 518 """
519 519 revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
520 520
521 521 # find a chain from rev to anything in the mainbranch
522 522 if revfctx not in self.revmap:
523 523 chain = [revfctx]
524 524 a = b''
525 525 while True:
526 526 f = chain[-1]
527 527 pl = self._parentfunc(f)
528 528 if not pl:
529 529 break
530 530 if pl[0] in self.revmap:
531 531 a = pl[0].data()
532 532 break
533 533 chain.append(pl[0])
534 534
535 535 # both self.linelog and self.revmap is backed by filesystem. now
536 536 # we want to modify them but do not want to write changes back to
537 537 # files. so we create in-memory objects and copy them. it's like
538 538 # a "fork".
539 539 linelog = linelogmod.linelog()
540 540 linelog.copyfrom(self.linelog)
541 541 linelog.annotate(linelog.maxrev)
542 542 revmap = revmapmod.revmap()
543 543 revmap.copyfrom(self.revmap)
544 544
545 545 for f in reversed(chain):
546 546 b = f.data()
547 547 blocks = list(self._diffblocks(a, b))
548 548 self._doappendrev(linelog, revmap, f, blocks)
549 549 a = b
550 550 else:
551 551 # fastpath: use existing linelog, revmap as we don't write to them
552 552 linelog = self.linelog
553 553 revmap = self.revmap
554 554
555 555 lines = linelog.getalllines()
556 556 hsh = revfctx.node()
557 557 llrev = revmap.hsh2rev(hsh)
558 558 result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
559 559 # cannot use _refineannotateresult since we need custom logic for
560 560 # resolving line contents
561 561 if showpath:
562 562 result = self._addpathtoresult(result, revmap)
563 563 if showlines:
564 564 linecontents = self._resolvelines(result, revmap, linelog)
565 565 result = (result, linecontents)
566 566 return result
567 567
568 568 def _resolvelines(self, annotateresult, revmap, linelog):
569 569 """(annotateresult) -> [line]. designed for annotatealllines.
570 570 this is probably the most inefficient code in the whole fastannotate
571 571 directory. but we have made a decision that the linelog does not
572 572 store line contents. so getting them requires random accesses to
573 573 the revlog data; since there can be many, this can be very slow.
574 574 """
575 575 # [llrev]
576 576 revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
577 577 result = [None] * len(annotateresult)
578 578 # {(rev, linenum): [lineindex]}
579 579 key2idxs = collections.defaultdict(list)
580 for i in pycompat.xrange(len(result)):
580 for i in range(len(result)):
581 581 key2idxs[(revs[i], annotateresult[i][1])].append(i)
582 582 while key2idxs:
583 583 # find an unresolved line and its linelog rev to annotate
584 584 hsh = None
585 585 try:
586 586 for (rev, _linenum), idxs in key2idxs.items():
587 587 if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
588 588 continue
589 589 hsh = annotateresult[idxs[0]][0]
590 590 break
591 591 except StopIteration: # no more unresolved lines
592 592 return result
593 593 if hsh is None:
594 594 # the remaining key2idxs are not in the main branch; resolve them
595 595 # the hard way...
596 596 revlines = {}
597 597 for (rev, linenum), idxs in key2idxs.items():
598 598 if rev not in revlines:
599 599 hsh = annotateresult[idxs[0]][0]
600 600 if self.ui.debugflag:
601 601 self.ui.debug(
602 602 b'fastannotate: reading %s line #%d '
603 603 b'to resolve lines %r\n'
604 604 % (short(hsh), linenum, idxs)
605 605 )
606 606 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
607 607 lines = mdiff.splitnewlines(fctx.data())
608 608 revlines[rev] = lines
609 609 for idx in idxs:
610 610 result[idx] = revlines[rev][linenum]
611 611 assert all(x is not None for x in result)
612 612 return result
613 613
614 614 # run the annotate and the lines should match to the file content
615 615 self.ui.debug(
616 616 b'fastannotate: annotate %s to resolve lines\n' % short(hsh)
617 617 )
618 618 linelog.annotate(rev)
619 619 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
620 620 annotated = linelog.annotateresult
621 621 lines = mdiff.splitnewlines(fctx.data())
622 622 if len(lines) != len(annotated):
623 623 raise faerror.CorruptedFileError(b'unexpected annotated lines')
624 624 # resolve lines from the annotate result
625 625 for i, line in enumerate(lines):
626 626 k = annotated[i]
627 627 if k in key2idxs:
628 628 for idx in key2idxs[k]:
629 629 result[idx] = line
630 630 del key2idxs[k]
631 631 return result
632 632
633 633 def annotatedirectly(self, f, showpath, showlines):
634 634 """like annotate, but when we know that f is in linelog.
635 635 f can be either a 20-char str (node) or a fctx. this is for perf - in
636 636 the best case, the user provides a node and we don't need to read the
637 637 filelog or construct any filecontext.
638 638 """
639 639 if isinstance(f, bytes):
640 640 hsh = f
641 641 else:
642 642 hsh = f.node()
643 643 llrev = self.revmap.hsh2rev(hsh)
644 644 if not llrev:
645 645 raise faerror.CorruptedFileError(b'%s is not in revmap' % hex(hsh))
646 646 if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
647 647 raise faerror.CorruptedFileError(
648 648 b'%s is not in revmap mainbranch' % hex(hsh)
649 649 )
650 650 self.linelog.annotate(llrev)
651 651 result = [
652 652 (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult
653 653 ]
654 654 return self._refineannotateresult(result, f, showpath, showlines)
655 655
656 656 def _refineannotateresult(self, result, f, showpath, showlines):
657 657 """add the missing path or line contents, they can be expensive.
658 658 f could be either node or fctx.
659 659 """
660 660 if showpath:
661 661 result = self._addpathtoresult(result)
662 662 if showlines:
663 663 if isinstance(f, bytes): # f: node or fctx
664 664 llrev = self.revmap.hsh2rev(f)
665 665 fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
666 666 else:
667 667 fctx = f
668 668 lines = mdiff.splitnewlines(fctx.data())
669 669 if len(lines) != len(result): # linelog is probably corrupted
670 670 raise faerror.CorruptedFileError()
671 671 result = (result, lines)
672 672 return result
673 673
674 674 def _appendrev(self, fctx, blocks, bannotated=None):
675 675 self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
676 676
677 677 def _diffblocks(self, a, b):
678 678 return mdiff.allblocks(a, b, self.opts.diffopts)
679 679
680 680 @staticmethod
681 681 def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
682 682 """append a revision to linelog and revmap"""
683 683
684 684 def getllrev(f):
685 685 """(fctx) -> int"""
686 686 # f should not be a linelog revision
687 687 if isinstance(f, int):
688 688 raise error.ProgrammingError(b'f should not be an int')
689 689 # f is a fctx, allocate linelog rev on demand
690 690 hsh = f.node()
691 691 rev = revmap.hsh2rev(hsh)
692 692 if rev is None:
693 693 rev = revmap.append(hsh, sidebranch=True, path=f.path())
694 694 return rev
695 695
696 696 # append sidebranch revisions to revmap
697 697 siderevs = []
698 698 siderevmap = {} # node: int
699 699 if bannotated is not None:
700 700 for (a1, a2, b1, b2), op in blocks:
701 701 if op != b'=':
702 702 # f could be either a linelog rev or a fctx.
703 703 siderevs += [
704 704 f
705 705 for f, l in bannotated[b1:b2]
706 706 if not isinstance(f, int)
707 707 ]
708 708 siderevs = set(siderevs)
709 709 if fctx in siderevs: # mainnode must be appended separately
710 710 siderevs.remove(fctx)
711 711 for f in siderevs:
712 712 siderevmap[f] = getllrev(f)
713 713
714 714 # the changeset in the main branch, could be a merge
715 715 llrev = revmap.append(fctx.node(), path=fctx.path())
716 716 siderevmap[fctx] = llrev
717 717
718 718 for (a1, a2, b1, b2), op in reversed(blocks):
719 719 if op == b'=':
720 720 continue
721 721 if bannotated is None:
722 722 linelog.replacelines(llrev, a1, a2, b1, b2)
723 723 else:
724 724 blines = [
725 725 ((r if isinstance(r, int) else siderevmap[r]), l)
726 726 for r, l in bannotated[b1:b2]
727 727 ]
728 728 linelog.replacelines_vec(llrev, a1, a2, blines)
729 729
730 730 def _addpathtoresult(self, annotateresult, revmap=None):
731 731 """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
732 732 if revmap is None:
733 733 revmap = self.revmap
734 734
735 735 def _getpath(nodeid):
736 736 path = self._node2path.get(nodeid)
737 737 if path is None:
738 738 path = revmap.rev2path(revmap.hsh2rev(nodeid))
739 739 self._node2path[nodeid] = path
740 740 return path
741 741
742 742 return [(n, l, _getpath(n)) for n, l in annotateresult]
743 743
744 744 def _checklastmasterhead(self, fctx):
745 745 """check if fctx is the master's head last time, raise if not"""
746 746 if fctx is None:
747 747 llrev = 0
748 748 else:
749 749 llrev = self.revmap.hsh2rev(fctx.node())
750 750 if not llrev:
751 751 raise faerror.CannotReuseError()
752 752 if self.linelog.maxrev != llrev:
753 753 raise faerror.CannotReuseError()
754 754
755 755 @util.propertycache
756 756 def _parentfunc(self):
757 757 """-> (fctx) -> [fctx]"""
758 758 followrename = self.opts.followrename
759 759 followmerge = self.opts.followmerge
760 760
761 761 def parents(f):
762 762 pl = _parents(f, follow=followrename)
763 763 if not followmerge:
764 764 pl = pl[:1]
765 765 return pl
766 766
767 767 return parents
768 768
769 769 @util.propertycache
770 770 def _perfhack(self):
771 771 return self.ui.configbool(b'fastannotate', b'perfhack')
772 772
773 773 def _resolvefctx(self, rev, path=None, **kwds):
774 774 return resolvefctx(self.repo, rev, (path or self.path), **kwds)
775 775
776 776
777 777 def _unlinkpaths(paths):
778 778 """silent, best-effort unlink"""
779 779 for path in paths:
780 780 try:
781 781 util.unlink(path)
782 782 except OSError:
783 783 pass
784 784
785 785
786 786 class pathhelper:
787 787 """helper for getting paths for lockfile, linelog and revmap"""
788 788
789 789 def __init__(self, repo, path, opts=defaultopts):
790 790 # different options use different directories
791 791 self._vfspath = os.path.join(
792 792 b'fastannotate', opts.shortstr, encodedir(path)
793 793 )
794 794 self._repo = repo
795 795
796 796 @property
797 797 def dirname(self):
798 798 return os.path.dirname(self._repo.vfs.join(self._vfspath))
799 799
800 800 @property
801 801 def linelogpath(self):
802 802 return self._repo.vfs.join(self._vfspath + b'.l')
803 803
804 804 def lock(self):
805 805 return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock')
806 806
807 807 @property
808 808 def revmappath(self):
809 809 return self._repo.vfs.join(self._vfspath + b'.m')
810 810
811 811
812 812 @contextlib.contextmanager
813 813 def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
814 814 """context needed to perform (fast) annotate on a file
815 815
816 816 an annotatecontext of a single file consists of two structures: the
817 817 linelog and the revmap. this function takes care of locking. only 1
818 818 process is allowed to write that file's linelog and revmap at a time.
819 819
820 820 when something goes wrong, this function will assume the linelog and the
821 821 revmap are in a bad state, and remove them from disk.
822 822
823 823 use this function in the following way:
824 824
825 825 with annotatecontext(...) as actx:
826 826 actx. ....
827 827 """
828 828 helper = pathhelper(repo, path, opts)
829 829 util.makedirs(helper.dirname)
830 830 revmappath = helper.revmappath
831 831 linelogpath = helper.linelogpath
832 832 actx = None
833 833 try:
834 834 with helper.lock():
835 835 actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
836 836 if rebuild:
837 837 actx.rebuild()
838 838 yield actx
839 839 except Exception:
840 840 if actx is not None:
841 841 actx.rebuild()
842 842 repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
843 843 raise
844 844 finally:
845 845 if actx is not None:
846 846 actx.close()
847 847
848 848
849 849 def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
850 850 """like annotatecontext but get the context from a fctx. convenient when
851 851 used in fctx.annotate
852 852 """
853 853 repo = fctx._repo
854 854 path = fctx._path
855 855 if repo.ui.configbool(b'fastannotate', b'forcefollow', True):
856 856 follow = True
857 857 aopts = annotateopts(diffopts=diffopts, followrename=follow)
858 858 return annotatecontext(repo, path, aopts, rebuild)
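Tying the module together, callers are expected to go through the context manager so that locking and corrupted-cache cleanup are handled for them. A minimal usage sketch (assuming the extension is importable as hgext.fastannotate; repo is an open repository object)::

    from hgext.fastannotate import context as facontext

    def annotate_head(repo, path):
        # refresh the cache up to 'default', then annotate the working parent
        with facontext.annotatecontext(repo, path) as actx:
            result, lines = actx.annotate(
                b'.', master=b'default', showpath=True, showlines=True
            )
        for (node, linenum, fpath), line in zip(result, lines):
            yield node, linenum, fpath, line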
@@ -1,176 +1,176 b''
1 1 # Copyright 2016-present Facebook. All Rights Reserved.
2 2 #
3 3 # format: defines the format used to output annotate result
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial.node import (
9 9 hex,
10 10 short,
11 11 )
12 12 from mercurial import (
13 13 encoding,
14 14 pycompat,
15 15 templatefilters,
16 16 util,
17 17 )
18 18 from mercurial.utils import dateutil
19 19
20 20 # imitating mercurial.commands.annotate, not using the vanilla formatter since
21 21 # the data structures are a bit different, and we have some fast paths.
22 22 class defaultformatter:
23 23 """the default formatter that does leftpad and support some common flags"""
24 24
25 25 def __init__(self, ui, repo, opts):
26 26 self.ui = ui
27 27 self.opts = opts
28 28
29 29 if ui.quiet:
30 30 datefunc = dateutil.shortdate
31 31 else:
32 32 datefunc = dateutil.datestr
33 33 datefunc = util.cachefunc(datefunc)
34 34 getctx = util.cachefunc(lambda x: repo[x[0]])
35 35 hexfunc = self._hexfunc
36 36
37 37 # special handling working copy "changeset" and "rev" functions
38 38 if self.opts.get(b'rev') == b'wdir()':
39 39 orig = hexfunc
40 40 hexfunc = lambda x: None if x is None else orig(x)
41 41 wnode = hexfunc(repo[b'.'].node()) + b'+'
42 42 wrev = b'%d' % repo[b'.'].rev()
43 43 wrevpad = b''
44 44 if not opts.get(b'changeset'): # only show + if changeset is hidden
45 45 wrev += b'+'
46 46 wrevpad = b' '
47 47 revenc = lambda x: wrev if x is None else (b'%d' % x) + wrevpad
48 48
49 49 def csetenc(x):
50 50 if x is None:
51 51 return wnode
52 52 return pycompat.bytestr(x) + b' '
53 53
54 54 else:
55 55 revenc = csetenc = pycompat.bytestr
56 56
57 57 # opt name, separator, raw value (for json/plain), encoder (for plain)
58 58 opmap = [
59 59 (b'user', b' ', lambda x: getctx(x).user(), ui.shortuser),
60 60 (b'number', b' ', lambda x: getctx(x).rev(), revenc),
61 61 (b'changeset', b' ', lambda x: hexfunc(x[0]), csetenc),
62 62 (b'date', b' ', lambda x: getctx(x).date(), datefunc),
63 63 (b'file', b' ', lambda x: x[2], pycompat.bytestr),
64 64 (b'line_number', b':', lambda x: x[1] + 1, pycompat.bytestr),
65 65 ]
66 66 fieldnamemap = {b'number': b'rev', b'changeset': b'node'}
67 67 funcmap = [
68 68 (get, sep, fieldnamemap.get(op, op), enc)
69 69 for op, sep, get, enc in opmap
70 70 if opts.get(op)
71 71 ]
72 72 # no separator for first column
73 73 funcmap[0] = list(funcmap[0])
74 74 funcmap[0][1] = b''
75 75 self.funcmap = funcmap
76 76
77 77 def write(self, annotatedresult, lines=None, existinglines=None):
78 78 """(annotateresult, [str], set([rev, linenum])) -> None. write output.
79 79 annotateresult can be [(node, linenum, path)], or [(node, linenum)]
80 80 """
81 81 pieces = [] # [[str]]
82 82 maxwidths = [] # [int]
83 83
84 84 # calculate padding
85 85 for f, sep, name, enc in self.funcmap:
86 86 l = [enc(f(x)) for x in annotatedresult]
87 87 pieces.append(l)
88 88 if name in [b'node', b'date']: # node and date have fixed size
89 89 l = l[:1]
90 90 widths = pycompat.maplist(encoding.colwidth, set(l))
91 91 maxwidth = max(widths) if widths else 0
92 92 maxwidths.append(maxwidth)
93 93
94 94 # buffered output
95 95 result = b''
96 for i in pycompat.xrange(len(annotatedresult)):
96 for i in range(len(annotatedresult)):
97 97 for j, p in enumerate(pieces):
98 98 sep = self.funcmap[j][1]
99 99 padding = b' ' * (maxwidths[j] - len(p[i]))
100 100 result += sep + padding + p[i]
101 101 if lines:
102 102 if existinglines is None:
103 103 result += b': ' + lines[i]
104 104 else: # extra formatting showing whether a line exists
105 105 key = (annotatedresult[i][0], annotatedresult[i][1])
106 106 if key in existinglines:
107 107 result += b': ' + lines[i]
108 108 else:
109 109 result += b': ' + self.ui.label(
110 110 b'-' + lines[i], b'diff.deleted'
111 111 )
112 112
113 113 if result[-1:] != b'\n':
114 114 result += b'\n'
115 115
116 116 self.ui.write(result)
117 117
118 118 @util.propertycache
119 119 def _hexfunc(self):
120 120 if self.ui.debugflag or self.opts.get(b'long_hash'):
121 121 return hex
122 122 else:
123 123 return short
124 124
125 125 def end(self):
126 126 pass
127 127
128 128
129 129 class jsonformatter(defaultformatter):
130 130 def __init__(self, ui, repo, opts):
131 131 super(jsonformatter, self).__init__(ui, repo, opts)
132 132 self.ui.write(b'[')
133 133 self.needcomma = False
134 134
135 135 def write(self, annotatedresult, lines=None, existinglines=None):
136 136 if annotatedresult:
137 137 self._writecomma()
138 138
139 139 pieces = [
140 140 (name, pycompat.maplist(f, annotatedresult))
141 141 for f, sep, name, enc in self.funcmap
142 142 ]
143 143 if lines is not None:
144 144 pieces.append((b'line', lines))
145 145 pieces.sort()
146 146
147 147 seps = [b','] * len(pieces[:-1]) + [b'']
148 148
149 149 result = b''
150 150 lasti = len(annotatedresult) - 1
151 for i in pycompat.xrange(len(annotatedresult)):
151 for i in range(len(annotatedresult)):
152 152 result += b'\n {\n'
153 153 for j, p in enumerate(pieces):
154 154 k, vs = p
155 155 result += b' "%s": %s%s\n' % (
156 156 k,
157 157 templatefilters.json(vs[i], paranoid=False),
158 158 seps[j],
159 159 )
160 160 result += b' }%s' % (b'' if i == lasti else b',')
161 161 if lasti >= 0:
162 162 self.needcomma = True
163 163
164 164 self.ui.write(result)
165 165
166 166 def _writecomma(self):
167 167 if self.needcomma:
168 168 self.ui.write(b',')
169 169 self.needcomma = False
170 170
171 171 @util.propertycache
172 172 def _hexfunc(self):
173 173 return hex
174 174
175 175 def end(self):
176 176 self.ui.write(b'\n]\n')
@@ -1,262 +1,259 b''
1 1 # Copyright 2016-present Facebook. All Rights Reserved.
2 2 #
3 3 # revmap: trivial hg hash - linelog rev bidirectional map
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import bisect
10 10 import io
11 11 import os
12 12 import struct
13 13
14 14 from mercurial.node import hex
15 15 from mercurial.pycompat import open
16 16 from mercurial import (
17 17 error as hgerror,
18 pycompat,
19 18 )
20 19 from . import error
21 20
22 21 # the revmap file format is straightforward:
23 22 #
24 23 # 8 bytes: header
25 24 # 1 byte : flag for linelog revision 1
26 25 # ? bytes: (optional) '\0'-terminated path string
27 26 # only exists if (flag & renameflag) != 0
28 27 # 20 bytes: hg hash for linelog revision 1
29 28 # 1 byte : flag for linelog revision 2
30 29 # ? bytes: (optional) '\0'-terminated path string
31 30 # 20 bytes: hg hash for linelog revision 2
32 31 # ....
33 32 #
34 33 # the implementation is kinda stupid: __init__ loads the whole revmap.
35 34 # no laziness. benchmark shows loading 10000 revisions is about 0.015
36 35 seconds, which seems enough for our use-case. if this implementation
37 36 # becomes a bottleneck, we can change it to lazily read the file
38 37 # from the end.
39 38
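The record layout above maps directly onto a small pure-Python reader; a minimal sketch (standalone, mirroring the _load method further down without the revmap class bookkeeping)::

    def read_revmap(path):
        # [(flag, renamed-path or None, 20-byte node)] for linelog revs 1..n
        entries = []
        with open(path, 'rb') as f:
            assert f.read(8) == b'REVMAP1\0'
            while True:
                buf = f.read(1)
                if not buf:      # clean end of file
                    break
                flag = buf[0]
                renamed = None
                if flag & 2:     # renameflag: '\0'-terminated path follows
                    renamed = b''
                    ch = f.read(1)
                    while ch and ch != b'\0':
                        renamed += ch
                        ch = f.read(1)
                entries.append((flag, renamed, f.read(20)))
        return entries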
40 39 # whether the changeset is in the side branch. i.e. not in the linear main
41 40 # branch but only got referenced by lines in merge changesets.
42 41 sidebranchflag = 1
43 42
44 43 # whether the changeset changes the file path (ie. is a rename)
45 44 renameflag = 2
46 45
47 46 # len(mercurial.node.nullid)
48 47 _hshlen = 20
49 48
50 49
51 50 class revmap:
52 51 """trivial hg bin hash - linelog rev bidirectional map
53 52
54 53 also stores a flag (uint8) for each revision, and track renames.
55 54 """
56 55
57 56 HEADER = b'REVMAP1\0'
58 57
59 58 def __init__(self, path=None):
60 59 """create or load the revmap, optionally associate to a file
61 60
62 61 if path is None, the revmap is entirely in-memory. the caller is
63 62 responsible for locking. concurrent writes to the same file are unsafe.
64 63 the caller needs to make sure one file is associated to at most one
65 64 revmap object at a time."""
66 65 self.path = path
67 66 self._rev2hsh = [None]
68 67 self._rev2flag = [None]
69 68 self._hsh2rev = {}
70 69 # since rename does not happen frequently, do not store path for every
71 70 # revision. self._renamerevs can be used for bisecting.
72 71 self._renamerevs = [0]
73 72 self._renamepaths = [b'']
74 73 self._lastmaxrev = -1
75 74 if path:
76 75 if os.path.exists(path):
77 76 self._load()
78 77 else:
79 78 # write the header so "append" can do incremental updates
80 79 self.flush()
81 80
82 81 def copyfrom(self, rhs):
83 82 """copy the map data from another revmap. do not affect self.path"""
84 83 self._rev2hsh = rhs._rev2hsh[:]
85 84 self._rev2flag = rhs._rev2flag[:]
86 85 self._hsh2rev = rhs._hsh2rev.copy()
87 86 self._renamerevs = rhs._renamerevs[:]
88 87 self._renamepaths = rhs._renamepaths[:]
89 88 self._lastmaxrev = -1
90 89
91 90 @property
92 91 def maxrev(self):
93 92 """return max linelog revision number"""
94 93 return len(self._rev2hsh) - 1
95 94
96 95 def append(self, hsh, sidebranch=False, path=None, flush=False):
97 96 """add a binary hg hash and return the mapped linelog revision.
98 97 if flush is True, incrementally update the file.
99 98 """
100 99 if hsh in self._hsh2rev:
101 100 raise error.CorruptedFileError(
102 101 b'%r is in revmap already' % hex(hsh)
103 102 )
104 103 if len(hsh) != _hshlen:
105 104 raise hgerror.ProgrammingError(
106 105 b'hsh must be %d-char long' % _hshlen
107 106 )
108 107 idx = len(self._rev2hsh)
109 108 flag = 0
110 109 if sidebranch:
111 110 flag |= sidebranchflag
112 111 if path is not None and path != self._renamepaths[-1]:
113 112 flag |= renameflag
114 113 self._renamerevs.append(idx)
115 114 self._renamepaths.append(path)
116 115 self._rev2hsh.append(hsh)
117 116 self._rev2flag.append(flag)
118 117 self._hsh2rev[hsh] = idx
119 118 if flush:
120 119 self.flush()
121 120 return idx
122 121
123 122 def rev2hsh(self, rev):
124 123 """convert linelog revision to hg hash. return None if not found."""
125 124 if rev > self.maxrev or rev < 0:
126 125 return None
127 126 return self._rev2hsh[rev]
128 127
129 128 def rev2flag(self, rev):
130 129 """get the flag (uint8) for a given linelog revision.
131 130 return None if revision does not exist.
132 131 """
133 132 if rev > self.maxrev or rev < 0:
134 133 return None
135 134 return self._rev2flag[rev]
136 135
137 136 def rev2path(self, rev):
138 137 """get the path for a given linelog revision.
139 138 return None if revision does not exist.
140 139 """
141 140 if rev > self.maxrev or rev < 0:
142 141 return None
143 142 idx = bisect.bisect_right(self._renamerevs, rev) - 1
144 143 return self._renamepaths[idx]
145 144
146 145 def hsh2rev(self, hsh):
147 146 """convert hg hash to linelog revision. return None if not found."""
148 147 return self._hsh2rev.get(hsh)
149 148
150 149 def clear(self, flush=False):
151 150 """make the map empty. if flush is True, write to disk"""
152 151 # rev 0 is reserved, real rev starts from 1
153 152 self._rev2hsh = [None]
154 153 self._rev2flag = [None]
155 154 self._hsh2rev = {}
156 155 self._rev2path = [b'']
157 156 self._lastmaxrev = -1
158 157 if flush:
159 158 self.flush()
160 159
161 160 def flush(self):
162 161 """write the state down to the file"""
163 162 if not self.path:
164 163 return
165 164 if self._lastmaxrev == -1: # write the entire file
166 165 with open(self.path, b'wb') as f:
167 166 f.write(self.HEADER)
168 for i in pycompat.xrange(1, len(self._rev2hsh)):
167 for i in range(1, len(self._rev2hsh)):
169 168 self._writerev(i, f)
170 169 else: # append incrementally
171 170 with open(self.path, b'ab') as f:
172 for i in pycompat.xrange(
173 self._lastmaxrev + 1, len(self._rev2hsh)
174 ):
171 for i in range(self._lastmaxrev + 1, len(self._rev2hsh)):
175 172 self._writerev(i, f)
176 173 self._lastmaxrev = self.maxrev
177 174
178 175 def _load(self):
179 176 """load state from file"""
180 177 if not self.path:
181 178 return
182 179 # use local variables in a loop. CPython uses LOAD_FAST for them,
183 180 # which is faster than both LOAD_CONST and LOAD_GLOBAL.
184 181 flaglen = 1
185 182 hshlen = _hshlen
186 183 with open(self.path, b'rb') as f:
187 184 if f.read(len(self.HEADER)) != self.HEADER:
188 185 raise error.CorruptedFileError()
189 186 self.clear(flush=False)
190 187 while True:
191 188 buf = f.read(flaglen)
192 189 if not buf:
193 190 break
194 191 flag = ord(buf)
195 192 rev = len(self._rev2hsh)
196 193 if flag & renameflag:
197 194 path = self._readcstr(f)
198 195 self._renamerevs.append(rev)
199 196 self._renamepaths.append(path)
200 197 hsh = f.read(hshlen)
201 198 if len(hsh) != hshlen:
202 199 raise error.CorruptedFileError()
203 200 self._hsh2rev[hsh] = rev
204 201 self._rev2flag.append(flag)
205 202 self._rev2hsh.append(hsh)
206 203 self._lastmaxrev = self.maxrev
207 204
208 205 def _writerev(self, rev, f):
209 206 """append a revision data to file"""
210 207 flag = self._rev2flag[rev]
211 208 hsh = self._rev2hsh[rev]
212 209 f.write(struct.pack(b'B', flag))
213 210 if flag & renameflag:
214 211 path = self.rev2path(rev)
215 212 if path is None:
216 213 raise error.CorruptedFileError(b'cannot find path for %s' % rev)
217 214 f.write(path + b'\0')
218 215 f.write(hsh)
219 216
220 217 @staticmethod
221 218 def _readcstr(f):
222 219 """read a C-language-like '\0'-terminated string"""
223 220 buf = b''
224 221 while True:
225 222 ch = f.read(1)
226 223 if not ch: # unexpected eof
227 224 raise error.CorruptedFileError()
228 225 if ch == b'\0':
229 226 break
230 227 buf += ch
231 228 return buf
232 229
233 230 def __contains__(self, f):
234 231 """(fctx or (node, path)) -> bool.
235 232 test if (node, path) is in the map, and is not in a side branch.
236 233 f can be either a tuple of (node, path), or a fctx.
237 234 """
238 235 if isinstance(f, tuple): # f: (node, path)
239 236 hsh, path = f
240 237 else: # f: fctx
241 238 hsh, path = f.node(), f.path()
242 239 rev = self.hsh2rev(hsh)
243 240 if rev is None:
244 241 return False
245 242 if path is not None and path != self.rev2path(rev):
246 243 return False
247 244 return (self.rev2flag(rev) & sidebranchflag) == 0
248 245
249 246
250 247 def getlastnode(path):
251 248 """return the last hash in a revmap, without loading its full content.
252 249 this is equivalent to `m = revmap(path); m.rev2hsh(m.maxrev)`, but faster.
253 250 """
254 251 hsh = None
255 252 try:
256 253 with open(path, b'rb') as f:
257 254 f.seek(-_hshlen, io.SEEK_END)
258 255 if f.tell() > len(revmap.HEADER):
259 256 hsh = f.read(_hshlen)
260 257 except IOError:
261 258 pass
262 259 return hsh
@@ -1,547 +1,547 b''
1 1 from mercurial.i18n import _
2 2
3 3 from mercurial.node import (
4 4 bin,
5 5 hex,
6 6 nullrev,
7 7 sha1nodeconstants,
8 8 )
9 9 from mercurial import (
10 10 ancestor,
11 11 changelog as hgchangelog,
12 12 dagop,
13 13 encoding,
14 14 error,
15 15 manifest,
16 16 pycompat,
17 17 )
18 18 from mercurial.interfaces import (
19 19 repository,
20 20 util as interfaceutil,
21 21 )
22 22 from mercurial.utils import stringutil
23 23 from . import (
24 24 gitutil,
25 25 index,
26 26 manifest as gitmanifest,
27 27 )
28 28
29 29 pygit2 = gitutil.get_pygit2()
30 30
31 31
32 32 class baselog: # revlog.revlog):
33 33 """Common implementations between changelog and manifestlog."""
34 34
35 35 def __init__(self, gr, db):
36 36 self.gitrepo = gr
37 37 self._db = db
38 38
39 39 def __len__(self):
40 40 return int(
41 41 self._db.execute('SELECT COUNT(*) FROM changelog').fetchone()[0]
42 42 )
43 43
44 44 def rev(self, n):
45 45 if n == sha1nodeconstants.nullid:
46 46 return -1
47 47 t = self._db.execute(
48 48 'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
49 49 ).fetchone()
50 50 if t is None:
51 51 raise error.LookupError(n, b'00changelog.i', _(b'no node %d'))
52 52 return t[0]
53 53
54 54 def node(self, r):
55 55 if r == nullrev:
56 56 return sha1nodeconstants.nullid
57 57 t = self._db.execute(
58 58 'SELECT node FROM changelog WHERE rev = ?', (r,)
59 59 ).fetchone()
60 60 if t is None:
61 61 raise error.LookupError(r, b'00changelog.i', _(b'no node'))
62 62 return bin(t[0])
63 63
64 64 def hasnode(self, n):
65 65 t = self._db.execute(
66 66 'SELECT node FROM changelog WHERE node = ?',
67 67 (pycompat.sysstr(n),),
68 68 ).fetchone()
69 69 return t is not None
70 70
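These lookups rely on a SQLite index whose layout is implied by the queries in this class. A toy round-trip of the node<->rev mapping (single-table schema assumed for illustration; the real index is built elsewhere)::

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute('CREATE TABLE changelog (rev INTEGER, node TEXT)')
    hexnode = 'deadbeef' * 5   # stand-in for a 40-char git hash
    db.execute('INSERT INTO changelog VALUES (0, ?)', (hexnode,))
    # hash -> rev, as in baselog.rev()
    rev = db.execute(
        'SELECT rev FROM changelog WHERE node = ?', (hexnode,)
    ).fetchone()[0]
    # rev -> hash, as in baselog.node()
    node = db.execute(
        'SELECT node FROM changelog WHERE rev = ?', (rev,)
    ).fetchone()[0]
    assert (rev, node) == (0, hexnode)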
71 71
72 72 class baselogindex:
73 73 def __init__(self, log):
74 74 self._log = log
75 75
76 76 def has_node(self, n):
77 77 return self._log.rev(n) != -1
78 78
79 79 def __len__(self):
80 80 return len(self._log)
81 81
82 82 def __getitem__(self, idx):
83 83 p1rev, p2rev = self._log.parentrevs(idx)
84 84 # TODO: it's messy that the index leaks so far out of the
85 85 # storage layer that we have to implement things like reading
86 86 # this raw tuple, which exposes revlog internals.
87 87 return (
88 88 # Pretend offset is just the index, since we don't really care.
89 89 idx,
90 90 # Same with lengths
91 91 idx, # length
92 92 idx, # rawsize
93 93 -1, # delta base
94 94 idx, # linkrev TODO is this right?
95 95 p1rev,
96 96 p2rev,
97 97 self._log.node(idx),
98 98 )
99 99
100 100
101 101 # TODO: an interface for the changelog type?
102 102 class changelog(baselog):
103 103 # TODO: this appears to be an enumerated type, and should probably
104 104 # be part of the public changelog interface
105 105 _copiesstorage = b'extra'
106 106
107 107 def __contains__(self, rev):
108 108 try:
109 109 self.node(rev)
110 110 return True
111 111 except error.LookupError:
112 112 return False
113 113
114 114 def __iter__(self):
115 return iter(pycompat.xrange(len(self)))
115 return iter(range(len(self)))
116 116
117 117 @property
118 118 def filteredrevs(self):
119 119 # TODO: we should probably add a refs/hg/ namespace for hidden
120 120 # heads etc, but that's an idea for later.
121 121 return set()
122 122
123 123 @property
124 124 def index(self):
125 125 return baselogindex(self)
126 126
127 127 @property
128 128 def nodemap(self):
129 129 r = {
130 130 bin(v[0]): v[1]
131 131 for v in self._db.execute('SELECT node, rev FROM changelog')
132 132 }
133 133 r[sha1nodeconstants.nullid] = nullrev
134 134 return r
135 135
136 136 def tip(self):
137 137 t = self._db.execute(
138 138 'SELECT node FROM changelog ORDER BY rev DESC LIMIT 1'
139 139 ).fetchone()
140 140 if t:
141 141 return bin(t[0])
142 142 return sha1nodeconstants.nullid
143 143
144 144 def revs(self, start=0, stop=None):
145 145 if stop is None:
146 146 stop = self.tiprev()
147 147 t = self._db.execute(
148 148 'SELECT rev FROM changelog '
149 149 'WHERE rev >= ? AND rev <= ? '
150 150 'ORDER BY REV ASC',
151 151 (start, stop),
152 152 )
153 153 return (int(r[0]) for r in t)
154 154
155 155 def tiprev(self):
156 156 t = self._db.execute(
157 157 'SELECT rev FROM changelog ' 'ORDER BY REV DESC ' 'LIMIT 1'
158 158 ).fetchone()
159 159
160 160 if t is not None:
161 161 return t[0]
162 162 return -1
163 163
164 164 def _partialmatch(self, id):
165 165 if sha1nodeconstants.wdirhex.startswith(id):
166 166 raise error.WdirUnsupported
167 167 candidates = [
168 168 bin(x[0])
169 169 for x in self._db.execute(
170 170 'SELECT node FROM changelog WHERE node LIKE ?',
171 171 (pycompat.sysstr(id + b'%'),),
172 172 )
173 173 ]
174 174 if sha1nodeconstants.nullhex.startswith(id):
175 175 candidates.append(sha1nodeconstants.nullid)
176 176 if len(candidates) > 1:
177 177 raise error.AmbiguousPrefixLookupError(
178 178 id, b'00changelog.i', _(b'ambiguous identifier')
179 179 )
180 180 if candidates:
181 181 return candidates[0]
182 182 return None
183 183
184 184 def flags(self, rev):
185 185 return 0
186 186
187 187 def shortest(self, node, minlength=1):
188 188 nodehex = hex(node)
189 for attempt in pycompat.xrange(minlength, len(nodehex) + 1):
189 for attempt in range(minlength, len(nodehex) + 1):
190 190 candidate = nodehex[:attempt]
191 191 matches = int(
192 192 self._db.execute(
193 193 'SELECT COUNT(*) FROM changelog WHERE node LIKE ?',
194 194 (pycompat.sysstr(candidate + b'%'),),
195 195 ).fetchone()[0]
196 196 )
197 197 if matches == 1:
198 198 return candidate
199 199 return nodehex
200 200
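shortest probes progressively longer hex prefixes until exactly one node matches, falling back to the full hash. The same idea over toy data, without the SQL::

    nodes = ['abc123', 'abd456', 'ffe789']

    def shortest(nodehex, minlength=1):
        for attempt in range(minlength, len(nodehex) + 1):
            candidate = nodehex[:attempt]
            if sum(n.startswith(candidate) for n in nodes) == 1:
                return candidate
        return nodehex

    assert shortest('abc123') == 'abc'
    assert shortest('ffe789') == 'f'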
201 201 def headrevs(self, revs=None):
202 202 realheads = [
203 203 int(x[0])
204 204 for x in self._db.execute(
205 205 'SELECT rev FROM changelog '
206 206 'INNER JOIN heads ON changelog.node = heads.node'
207 207 )
208 208 ]
209 209 if revs:
210 210 return sorted([r for r in revs if r in realheads])
211 211 return sorted(realheads)
212 212
213 213 def changelogrevision(self, nodeorrev):
214 214 # Ensure we have a node id
215 215 if isinstance(nodeorrev, int):
216 216 n = self.node(nodeorrev)
217 217 else:
218 218 n = nodeorrev
219 219 extra = {b'branch': b'default'}
220 220 # handle looking up nullid
221 221 if n == sha1nodeconstants.nullid:
222 222 return hgchangelog._changelogrevision(
223 223 extra=extra, manifest=sha1nodeconstants.nullid
224 224 )
225 225 hn = gitutil.togitnode(n)
226 226 # We've got a real commit!
227 227 files = [
228 228 r[0]
229 229 for r in self._db.execute(
230 230 'SELECT filename FROM changedfiles '
231 231 'WHERE node = ? and filenode != ?',
232 232 (hn, gitutil.nullgit),
233 233 )
234 234 ]
235 235 filesremoved = [
236 236 r[0]
237 237 for r in self._db.execute(
238 238 'SELECT filename FROM changedfiles '
239 239 'WHERE node = ? and filenode = ?',
240 240 (hn, gitutil.nullgit),
241 241 )
242 242 ]
243 243 c = self.gitrepo[hn]
244 244 return hgchangelog._changelogrevision(
245 245 manifest=n, # pretend manifest the same as the commit node
246 246 user=b'%s <%s>'
247 247 % (c.author.name.encode('utf8'), c.author.email.encode('utf8')),
248 248 date=(c.author.time, -c.author.offset * 60),
249 249 files=files,
250 250 # TODO filesadded in the index
251 251 filesremoved=filesremoved,
252 252 description=c.message.encode('utf8'),
253 253 # TODO do we want to handle extra? how?
254 254 extra=extra,
255 255 )
256 256
257 257 def ancestors(self, revs, stoprev=0, inclusive=False):
258 258 revs = list(revs)
259 259 tip = self.rev(self.tip())
260 260 for r in revs:
261 261 if r > tip:
262 262 raise IndexError(b'Invalid rev %r' % r)
263 263 return ancestor.lazyancestors(
264 264 self.parentrevs, revs, stoprev=stoprev, inclusive=inclusive
265 265 )
266 266
267 267 # Cleanup opportunity: this is *identical* to the revlog.py version
268 268 def descendants(self, revs):
269 269 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
270 270
271 271 def incrementalmissingrevs(self, common=None):
272 272 """Return an object that can be used to incrementally compute the
273 273 revision numbers of the ancestors of arbitrary sets that are not
274 274 ancestors of common. This is an ancestor.incrementalmissingancestors
275 275 object.
276 276
277 277 'common' is a list of revision numbers. If common is not supplied, uses
278 278 nullrev.
279 279 """
280 280 if common is None:
281 281 common = [nullrev]
282 282
283 283 return ancestor.incrementalmissingancestors(self.parentrevs, common)
284 284
285 285 def findmissing(self, common=None, heads=None):
286 286 """Return the ancestors of heads that are not ancestors of common.
287 287
288 288 More specifically, return a list of nodes N such that every N
289 289 satisfies the following constraints:
290 290
291 291 1. N is an ancestor of some node in 'heads'
292 292 2. N is not an ancestor of any node in 'common'
293 293
294 294 The list is sorted by revision number, meaning it is
295 295 topologically sorted.
296 296
297 297 'heads' and 'common' are both lists of node IDs. If heads is
298 298 not supplied, uses all of the revlog's heads. If common is not
299 299 supplied, uses nullid."""
300 300 if common is None:
301 301 common = [sha1nodeconstants.nullid]
302 302 if heads is None:
303 303 heads = self.heads()
304 304
305 305 common = [self.rev(n) for n in common]
306 306 heads = [self.rev(n) for n in heads]
307 307
308 308 inc = self.incrementalmissingrevs(common=common)
309 309 return [self.node(r) for r in inc.missingancestors(heads)]
310 310
311 311 def children(self, node):
312 312 """find the children of a given node"""
313 313 c = []
314 314 p = self.rev(node)
315 315 for r in self.revs(start=p + 1):
316 316 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
317 317 if prevs:
318 318 for pr in prevs:
319 319 if pr == p:
320 320 c.append(self.node(r))
321 321 elif p == nullrev:
322 322 c.append(self.node(r))
323 323 return c
324 324
325 325 def reachableroots(self, minroot, heads, roots, includepath=False):
326 326 return dagop._reachablerootspure(
327 327 self.parentrevs, minroot, roots, heads, includepath
328 328 )
329 329
330 330 # Cleanup opportunity: this is *identical* to the revlog.py version
331 331 def isancestor(self, a, b):
332 332 a, b = self.rev(a), self.rev(b)
333 333 return self.isancestorrev(a, b)
334 334
335 335 # Cleanup opportunity: this is *identical* to the revlog.py version
336 336 def isancestorrev(self, a, b):
337 337 if a == nullrev:
338 338 return True
339 339 elif a == b:
340 340 return True
341 341 elif a > b:
342 342 return False
343 343 return bool(self.reachableroots(a, [b], [a], includepath=False))
344 344
345 345 def parentrevs(self, rev):
346 346 n = self.node(rev)
347 347 hn = gitutil.togitnode(n)
348 348 if hn != gitutil.nullgit:
349 349 c = self.gitrepo[hn]
350 350 else:
351 351 return nullrev, nullrev
352 352 p1 = p2 = nullrev
353 353 if c.parents:
354 354 p1 = self.rev(c.parents[0].id.raw)
355 355 if len(c.parents) > 2:
356 356 raise error.Abort(b'TODO octopus merge handling')
357 357 if len(c.parents) == 2:
358 358 p2 = self.rev(c.parents[1].id.raw)
359 359 return p1, p2
360 360
361 361 # Private method is used at least by the tags code.
362 362 _uncheckedparentrevs = parentrevs
363 363
364 364 def commonancestorsheads(self, a, b):
365 365 # TODO the revlog version of this has a C path, so we probably
366 366 # need to optimize this...
367 367 a, b = self.rev(a), self.rev(b)
368 368 return [
369 369 self.node(n)
370 370 for n in ancestor.commonancestorsheads(self.parentrevs, a, b)
371 371 ]
372 372
373 373 def branchinfo(self, rev):
374 374 """Git doesn't do named branches, so just put everything on default."""
375 375 return b'default', False
376 376
377 377 def delayupdate(self, tr):
378 378 # TODO: I think we can elide this because we're just dropping
379 379 # an object in the git repo?
380 380 pass
381 381
382 382 def add(
383 383 self,
384 384 manifest,
385 385 files,
386 386 desc,
387 387 transaction,
388 388 p1,
389 389 p2,
390 390 user,
391 391 date=None,
392 392 extra=None,
393 393 p1copies=None,
394 394 p2copies=None,
395 395 filesadded=None,
396 396 filesremoved=None,
397 397 ):
398 398 parents = []
399 399 hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
400 400 if p1 != sha1nodeconstants.nullid:
401 401 parents.append(hp1)
402 402 if p2 and p2 != sha1nodeconstants.nullid:
403 403 parents.append(hp2)
404 404 assert date is not None
405 405 timestamp, tz = date
406 406 sig = pygit2.Signature(
407 407 encoding.unifromlocal(stringutil.person(user)),
408 408 encoding.unifromlocal(stringutil.email(user)),
409 409 int(timestamp),
410 410 -int(tz // 60),
411 411 )
412 412 oid = self.gitrepo.create_commit(
413 413 None, sig, sig, desc, gitutil.togitnode(manifest), parents
414 414 )
415 415 # Set up an internal reference to force the commit into the
416 416 # changelog. Hypothetically, we could even use this refs/hg/
417 417 # namespace to allow for anonymous heads on git repos, which
418 418 # would be neat.
419 419 self.gitrepo.references.create(
420 420 'refs/hg/internal/latest-commit', oid, force=True
421 421 )
422 422 # Reindex now to pick up changes. We omit the progress
423 423 # and log callbacks because this will be very quick.
424 424 index._index_repo(self.gitrepo, self._db)
425 425 return oid.raw
426 426
427 427
428 428 class manifestlog(baselog):
429 429 nodeconstants = sha1nodeconstants
430 430
431 431 def __getitem__(self, node):
432 432 return self.get(b'', node)
433 433
434 434 def get(self, relpath, node):
435 435 if node == sha1nodeconstants.nullid:
436 436 # TODO: this should almost certainly be a memgittreemanifestctx
437 437 return manifest.memtreemanifestctx(self, relpath)
438 438 commit = self.gitrepo[gitutil.togitnode(node)]
439 439 t = commit.tree
440 440 if relpath:
441 441 parts = relpath.split(b'/')
442 442 for p in parts:
443 443 te = t[p]
444 444 t = self.gitrepo[te.id]
445 445 return gitmanifest.gittreemanifestctx(self.gitrepo, t)
446 446
447 447
448 448 @interfaceutil.implementer(repository.ifilestorage)
449 449 class filelog(baselog):
450 450 def __init__(self, gr, db, path):
451 451 super(filelog, self).__init__(gr, db)
452 452 assert isinstance(path, bytes)
453 453 self.path = path
454 454 self.nullid = sha1nodeconstants.nullid
455 455
456 456 def read(self, node):
457 457 if node == sha1nodeconstants.nullid:
458 458 return b''
459 459 return self.gitrepo[gitutil.togitnode(node)].data
460 460
461 461 def lookup(self, node):
462 462 if len(node) not in (20, 40):
463 463 node = int(node)
464 464 if isinstance(node, int):
465 465 assert False, b'todo revnums for nodes'
466 466 if len(node) == 40:
467 467 node = bin(node)
468 468 hnode = gitutil.togitnode(node)
469 469 if hnode in self.gitrepo:
470 470 return node
471 471 raise error.LookupError(self.path, node, _(b'no match found'))
472 472
473 473 def cmp(self, node, text):
474 474 """Returns True if text is different than content at `node`."""
475 475 return self.read(node) != text
476 476
477 477 def add(self, text, meta, transaction, link, p1=None, p2=None):
478 478 assert not meta # Should we even try to handle this?
479 479 return self.gitrepo.create_blob(text).raw
480 480
481 481 def __iter__(self):
482 482 for clrev in self._db.execute(
483 483 '''
484 484 SELECT rev FROM changelog
485 485 INNER JOIN changedfiles ON changelog.node = changedfiles.node
486 486 WHERE changedfiles.filename = ? AND changedfiles.filenode != ?
487 487 ''',
488 488 (pycompat.fsdecode(self.path), gitutil.nullgit),
489 489 ):
490 490 yield clrev[0]
491 491
492 492 def linkrev(self, fr):
493 493 return fr
494 494
495 495 def rev(self, node):
496 496 row = self._db.execute(
497 497 '''
498 498 SELECT rev FROM changelog
499 499 INNER JOIN changedfiles ON changelog.node = changedfiles.node
500 500 WHERE changedfiles.filename = ? AND changedfiles.filenode = ?''',
501 501 (pycompat.fsdecode(self.path), gitutil.togitnode(node)),
502 502 ).fetchone()
503 503 if row is None:
504 504 raise error.LookupError(self.path, node, _(b'no such node'))
505 505 return int(row[0])
506 506
507 507 def node(self, rev):
508 508 maybe = self._db.execute(
509 509 '''SELECT filenode FROM changedfiles
510 510 INNER JOIN changelog ON changelog.node = changedfiles.node
511 511 WHERE changelog.rev = ? AND filename = ?
512 512 ''',
513 513 (rev, pycompat.fsdecode(self.path)),
514 514 ).fetchone()
515 515 if maybe is None:
516 516 raise IndexError('gitlog %r out of range %d' % (self.path, rev))
517 517 return bin(maybe[0])
518 518
519 519 def parents(self, node):
520 520 gn = gitutil.togitnode(node)
521 521 gp = pycompat.fsdecode(self.path)
522 522 ps = []
523 523 for p in self._db.execute(
524 524 '''SELECT p1filenode, p2filenode FROM changedfiles
525 525 WHERE filenode = ? AND filename = ?
526 526 ''',
527 527 (gn, gp),
528 528 ).fetchone():
529 529 if p is None:
530 530 commit = self._db.execute(
531 531 "SELECT node FROM changedfiles "
532 532 "WHERE filenode = ? AND filename = ?",
533 533 (gn, gp),
534 534 ).fetchone()[0]
535 535 # This filelog is missing some data. Build the
536 536 # filelog, then recurse (which will always find data).
537 537 if pycompat.ispy3:
538 538 commit = commit.decode('ascii')
539 539 index.fill_in_filelog(self.gitrepo, self._db, commit, gp, gn)
540 540 return self.parents(node)
541 541 else:
542 542 ps.append(bin(p))
543 543 return ps
544 544
545 545 def renamed(self, node):
546 546 # TODO: renames/copies
547 547 return False
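A short, self-contained sketch of the lazy backfill pattern used by parents() above: read the row, and if the derived data is missing, rebuild it and retry. The `Store` class and its contents are hypothetical stand-ins for the sqlite index.

    class Store:
        def __init__(self):
            # None marks a row whose parent data has not been computed yet.
            self.rows = {b'node1': None}

        def fill_in(self, key):
            # Stand-in for index.fill_in_filelog(): compute derived data.
            self.rows[key] = (b'p1', b'p2')

    def parents(store, key):
        row = store.rows[key]
        if row is None:
            store.fill_in(key)          # backfill once...
            return parents(store, key)  # ...then recurse; data now exists
        return row

    assert parents(Store(), b'node1') == (b'p1', b'p2')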
@@ -1,385 +1,385 b''
1 1 # Minimal support for git commands on an hg repository
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''browse the repository in a graphical way
9 9
10 10 The hgk extension allows browsing the history of a repository in a
11 11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
12 12 distributed with Mercurial.)
13 13
14 14 hgk consists of two parts: a Tcl script that does the displaying and
15 15 querying of information, and an extension to Mercurial named hgk.py,
16 16 which provides hooks for hgk to get information. hgk can be found in
17 17 the contrib directory, while the extension is shipped in the hgext
18 18 repository and needs to be enabled.
19 19
20 20 The :hg:`view` command will launch the hgk Tcl script. For this command
21 21 to work, hgk must be in your search path. Alternately, you can specify
22 22 the path to hgk in your configuration file::
23 23
24 24 [hgk]
25 25 path = /location/of/hgk
26 26
27 27 hgk can make use of the extdiff extension to visualize revisions.
28 28 Assuming you have already configured the extdiff vdiff command, just add::
29 29
30 30 [hgk]
31 31 vdiff=vdiff
32 32
33 33 The revisions context menu will now display additional entries to fire
34 34 vdiff on the hovered and selected revisions.
35 35 '''
36 36
37 37
38 38 import os
39 39
40 40 from mercurial.i18n import _
41 41 from mercurial.node import (
42 42 nullrev,
43 43 short,
44 44 )
45 45 from mercurial import (
46 46 commands,
47 47 obsolete,
48 48 patch,
49 49 pycompat,
50 50 registrar,
51 51 scmutil,
52 52 )
53 53
54 54 cmdtable = {}
55 55 command = registrar.command(cmdtable)
56 56 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
57 57 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
58 58 # be specifying the version(s) of Mercurial they are tested with, or
59 59 # leave the attribute unspecified.
60 60 testedwith = b'ships-with-hg-core'
61 61
62 62 configtable = {}
63 63 configitem = registrar.configitem(configtable)
64 64
65 65 configitem(
66 66 b'hgk',
67 67 b'path',
68 68 default=b'hgk',
69 69 )
70 70
71 71
72 72 @command(
73 73 b'debug-diff-tree',
74 74 [
75 75 (b'p', b'patch', None, _(b'generate patch')),
76 76 (b'r', b'recursive', None, _(b'recursive')),
77 77 (b'P', b'pretty', None, _(b'pretty')),
78 78 (b's', b'stdin', None, _(b'stdin')),
79 79 (b'C', b'copy', None, _(b'detect copies')),
80 80 (b'S', b'search', b"", _(b'search')),
81 81 ],
82 82 b'[OPTION]... NODE1 NODE2 [FILE]...',
83 83 inferrepo=True,
84 84 )
85 85 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
86 86 """diff trees from two commits"""
87 87
88 88 def __difftree(repo, node1, node2, files=None):
89 89 assert node2 is not None
90 90 if files is None:
91 91 files = []
92 92 mmap = repo[node1].manifest()
93 93 mmap2 = repo[node2].manifest()
94 94 m = scmutil.match(repo[node1], files)
95 95 st = repo.status(node1, node2, m)
96 96 empty = short(repo.nullid)
97 97
98 98 for f in st.modified:
99 99 # TODO get file permissions
100 100 ui.writenoi18n(
101 101 b":100664 100664 %s %s M\t%s\t%s\n"
102 102 % (short(mmap[f]), short(mmap2[f]), f, f)
103 103 )
104 104 for f in st.added:
105 105 ui.writenoi18n(
106 106 b":000000 100664 %s %s N\t%s\t%s\n"
107 107 % (empty, short(mmap2[f]), f, f)
108 108 )
109 109 for f in st.removed:
110 110 ui.writenoi18n(
111 111 b":100664 000000 %s %s D\t%s\t%s\n"
112 112 % (short(mmap[f]), empty, f, f)
113 113 )
114 114
115 115 ##
116 116
117 117 while True:
118 118 if opts['stdin']:
119 119 line = ui.fin.readline()
120 120 if not line:
121 121 break
122 122 line = line.rstrip(pycompat.oslinesep).split(b' ')
123 123 node1 = line[0]
124 124 if len(line) > 1:
125 125 node2 = line[1]
126 126 else:
127 127 node2 = None
128 128 node1 = repo.lookup(node1)
129 129 if node2:
130 130 node2 = repo.lookup(node2)
131 131 else:
132 132 node2 = node1
133 133 node1 = repo.changelog.parents(node1)[0]
134 134 if opts['patch']:
135 135 if opts['pretty']:
136 136 catcommit(ui, repo, node2, b"")
137 137 m = scmutil.match(repo[node1], files)
138 138 diffopts = patch.difffeatureopts(ui)
139 139 diffopts.git = True
140 140 chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts)
141 141 for chunk in chunks:
142 142 ui.write(chunk)
143 143 else:
144 144 __difftree(repo, node1, node2, files=files)
145 145 if not opts['stdin']:
146 146 break
147 147
148 148
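# A note on --stdin mode in difftree() above: each input line carries
# one or two whitespace-separated revisions. With two, the trees of
# those revisions are diffed; with one, its first parent is used as
# the base. For example (revision names are illustrative):
#
#   tip
#   1a2b3c4d 5e6f7a8b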
149 149 def catcommit(ui, repo, n, prefix, ctx=None):
150 150 nlprefix = b'\n' + prefix
151 151 if ctx is None:
152 152 ctx = repo[n]
153 153 # use ctx.node() instead ??
154 154 ui.write((b"tree %s\n" % short(ctx.changeset()[0])))
155 155 for p in ctx.parents():
156 156 ui.write((b"parent %s\n" % p))
157 157
158 158 date = ctx.date()
159 159 description = ctx.description().replace(b"\0", b"")
160 160 ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1])))
161 161
162 162 if b'committer' in ctx.extra():
163 163 ui.write((b"committer %s\n" % ctx.extra()[b'committer']))
164 164
165 165 ui.write((b"revision %d\n" % ctx.rev()))
166 166 ui.write((b"branch %s\n" % ctx.branch()))
167 167 if obsolete.isenabled(repo, obsolete.createmarkersopt):
168 168 if ctx.obsolete():
169 169 ui.writenoi18n(b"obsolete\n")
170 170 ui.write((b"phase %s\n\n" % ctx.phasestr()))
171 171
172 172 if prefix != b"":
173 173 ui.write(
174 174 b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip())
175 175 )
176 176 else:
177 177 ui.write(description + b"\n")
178 178 if prefix:
179 179 ui.write(b'\0')
180 180
181 181
182 182 @command(b'debug-merge-base', [], _(b'REV REV'))
183 183 def base(ui, repo, node1, node2):
184 184 """output common ancestor information"""
185 185 node1 = repo.lookup(node1)
186 186 node2 = repo.lookup(node2)
187 187 n = repo.changelog.ancestor(node1, node2)
188 188 ui.write(short(n) + b"\n")
189 189
190 190
191 191 @command(
192 192 b'debug-cat-file',
193 193 [(b's', b'stdin', None, _(b'stdin'))],
194 194 _(b'[OPTION]... TYPE FILE'),
195 195 inferrepo=True,
196 196 )
197 197 def catfile(ui, repo, type=None, r=None, **opts):
198 198 """cat a specific revision"""
199 199 # in stdin mode, every line except the commit is prefixed with two
200 200     # spaces. This way our caller can find the commit without magic
201 201 # strings
202 202 #
203 203 prefix = b""
204 204 if opts['stdin']:
205 205 line = ui.fin.readline()
206 206 if not line:
207 207 return
208 208 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
209 209 prefix = b" "
210 210 else:
211 211 if not type or not r:
212 212 ui.warn(_(b"cat-file: type or revision not supplied\n"))
213 213 commands.help_(ui, b'cat-file')
214 214
215 215 while r:
216 216 if type != b"commit":
217 217             ui.warn(_(b"aborting: hg cat-file only understands commits\n"))
218 218 return 1
219 219 n = repo.lookup(r)
220 220 catcommit(ui, repo, n, prefix)
221 221 if opts['stdin']:
222 222 line = ui.fin.readline()
223 223 if not line:
224 224 break
225 225 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
226 226 else:
227 227 break
228 228
229 229
230 230 # git rev-tree is a confusing thing. You can supply a number of
231 231 # commit sha1s on the command line, and it walks the commit history
232 232 # telling you which commits are reachable from the supplied ones via
233 233 # a bitmask based on arg position.
234 234 # you can specify a commit to stop at by starting the sha1 with ^
235 235 def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False):
236 236 def chlogwalk():
237 237 count = len(repo)
238 238 i = count
239 239 l = [0] * 100
240 240 chunk = 100
241 241 while True:
242 242 if chunk > i:
243 243 chunk = i
244 244 i = 0
245 245 else:
246 246 i -= chunk
247 247
248 for x in pycompat.xrange(chunk):
248 for x in range(chunk):
249 249 if i + x >= count:
250 250 l[chunk - x :] = [0] * (chunk - x)
251 251 break
252 252 if full is not None:
253 253 if (i + x) in repo:
254 254 l[x] = repo[i + x]
255 255 l[x].changeset() # force reading
256 256 else:
257 257 if (i + x) in repo:
258 258 l[x] = 1
259 for x in pycompat.xrange(chunk - 1, -1, -1):
259 for x in range(chunk - 1, -1, -1):
260 260 if l[x] != 0:
261 261 yield (i + x, full is not None and l[x] or None)
262 262 if i == 0:
263 263 break
264 264
265 265 # calculate and return the reachability bitmask for sha
266 266 def is_reachable(ar, reachable, sha):
267 267 if len(ar) == 0:
268 268 return 1
269 269 mask = 0
270 for i in pycompat.xrange(len(ar)):
270 for i in range(len(ar)):
271 271 if sha in reachable[i]:
272 272 mask |= 1 << i
273 273
274 274 return mask
275 275
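    # Worked example for is_reachable() above: with two wanted commits,
    # a sha reachable only from the first gets mask 0b01 == 1, only from
    # the second 0b10 == 2, from both 0b11 == 3; 0 is falsy and means
    # unreachable, so the caller skips the commit entirely.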
276 276 reachable = []
277 277 stop_sha1 = []
278 278 want_sha1 = []
279 279 count = 0
280 280
281 281 # figure out which commits they are asking for and which ones they
282 282 # want us to stop on
283 283 for i, arg in enumerate(args):
284 284 if arg.startswith(b'^'):
285 285 s = repo.lookup(arg[1:])
286 286 stop_sha1.append(s)
287 287 want_sha1.append(s)
288 288 elif arg != b'HEAD':
289 289 want_sha1.append(repo.lookup(arg))
290 290
291 291 # calculate the graph for the supplied commits
292 292 for i, n in enumerate(want_sha1):
293 293 reachable.append(set())
294 294 visit = [n]
295 295 reachable[i].add(n)
296 296 while visit:
297 297 n = visit.pop(0)
298 298 if n in stop_sha1:
299 299 continue
300 300 for p in repo.changelog.parents(n):
301 301 if p not in reachable[i]:
302 302 reachable[i].add(p)
303 303 visit.append(p)
304 304 if p in stop_sha1:
305 305 continue
306 306
307 307 # walk the repository looking for commits that are in our
308 308 # reachability graph
309 309 for i, ctx in chlogwalk():
310 310 if i not in repo:
311 311 continue
312 312 n = repo.changelog.node(i)
313 313 mask = is_reachable(want_sha1, reachable, n)
314 314 if mask:
315 315 parentstr = b""
316 316 if parents:
317 317 pp = repo.changelog.parents(n)
318 318 if pp[0] != repo.nullid:
319 319 parentstr += b" " + short(pp[0])
320 320 if pp[1] != repo.nullid:
321 321 parentstr += b" " + short(pp[1])
322 322 if not full:
323 323 ui.write(b"%s%s\n" % (short(n), parentstr))
324 324 elif full == b"commit":
325 325 ui.write(b"%s%s\n" % (short(n), parentstr))
326 326 catcommit(ui, repo, n, b' ', ctx)
327 327 else:
328 328 (p1, p2) = repo.changelog.parents(n)
329 329 (h, h1, h2) = map(short, (n, p1, p2))
330 330 (i1, i2) = map(repo.changelog.rev, (p1, p2))
331 331
332 332 date = ctx.date()[0]
333 333 ui.write(b"%s %s:%s" % (date, h, mask))
334 334 mask = is_reachable(want_sha1, reachable, p1)
335 335 if i1 != nullrev and mask > 0:
336 336                 ui.write(b"%s:%s " % (h1, mask))
337 337 mask = is_reachable(want_sha1, reachable, p2)
338 338 if i2 != nullrev and mask > 0:
339 339 ui.write(b"%s:%s " % (h2, mask))
340 340 ui.write(b"\n")
341 341 if maxnr and count >= maxnr:
342 342 break
343 343 count += 1
344 344
345 345
346 346 # git rev-list tries to order things by date, and has the ability to stop
347 347 # at a given commit without walking the whole repo. TODO add the stop
348 348 # parameter
349 349 @command(
350 350 b'debug-rev-list',
351 351 [
352 352 (b'H', b'header', None, _(b'header')),
353 353 (b't', b'topo-order', None, _(b'topo-order')),
354 354 (b'p', b'parents', None, _(b'parents')),
355 355 (b'n', b'max-count', 0, _(b'max-count')),
356 356 ],
357 357 b'[OPTION]... REV...',
358 358 )
359 359 def revlist(ui, repo, *revs, **opts):
360 360 """print revisions"""
361 361 if opts['header']:
362 362 full = b"commit"
363 363 else:
364 364 full = None
365 365 copy = [x for x in revs]
366 366     revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
367 367
368 368
369 369 @command(
370 370 b'view',
371 371 [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))],
372 372 _(b'[-l LIMIT] [REVRANGE]'),
373 373 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
374 374 )
375 375 def view(ui, repo, *etc, **opts):
376 376 """start interactive history viewer"""
377 377 opts = pycompat.byteskwargs(opts)
378 378 os.chdir(repo.root)
379 379 optstr = b' '.join([b'--%s %s' % (k, v) for k, v in opts.items() if v])
380 380 if repo.filtername is None:
381 381         optstr += b' --hidden'
382 382
383 383 cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc))
384 384 ui.debug(b"running %s\n" % cmd)
385 385 ui.system(cmd, blockedtag=b'hgk_view')
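A small illustration of how view() assembles the hgk command line; the option values here are made up:

    opts = {b'limit': b'10', b'unset': b''}
    optstr = b' '.join([b'--%s %s' % (k, v) for k, v in opts.items() if v])
    assert optstr == b'--limit 10'  # falsy values are dropped
    # b' --hidden' is appended when the repo is unfiltered, and the
    # configured hgk path plus any revision range complete the command.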
@@ -1,2683 +1,2683 b''
1 1 # histedit.py - interactive history editing for mercurial
2 2 #
3 3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """interactive history editing
8 8
9 9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 10 is as follows, assuming the following history::
11 11
12 12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 13 | Add delta
14 14 |
15 15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 16 | Add gamma
17 17 |
18 18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 19 | Add beta
20 20 |
21 21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 22 Add alpha
23 23
24 24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 25 file open in your editor::
26 26
27 27 pick c561b4e977df Add beta
28 28 pick 030b686bedc4 Add gamma
29 29 pick 7c2fd3b9020c Add delta
30 30
31 31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 32 #
33 33 # Commits are listed from least to most recent
34 34 #
35 35 # Commands:
36 36 # p, pick = use commit
37 37 # e, edit = use commit, but allow edits before making new commit
38 38 # f, fold = use commit, but combine it with the one above
39 39 # r, roll = like fold, but discard this commit's description and date
40 40 # d, drop = remove commit from history
41 41 # m, mess = edit commit message without changing commit content
42 42 # b, base = checkout changeset and apply further changesets from there
43 43 #
44 44
45 45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
46 46 for each revision in your history. For example, if you had meant to add gamma
47 47 before beta, and then wanted to add delta in the same revision as beta, you
48 48 would reorganize the file to look like this::
49 49
50 50 pick 030b686bedc4 Add gamma
51 51 pick c561b4e977df Add beta
52 52 fold 7c2fd3b9020c Add delta
53 53
54 54 # Edit history between c561b4e977df and 7c2fd3b9020c
55 55 #
56 56 # Commits are listed from least to most recent
57 57 #
58 58 # Commands:
59 59 # p, pick = use commit
60 60 # e, edit = use commit, but allow edits before making new commit
61 61 # f, fold = use commit, but combine it with the one above
62 62 # r, roll = like fold, but discard this commit's description and date
63 63 # d, drop = remove commit from history
64 64 # m, mess = edit commit message without changing commit content
65 65 # b, base = checkout changeset and apply further changesets from there
66 66 #
67 67
68 68 At which point you close the editor and ``histedit`` starts working. When you
69 69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
70 70 those revisions together, offering you a chance to clean up the commit message::
71 71
72 72 Add beta
73 73 ***
74 74 Add delta
75 75
76 76 Edit the commit message to your liking, then close the editor. The date used
77 77 for the commit will be the later of the two commits' dates. For this example,
78 78 let's assume that the commit message was changed to ``Add beta and delta.``
79 79 After histedit has run and had a chance to remove any old or temporary
80 80 revisions it needed, the history looks like this::
81 81
82 82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
83 83 | Add beta and delta.
84 84 |
85 85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
86 86 | Add gamma
87 87 |
88 88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
89 89 Add alpha
90 90
91 91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
92 92 ones) until after it has completed all the editing operations, so it will
93 93 probably perform several strip operations when it's done. For the above example,
94 94 it had to run strip twice. Strip can be slow depending on a variety of factors,
95 95 so you might need to be a little patient. You can choose to keep the original
96 96 revisions by passing the ``--keep`` flag.
97 97
98 98 The ``edit`` operation will drop you back to a command prompt,
99 99 allowing you to edit files freely, or even use ``hg record`` to commit
100 100 some changes as a separate commit. When you're done, any remaining
101 101 uncommitted changes will be committed as well. When done, run ``hg
102 102 histedit --continue`` to finish this step. If there are uncommitted
103 103 changes, you'll be prompted for a new commit message, but the default
104 104 commit message will be the original message for the ``edit`` ed
105 105 revision, and the date of the original commit will be preserved.
106 106
107 107 The ``message`` operation will give you a chance to revise a commit
108 108 message without changing the contents. It's a shortcut for doing
109 109 ``edit`` immediately followed by `hg histedit --continue``.
110 110
111 111 If ``histedit`` encounters a conflict when moving a revision (while
112 112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
113 113 ``edit`` with the difference that it won't prompt you for a commit
114 114 message when done. If you decide at this point that you don't like how
115 115 much work it will be to rearrange history, or that you made a mistake,
116 116 you can use ``hg histedit --abort`` to abandon the new changes you
117 117 have made and return to the state before you attempted to edit your
118 118 history.
119 119
120 120 If we clone the histedit-ed example repository above and add four more
121 121 changes, such that we have the following history::
122 122
123 123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
124 124 | Add theta
125 125 |
126 126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
127 127 | Add eta
128 128 |
129 129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
130 130 | Add zeta
131 131 |
132 132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
133 133 | Add epsilon
134 134 |
135 135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
136 136 | Add beta and delta.
137 137 |
138 138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
139 139 | Add gamma
140 140 |
141 141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
142 142 Add alpha
143 143
144 144 If you run ``hg histedit --outgoing`` on the clone then it is the same
145 145 as running ``hg histedit 836302820282``. If you plan to push to a
146 146 repository that Mercurial does not detect to be related to the source
147 147 repo, you can add a ``--force`` option.
148 148
149 149 Config
150 150 ------
151 151
152 152 Histedit rule lines are truncated to 80 characters by default. You
153 153 can customize this behavior by setting a different length in your
154 154 configuration file::
155 155
156 156 [histedit]
157 157 linelen = 120 # truncate rule lines at 120 characters
158 158
159 159 The summary of a change can be customized as well::
160 160
161 161 [histedit]
162 162 summary-template = '{rev} {bookmarks} {desc|firstline}'
163 163
164 164 The customized summary should be kept short enough that rule lines
165 165 will fit in the configured line length. See above if that requires
166 166 customization.
167 167
168 168 ``hg histedit`` attempts to automatically choose an appropriate base
169 169 revision to use. To change which base revision is used, define a
170 170 revset in your configuration file::
171 171
172 172 [histedit]
173 173 defaultrev = only(.) & draft()
174 174
175 175 By default each edited revision needs to be present in histedit commands.
176 176 To remove revision you need to use ``drop`` operation. You can configure
177 177 the drop to be implicit for missing commits by adding::
178 178
179 179 [histedit]
180 180 dropmissing = True
181 181
182 182 By default, histedit will close the transaction after each action. For
183 183 performance purposes, you can configure histedit to use a single transaction
184 184 across the entire histedit. WARNING: This setting introduces a significant risk
185 185 of losing the work you've done in a histedit if the histedit aborts
186 186 unexpectedly::
187 187
188 188 [histedit]
189 189 singletransaction = True
190 190
191 191 """
192 192
193 193
194 194 # chistedit dependencies that are not available everywhere
195 195 try:
196 196 import fcntl
197 197 import termios
198 198 except ImportError:
199 199 fcntl = None
200 200 termios = None
201 201
202 202 import binascii
203 203 import functools
204 204 import os
205 205 import pickle
206 206 import struct
207 207
208 208 from mercurial.i18n import _
209 209 from mercurial.pycompat import (
210 210 getattr,
211 211 open,
212 212 )
213 213 from mercurial.node import (
214 214 bin,
215 215 hex,
216 216 short,
217 217 )
218 218 from mercurial import (
219 219 bundle2,
220 220 cmdutil,
221 221 context,
222 222 copies,
223 223 destutil,
224 224 discovery,
225 225 encoding,
226 226 error,
227 227 exchange,
228 228 extensions,
229 229 hg,
230 230 logcmdutil,
231 231 merge as mergemod,
232 232 mergestate as mergestatemod,
233 233 mergeutil,
234 234 obsolete,
235 235 pycompat,
236 236 registrar,
237 237 repair,
238 238 rewriteutil,
239 239 scmutil,
240 240 state as statemod,
241 241 util,
242 242 )
243 243 from mercurial.utils import (
244 244 dateutil,
245 245 stringutil,
246 246 urlutil,
247 247 )
248 248
249 249 cmdtable = {}
250 250 command = registrar.command(cmdtable)
251 251
252 252 configtable = {}
253 253 configitem = registrar.configitem(configtable)
254 254 configitem(
255 255 b'experimental',
256 256 b'histedit.autoverb',
257 257 default=False,
258 258 )
259 259 configitem(
260 260 b'histedit',
261 261 b'defaultrev',
262 262 default=None,
263 263 )
264 264 configitem(
265 265 b'histedit',
266 266 b'dropmissing',
267 267 default=False,
268 268 )
269 269 configitem(
270 270 b'histedit',
271 271 b'linelen',
272 272 default=80,
273 273 )
274 274 configitem(
275 275 b'histedit',
276 276 b'singletransaction',
277 277 default=False,
278 278 )
279 279 configitem(
280 280 b'ui',
281 281 b'interface.histedit',
282 282 default=None,
283 283 )
284 284 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
285 285 # TODO: Teach the text-based histedit interface to respect this config option
286 286 # before we make it non-experimental.
287 287 configitem(
288 288 b'histedit', b'later-commits-first', default=False, experimental=True
289 289 )
290 290
291 291 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
292 292 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
293 293 # be specifying the version(s) of Mercurial they are tested with, or
294 294 # leave the attribute unspecified.
295 295 testedwith = b'ships-with-hg-core'
296 296
297 297 actiontable = {}
298 298 primaryactions = set()
299 299 secondaryactions = set()
300 300 tertiaryactions = set()
301 301 internalactions = set()
302 302
303 303
304 304 def geteditcomment(ui, first, last):
305 305 """construct the editor comment
306 306 The comment includes::
307 307 - an intro
308 308 - sorted primary commands
309 309 - sorted short commands
310 310 - sorted long commands
311 311 - additional hints
312 312
313 313 Commands are only included once.
314 314 """
315 315 intro = _(
316 316 b"""Edit history between %s and %s
317 317
318 318 Commits are listed from least to most recent
319 319
320 320 You can reorder changesets by reordering the lines
321 321
322 322 Commands:
323 323 """
324 324 )
325 325 actions = []
326 326
327 327 def addverb(v):
328 328 a = actiontable[v]
329 329 lines = a.message.split(b"\n")
330 330 if len(a.verbs):
331 331 v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))
332 332 actions.append(b" %s = %s" % (v, lines[0]))
333 333 actions.extend([b' %s'] * (len(lines) - 1))
334 334
335 335 for v in (
336 336 sorted(primaryactions)
337 337 + sorted(secondaryactions)
338 338 + sorted(tertiaryactions)
339 339 ):
340 340 addverb(v)
341 341 actions.append(b'')
342 342
343 343 hints = []
344 344 if ui.configbool(b'histedit', b'dropmissing'):
345 345 hints.append(
346 346 b"Deleting a changeset from the list "
347 347 b"will DISCARD it from the edited history!"
348 348 )
349 349
350 350 lines = (intro % (first, last)).split(b'\n') + actions + hints
351 351
352 352 return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
353 353
354 354
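# The final join in geteditcomment() above turns every line into an
# editor comment, with a bare '#' for blank lines; e.g. the lines
# [b'Commands:', b''] render as:
#
#   # Commands:
#   #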
355 355 class histeditstate:
356 356 def __init__(self, repo):
357 357 self.repo = repo
358 358 self.actions = None
359 359 self.keep = None
360 360 self.topmost = None
361 361 self.parentctxnode = None
362 362 self.lock = None
363 363 self.wlock = None
364 364 self.backupfile = None
365 365 self.stateobj = statemod.cmdstate(repo, b'histedit-state')
366 366 self.replacements = []
367 367
368 368 def read(self):
369 369 """Load histedit state from disk and set fields appropriately."""
370 370 if not self.stateobj.exists():
371 371 cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))
372 372
373 373 data = self._read()
374 374
375 375 self.parentctxnode = data[b'parentctxnode']
376 376 actions = parserules(data[b'rules'], self)
377 377 self.actions = actions
378 378 self.keep = data[b'keep']
379 379 self.topmost = data[b'topmost']
380 380 self.replacements = data[b'replacements']
381 381 self.backupfile = data[b'backupfile']
382 382
383 383 def _read(self):
384 384 fp = self.repo.vfs.read(b'histedit-state')
385 385 if fp.startswith(b'v1\n'):
386 386 data = self._load()
387 387 parentctxnode, rules, keep, topmost, replacements, backupfile = data
388 388 else:
389 389 data = pickle.loads(fp)
390 390 parentctxnode, rules, keep, topmost, replacements = data
391 391 backupfile = None
392 392 rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules])
393 393
394 394 return {
395 395 b'parentctxnode': parentctxnode,
396 396 b"rules": rules,
397 397 b"keep": keep,
398 398 b"topmost": topmost,
399 399 b"replacements": replacements,
400 400 b"backupfile": backupfile,
401 401 }
402 402
403 403 def write(self, tr=None):
404 404 if tr:
405 405 tr.addfilegenerator(
406 406 b'histedit-state',
407 407 (b'histedit-state',),
408 408 self._write,
409 409 location=b'plain',
410 410 )
411 411 else:
412 412 with self.repo.vfs(b"histedit-state", b"w") as f:
413 413 self._write(f)
414 414
415 415 def _write(self, fp):
416 416 fp.write(b'v1\n')
417 417 fp.write(b'%s\n' % hex(self.parentctxnode))
418 418 fp.write(b'%s\n' % hex(self.topmost))
419 419 fp.write(b'%s\n' % (b'True' if self.keep else b'False'))
420 420 fp.write(b'%d\n' % len(self.actions))
421 421 for action in self.actions:
422 422 fp.write(b'%s\n' % action.tostate())
423 423 fp.write(b'%d\n' % len(self.replacements))
424 424 for replacement in self.replacements:
425 425 fp.write(
426 426 b'%s%s\n'
427 427 % (
428 428 hex(replacement[0]),
429 429 b''.join(hex(r) for r in replacement[1]),
430 430 )
431 431 )
432 432 backupfile = self.backupfile
433 433 if not backupfile:
434 434 backupfile = b''
435 435 fp.write(b'%s\n' % backupfile)
436 436
437 437 def _load(self):
438 438 fp = self.repo.vfs(b'histedit-state', b'r')
439 439 lines = [l[:-1] for l in fp.readlines()]
440 440
441 441 index = 0
442 442 lines[index] # version number
443 443 index += 1
444 444
445 445 parentctxnode = bin(lines[index])
446 446 index += 1
447 447
448 448 topmost = bin(lines[index])
449 449 index += 1
450 450
451 451 keep = lines[index] == b'True'
452 452 index += 1
453 453
454 454 # Rules
455 455 rules = []
456 456 rulelen = int(lines[index])
457 457 index += 1
458 for i in pycompat.xrange(rulelen):
458 for i in range(rulelen):
459 459 ruleaction = lines[index]
460 460 index += 1
461 461 rule = lines[index]
462 462 index += 1
463 463 rules.append((ruleaction, rule))
464 464
465 465 # Replacements
466 466 replacements = []
467 467 replacementlen = int(lines[index])
468 468 index += 1
469 for i in pycompat.xrange(replacementlen):
469 for i in range(replacementlen):
470 470 replacement = lines[index]
471 471 original = bin(replacement[:40])
472 472 succ = [
473 473 bin(replacement[i : i + 40])
474 474 for i in range(40, len(replacement), 40)
475 475 ]
476 476 replacements.append((original, succ))
477 477 index += 1
478 478
479 479 backupfile = lines[index]
480 480 index += 1
481 481
482 482 fp.close()
483 483
484 484 return parentctxnode, rules, keep, topmost, replacements, backupfile
485 485
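    # The v1 on-disk layout produced by _write() and parsed by _load():
    #
    #   v1
    #   <parentctxnode hex>
    #   <topmost hex>
    #   True|False                   (keep flag)
    #   <number of actions>
    #   <verb> / <node hex>          (two lines per action)
    #   <number of replacements>
    #   <oldnode hex><succ hex>...   (one line each, in 40-char hex chunks)
    #   <backupfile name>            (empty if none)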
486 486 def clear(self):
487 487 if self.inprogress():
488 488 self.repo.vfs.unlink(b'histedit-state')
489 489
490 490 def inprogress(self):
491 491 return self.repo.vfs.exists(b'histedit-state')
492 492
493 493
494 494 class histeditaction:
495 495 def __init__(self, state, node):
496 496 self.state = state
497 497 self.repo = state.repo
498 498 self.node = node
499 499
500 500 @classmethod
501 501 def fromrule(cls, state, rule):
502 502 """Parses the given rule, returning an instance of the histeditaction."""
503 503 ruleid = rule.strip().split(b' ', 1)[0]
504 504 # ruleid can be anything from rev numbers, hashes, "bookmarks" etc
505 505         # Validate the rule id and get the rule hash
506 506 try:
507 507 rev = bin(ruleid)
508 508 except binascii.Error:
509 509 try:
510 510 _ctx = scmutil.revsingle(state.repo, ruleid)
511 511 rulehash = _ctx.hex()
512 512 rev = bin(rulehash)
513 513 except error.RepoLookupError:
514 514 raise error.ParseError(_(b"invalid changeset %s") % ruleid)
515 515 return cls(state, rev)
516 516
517 517 def verify(self, prev, expected, seen):
518 518 """Verifies semantic correctness of the rule"""
519 519 repo = self.repo
520 520 ha = hex(self.node)
521 521 self.node = scmutil.resolvehexnodeidprefix(repo, ha)
522 522 if self.node is None:
523 523 raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])
524 524 self._verifynodeconstraints(prev, expected, seen)
525 525
526 526 def _verifynodeconstraints(self, prev, expected, seen):
527 527         # by default a command needs a node in the edited list
528 528 if self.node not in expected:
529 529 raise error.ParseError(
530 530 _(b'%s "%s" changeset was not a candidate')
531 531 % (self.verb, short(self.node)),
532 532 hint=_(b'only use listed changesets'),
533 533 )
534 534 # and only one command per node
535 535 if self.node in seen:
536 536 raise error.ParseError(
537 537 _(b'duplicated command for changeset %s') % short(self.node)
538 538 )
539 539
540 540 def torule(self):
541 541 """build a histedit rule line for an action
542 542
543 543 by default lines are in the form:
544 544 <hash> <rev> <summary>
545 545 """
546 546 ctx = self.repo[self.node]
547 547 ui = self.repo.ui
548 548 # We don't want color codes in the commit message template, so
549 549 # disable the label() template function while we render it.
550 550 with ui.configoverride(
551 551 {(b'templatealias', b'label(l,x)'): b"x"}, b'histedit'
552 552 ):
553 553 summary = cmdutil.rendertemplate(
554 554 ctx, ui.config(b'histedit', b'summary-template')
555 555 )
556 556 line = b'%s %s %s' % (self.verb, ctx, stringutil.firstline(summary))
557 557 # trim to 75 columns by default so it's not stupidly wide in my editor
558 558         # (the remaining 5 columns are left for the verb)
559 559 maxlen = self.repo.ui.configint(b'histedit', b'linelen')
560 560 maxlen = max(maxlen, 22) # avoid truncating hash
561 561 return stringutil.ellipsis(line, maxlen)
562 562
563 563 def tostate(self):
564 564 """Print an action in format used by histedit state files
565 565 (the first line is a verb, the remainder is the second)
566 566 """
567 567 return b"%s\n%s" % (self.verb, hex(self.node))
568 568
569 569 def run(self):
570 570 """Runs the action. The default behavior is simply apply the action's
571 571 rulectx onto the current parentctx."""
572 572 self.applychange()
573 573 self.continuedirty()
574 574 return self.continueclean()
575 575
576 576 def applychange(self):
577 577 """Applies the changes from this action's rulectx onto the current
578 578 parentctx, but does not commit them."""
579 579 repo = self.repo
580 580 rulectx = repo[self.node]
581 581 with repo.ui.silent():
582 582 hg.update(repo, self.state.parentctxnode, quietempty=True)
583 583 stats = applychanges(repo.ui, repo, rulectx, {})
584 584 repo.dirstate.setbranch(rulectx.branch())
585 585 if stats.unresolvedcount:
586 586 raise error.InterventionRequired(
587 587 _(b'Fix up the change (%s %s)') % (self.verb, short(self.node)),
588 588 hint=_(b'hg histedit --continue to resume'),
589 589 )
590 590
591 591 def continuedirty(self):
592 592 """Continues the action when changes have been applied to the working
593 593 copy. The default behavior is to commit the dirty changes."""
594 594 repo = self.repo
595 595 rulectx = repo[self.node]
596 596
597 597 editor = self.commiteditor()
598 598 commit = commitfuncfor(repo, rulectx)
599 599 if repo.ui.configbool(b'rewrite', b'update-timestamp'):
600 600 date = dateutil.makedate()
601 601 else:
602 602 date = rulectx.date()
603 603 commit(
604 604 text=rulectx.description(),
605 605 user=rulectx.user(),
606 606 date=date,
607 607 extra=rulectx.extra(),
608 608 editor=editor,
609 609 )
610 610
611 611 def commiteditor(self):
612 612 """The editor to be used to edit the commit message."""
613 613 return False
614 614
615 615 def continueclean(self):
616 616 """Continues the action when the working copy is clean. The default
617 617 behavior is to accept the current commit as the new version of the
618 618 rulectx."""
619 619 ctx = self.repo[b'.']
620 620 if ctx.node() == self.state.parentctxnode:
621 621 self.repo.ui.warn(
622 622 _(b'%s: skipping changeset (no changes)\n') % short(self.node)
623 623 )
624 624 return ctx, [(self.node, tuple())]
625 625 if ctx.node() == self.node:
626 626 # Nothing changed
627 627 return ctx, []
628 628 return ctx, [(self.node, (ctx.node(),))]
629 629
630 630
631 631 def commitfuncfor(repo, src):
632 632 """Build a commit function for the replacement of <src>
633 633
634 634     This function ensures we apply the same treatment to all changesets.
635 635
636 636 - Add a 'histedit_source' entry in extra.
637 637
638 638 Note that fold has its own separated logic because its handling is a bit
639 639 different and not easily factored out of the fold method.
640 640 """
641 641 phasemin = src.phase()
642 642
643 643 def commitfunc(**kwargs):
644 644 overrides = {(b'phases', b'new-commit'): phasemin}
645 645 with repo.ui.configoverride(overrides, b'histedit'):
646 646 extra = kwargs.get('extra', {}).copy()
647 647 extra[b'histedit_source'] = src.hex()
648 648 kwargs['extra'] = extra
649 649 return repo.commit(**kwargs)
650 650
651 651 return commitfunc
652 652
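# Sketch of how the factory above is used elsewhere in this file
# (see continuedirty() in histeditaction):
#
#   commit = commitfuncfor(repo, rulectx)
#   commit(text=rulectx.description(), user=rulectx.user(),
#          date=rulectx.date(), extra=rulectx.extra())
#
# Each commit created this way is created at rulectx's phase at
# minimum and records a 'histedit_source' extra pointing back at
# the source changeset.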
653 653
654 654 def applychanges(ui, repo, ctx, opts):
655 655 """Merge changeset from ctx (only) in the current working directory"""
656 656 if ctx.p1().node() == repo.dirstate.p1():
657 657         # edits are "in place"; we do not need to do any merge,
658 658         # just apply the changes on the parent for editing
659 659 with ui.silent():
660 660 cmdutil.revert(ui, repo, ctx, all=True)
661 661 stats = mergemod.updateresult(0, 0, 0, 0)
662 662 else:
663 663 try:
664 664 # ui.forcemerge is an internal variable, do not document
665 665 repo.ui.setconfig(
666 666 b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
667 667 )
668 668 stats = mergemod.graft(
669 669 repo,
670 670 ctx,
671 671 labels=[
672 672 b'already edited',
673 673 b'current change',
674 674 b'parent of current change',
675 675 ],
676 676 )
677 677 finally:
678 678 repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
679 679 return stats
680 680
681 681
682 682 def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):
683 683 """collapse the set of revisions from first to last as new one.
684 684
685 685 Expected commit options are:
686 686 - message
687 687 - date
688 688 - username
689 689 Commit message is edited in all cases.
690 690
691 691 This function works in memory."""
692 692 ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev()))
693 693 if not ctxs:
694 694 return None
695 695 for c in ctxs:
696 696 if not c.mutable():
697 697 raise error.ParseError(
698 698 _(b"cannot fold into public change %s") % short(c.node())
699 699 )
700 700 base = firstctx.p1()
701 701
702 702 # commit a new version of the old changeset, including the update
703 703 # collect all files which might be affected
704 704 files = set()
705 705 for ctx in ctxs:
706 706 files.update(ctx.files())
707 707
708 708 # Recompute copies (avoid recording a -> b -> a)
709 709 copied = copies.pathcopies(base, lastctx)
710 710
711 711 # prune files which were reverted by the updates
712 712 files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]
713 713 # commit version of these files as defined by head
714 714 headmf = lastctx.manifest()
715 715
716 716 def filectxfn(repo, ctx, path):
717 717 if path in headmf:
718 718 fctx = lastctx[path]
719 719 flags = fctx.flags()
720 720 mctx = context.memfilectx(
721 721 repo,
722 722 ctx,
723 723 fctx.path(),
724 724 fctx.data(),
725 725 islink=b'l' in flags,
726 726 isexec=b'x' in flags,
727 727 copysource=copied.get(path),
728 728 )
729 729 return mctx
730 730 return None
731 731
732 732 if commitopts.get(b'message'):
733 733 message = commitopts[b'message']
734 734 else:
735 735 message = firstctx.description()
736 736 user = commitopts.get(b'user')
737 737 date = commitopts.get(b'date')
738 738 extra = commitopts.get(b'extra')
739 739
740 740 parents = (firstctx.p1().node(), firstctx.p2().node())
741 741 editor = None
742 742 if not skipprompt:
743 743 editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold')
744 744 new = context.memctx(
745 745 repo,
746 746 parents=parents,
747 747 text=message,
748 748 files=files,
749 749 filectxfn=filectxfn,
750 750 user=user,
751 751 date=date,
752 752 extra=extra,
753 753 editor=editor,
754 754 )
755 755 return repo.commitctx(new)
756 756
757 757
758 758 def _isdirtywc(repo):
759 759 return repo[None].dirty(missing=True)
760 760
761 761
762 762 def abortdirty():
763 763 raise error.StateError(
764 764 _(b'working copy has pending changes'),
765 765 hint=_(
766 766 b'amend, commit, or revert them and run histedit '
767 767 b'--continue, or abort with histedit --abort'
768 768 ),
769 769 )
770 770
771 771
772 772 def action(verbs, message, priority=False, internal=False):
773 773 def wrap(cls):
774 774 assert not priority or not internal
775 775 verb = verbs[0]
776 776 if priority:
777 777 primaryactions.add(verb)
778 778 elif internal:
779 779 internalactions.add(verb)
780 780 elif len(verbs) > 1:
781 781 secondaryactions.add(verb)
782 782 else:
783 783 tertiaryactions.add(verb)
784 784
785 785 cls.verb = verb
786 786 cls.verbs = verbs
787 787 cls.message = message
788 788 for verb in verbs:
789 789 actiontable[verb] = cls
790 790 return cls
791 791
792 792 return wrap
793 793
794 794
795 795 @action([b'pick', b'p'], _(b'use commit'), priority=True)
796 796 class pick(histeditaction):
797 797 def run(self):
798 798 rulectx = self.repo[self.node]
799 799 if rulectx.p1().node() == self.state.parentctxnode:
800 800 self.repo.ui.debug(b'node %s unchanged\n' % short(self.node))
801 801 return rulectx, []
802 802
803 803 return super(pick, self).run()
804 804
805 805
806 806 @action(
807 807 [b'edit', b'e'],
808 808 _(b'use commit, but allow edits before making new commit'),
809 809 priority=True,
810 810 )
811 811 class edit(histeditaction):
812 812 def run(self):
813 813 repo = self.repo
814 814 rulectx = repo[self.node]
815 815 hg.update(repo, self.state.parentctxnode, quietempty=True)
816 816 applychanges(repo.ui, repo, rulectx, {})
817 817 hint = _(b'to edit %s, `hg histedit --continue` after making changes')
818 818 raise error.InterventionRequired(
819 819 _(b'Editing (%s), commit as needed now to split the change')
820 820 % short(self.node),
821 821 hint=hint % short(self.node),
822 822 )
823 823
824 824 def commiteditor(self):
825 825 return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit')
826 826
827 827
828 828 @action([b'fold', b'f'], _(b'use commit, but combine it with the one above'))
829 829 class fold(histeditaction):
830 830 def verify(self, prev, expected, seen):
831 831 """Verifies semantic correctness of the fold rule"""
832 832 super(fold, self).verify(prev, expected, seen)
833 833 repo = self.repo
834 834 if not prev:
835 835 c = repo[self.node].p1()
836 836         elif prev.verb not in (b'pick', b'base'):
837 837 return
838 838 else:
839 839 c = repo[prev.node]
840 840 if not c.mutable():
841 841 raise error.ParseError(
842 842 _(b"cannot fold into public change %s") % short(c.node())
843 843 )
844 844
845 845 def continuedirty(self):
846 846 repo = self.repo
847 847 rulectx = repo[self.node]
848 848
849 849 commit = commitfuncfor(repo, rulectx)
850 850 commit(
851 851 text=b'fold-temp-revision %s' % short(self.node),
852 852 user=rulectx.user(),
853 853 date=rulectx.date(),
854 854 extra=rulectx.extra(),
855 855 )
856 856
857 857 def continueclean(self):
858 858 repo = self.repo
859 859 ctx = repo[b'.']
860 860 rulectx = repo[self.node]
861 861 parentctxnode = self.state.parentctxnode
862 862 if ctx.node() == parentctxnode:
863 863 repo.ui.warn(_(b'%s: empty changeset\n') % short(self.node))
864 864 return ctx, [(self.node, (parentctxnode,))]
865 865
866 866 parentctx = repo[parentctxnode]
867 867 newcommits = {
868 868 c.node()
869 869 for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev())
870 870 }
871 871 if not newcommits:
872 872 repo.ui.warn(
873 873 _(
874 874 b'%s: cannot fold - working copy is not a '
875 875 b'descendant of previous commit %s\n'
876 876 )
877 877 % (short(self.node), short(parentctxnode))
878 878 )
879 879 return ctx, [(self.node, (ctx.node(),))]
880 880
881 881 middlecommits = newcommits.copy()
882 882 middlecommits.discard(ctx.node())
883 883
884 884 return self.finishfold(
885 885 repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits
886 886 )
887 887
888 888 def skipprompt(self):
889 889 """Returns true if the rule should skip the message editor.
890 890
891 891 For example, 'fold' wants to show an editor, but 'rollup'
892 892 doesn't want to.
893 893 """
894 894 return False
895 895
896 896 def mergedescs(self):
897 897 """Returns true if the rule should merge messages of multiple changes.
898 898
899 899 This exists mainly so that 'rollup' rules can be a subclass of
900 900 'fold'.
901 901 """
902 902 return True
903 903
904 904 def firstdate(self):
905 905 """Returns true if the rule should preserve the date of the first
906 906 change.
907 907
908 908 This exists mainly so that 'rollup' rules can be a subclass of
909 909 'fold'.
910 910 """
911 911 return False
912 912
913 913 def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
914 914 mergemod.update(ctx.p1())
915 915 ### prepare new commit data
916 916 commitopts = {}
917 917 commitopts[b'user'] = ctx.user()
918 918 # commit message
919 919 if not self.mergedescs():
920 920 newmessage = ctx.description()
921 921 else:
922 922 newmessage = (
923 923 b'\n***\n'.join(
924 924 [ctx.description()]
925 925 + [repo[r].description() for r in internalchanges]
926 926 + [oldctx.description()]
927 927 )
928 928 + b'\n'
929 929 )
930 930 commitopts[b'message'] = newmessage
931 931 # date
932 932 if self.firstdate():
933 933 commitopts[b'date'] = ctx.date()
934 934 else:
935 935 commitopts[b'date'] = max(ctx.date(), oldctx.date())
936 936 # if date is to be updated to current
937 937 if ui.configbool(b'rewrite', b'update-timestamp'):
938 938 commitopts[b'date'] = dateutil.makedate()
939 939
940 940 extra = ctx.extra().copy()
941 941 # histedit_source
942 942         # note: ctx is likely a temporary commit but that's the best we can do
943 943 # here. This is sufficient to solve issue3681 anyway.
944 944 extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex())
945 945 commitopts[b'extra'] = extra
946 946 phasemin = max(ctx.phase(), oldctx.phase())
947 947 overrides = {(b'phases', b'new-commit'): phasemin}
948 948 with repo.ui.configoverride(overrides, b'histedit'):
949 949 n = collapse(
950 950 repo,
951 951 ctx,
952 952 repo[newnode],
953 953 commitopts,
954 954 skipprompt=self.skipprompt(),
955 955 )
956 956 if n is None:
957 957 return ctx, []
958 958 mergemod.update(repo[n])
959 959 replacements = [
960 960 (oldctx.node(), (newnode,)),
961 961 (ctx.node(), (n,)),
962 962 (newnode, (n,)),
963 963 ]
964 964 for ich in internalchanges:
965 965 replacements.append((ich, (n,)))
966 966 return repo[n], replacements
967 967
968 968
969 969 @action(
970 970 [b'base', b'b'],
971 971 _(b'checkout changeset and apply further changesets from there'),
972 972 )
973 973 class base(histeditaction):
974 974 def run(self):
975 975 if self.repo[b'.'].node() != self.node:
976 976 mergemod.clean_update(self.repo[self.node])
977 977 return self.continueclean()
978 978
979 979 def continuedirty(self):
980 980 abortdirty()
981 981
982 982 def continueclean(self):
983 983 basectx = self.repo[b'.']
984 984 return basectx, []
985 985
986 986 def _verifynodeconstraints(self, prev, expected, seen):
987 987         # base can only be used with a node not in the edited set
988 988 if self.node in expected:
989 989 msg = _(b'%s "%s" changeset was an edited list candidate')
990 990 raise error.ParseError(
991 991 msg % (self.verb, short(self.node)),
992 992 hint=_(b'base must only use unlisted changesets'),
993 993 )
994 994
995 995
996 996 @action(
997 997 [b'_multifold'],
998 998 _(
999 999 """fold subclass used for when multiple folds happen in a row
1000 1000
1001 1001 We only want to fire the editor for the folded message once when
1002 1002 (say) four changes are folded down into a single change. This is
1003 1003 similar to rollup, but we should preserve both messages so that
1004 1004 when the last fold operation runs we can show the user all the
1005 1005 commit messages in their editor.
1006 1006 """
1007 1007 ),
1008 1008 internal=True,
1009 1009 )
1010 1010 class _multifold(fold):
1011 1011 def skipprompt(self):
1012 1012 return True
1013 1013
1014 1014
1015 1015 @action(
1016 1016 [b"roll", b"r"],
1017 1017 _(b"like fold, but discard this commit's description and date"),
1018 1018 )
1019 1019 class rollup(fold):
1020 1020 def mergedescs(self):
1021 1021 return False
1022 1022
1023 1023 def skipprompt(self):
1024 1024 return True
1025 1025
1026 1026 def firstdate(self):
1027 1027 return True
1028 1028
1029 1029
1030 1030 @action([b"drop", b"d"], _(b'remove commit from history'))
1031 1031 class drop(histeditaction):
1032 1032 def run(self):
1033 1033 parentctx = self.repo[self.state.parentctxnode]
1034 1034 return parentctx, [(self.node, tuple())]
1035 1035
1036 1036
1037 1037 @action(
1038 1038 [b"mess", b"m"],
1039 1039 _(b'edit commit message without changing commit content'),
1040 1040 priority=True,
1041 1041 )
1042 1042 class message(histeditaction):
1043 1043 def commiteditor(self):
1044 1044 return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess')
1045 1045
1046 1046
1047 1047 def findoutgoing(ui, repo, remote=None, force=False, opts=None):
1048 1048 """utility function to find the first outgoing changeset
1049 1049
1050 1050 Used by initialization code"""
1051 1051 if opts is None:
1052 1052 opts = {}
1053 1053 path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
1054 1054 dest = path.pushloc or path.loc
1055 1055
1056 1056 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1057 1057
1058 1058 revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
1059 1059 other = hg.peer(repo, opts, dest)
1060 1060
1061 1061 if revs:
1062 1062 revs = [repo.lookup(rev) for rev in revs]
1063 1063
1064 1064 outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
1065 1065 if not outgoing.missing:
1066 1066 raise error.StateError(_(b'no outgoing ancestors'))
1067 1067 roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
1068 1068 if len(roots) > 1:
1069 1069 msg = _(b'there are ambiguous outgoing revisions')
1070 1070 hint = _(b"see 'hg help histedit' for more detail")
1071 1071 raise error.StateError(msg, hint=hint)
1072 1072 return repo[roots[0]].node()
1073 1073
1074 1074
1075 1075 # Curses Support
1076 1076 try:
1077 1077 import curses
1078 1078 except ImportError:
1079 1079 curses = None
1080 1080
1081 1081 KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll']
1082 1082 ACTION_LABELS = {
1083 1083 b'fold': b'^fold',
1084 1084 b'roll': b'^roll',
1085 1085 }
1086 1086
1087 1087 COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5
1088 1088 COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8
1089 1089 COLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11
1090 1090
1091 1091 E_QUIT, E_HISTEDIT = 1, 2
1092 1092 E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7
1093 1093 MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3
1094 1094
1095 1095 KEYTABLE = {
1096 1096 b'global': {
1097 1097 b'h': b'next-action',
1098 1098 b'KEY_RIGHT': b'next-action',
1099 1099 b'l': b'prev-action',
1100 1100 b'KEY_LEFT': b'prev-action',
1101 1101 b'q': b'quit',
1102 1102 b'c': b'histedit',
1103 1103 b'C': b'histedit',
1104 1104 b'v': b'showpatch',
1105 1105 b'?': b'help',
1106 1106 },
1107 1107 MODE_RULES: {
1108 1108 b'd': b'action-drop',
1109 1109 b'e': b'action-edit',
1110 1110 b'f': b'action-fold',
1111 1111 b'm': b'action-mess',
1112 1112 b'p': b'action-pick',
1113 1113 b'r': b'action-roll',
1114 1114 b' ': b'select',
1115 1115 b'j': b'down',
1116 1116 b'k': b'up',
1117 1117 b'KEY_DOWN': b'down',
1118 1118 b'KEY_UP': b'up',
1119 1119 b'J': b'move-down',
1120 1120 b'K': b'move-up',
1121 1121 b'KEY_NPAGE': b'move-down',
1122 1122 b'KEY_PPAGE': b'move-up',
1123 1123 b'0': b'goto', # Used for 0..9
1124 1124 },
1125 1125 MODE_PATCH: {
1126 1126 b' ': b'page-down',
1127 1127 b'KEY_NPAGE': b'page-down',
1128 1128 b'KEY_PPAGE': b'page-up',
1129 1129 b'j': b'line-down',
1130 1130 b'k': b'line-up',
1131 1131 b'KEY_DOWN': b'line-down',
1132 1132 b'KEY_UP': b'line-up',
1133 1133 b'J': b'down',
1134 1134 b'K': b'up',
1135 1135 },
1136 1136 MODE_HELP: {},
1137 1137 }
1138 1138
1139 1139
1140 1140 def screen_size():
1141 1141 return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b' '))
1142 1142
1143 1143
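# screen_size() above unpacks the TIOCGWINSZ ioctl on fd 1 into two
# shorts, (rows, columns); it depends on the fcntl/termios imports
# guarded at the top of this file, so it only works where the curses
# interface is available.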
1144 1144 class histeditrule:
1145 1145 def __init__(self, ui, ctx, pos, action=b'pick'):
1146 1146 self.ui = ui
1147 1147 self.ctx = ctx
1148 1148 self.action = action
1149 1149 self.origpos = pos
1150 1150 self.pos = pos
1151 1151 self.conflicts = []
1152 1152
1153 1153 def __bytes__(self):
1154 1154 # Example display of several histeditrules:
1155 1155 #
1156 1156 # #10 pick 316392:06a16c25c053 add option to skip tests
1157 1157 # #11 ^roll 316393:71313c964cc5 <RED>oops a fixup commit</RED>
1158 1158 # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h
1159 1159 # #13 ^fold 316395:14ce5803f4c3 fix warnings
1160 1160 #
1161 1161 # The carets point to the changeset being folded into ("roll this
1162 1162 # changeset into the changeset above").
1163 1163 return b'%s%s' % (self.prefix, self.desc)
1164 1164
1165 1165 __str__ = encoding.strmethod(__bytes__)
1166 1166
1167 1167 @property
1168 1168 def prefix(self):
1169 1169 # Some actions ('fold' and 'roll') combine a patch with a
1170 1170 # previous one. Add a marker showing which patch they apply
1171 1171 # to.
1172 1172 action = ACTION_LABELS.get(self.action, self.action)
1173 1173
1174 1174 h = self.ctx.hex()[0:12]
1175 1175 r = self.ctx.rev()
1176 1176
1177 1177 return b"#%s %s %d:%s " % (
1178 1178 (b'%d' % self.origpos).ljust(2),
1179 1179 action.ljust(6),
1180 1180 r,
1181 1181 h,
1182 1182 )
1183 1183
1184 1184 @util.propertycache
1185 1185 def desc(self):
1186 1186 summary = cmdutil.rendertemplate(
1187 1187 self.ctx, self.ui.config(b'histedit', b'summary-template')
1188 1188 )
1189 1189 if summary:
1190 1190 return summary
1191 1191 # This is split off from the prefix property so that we can
1192 1192 # separately make the description for 'roll' red (since it
1193 1193 # will get discarded).
1194 1194 return stringutil.firstline(self.ctx.description())
1195 1195
1196 1196 def checkconflicts(self, other):
1197 1197 if other.pos > self.pos and other.origpos <= self.origpos:
1198 1198 if set(other.ctx.files()) & set(self.ctx.files()) != set():
1199 1199 self.conflicts.append(other)
1200 1200 return self.conflicts
1201 1201
1202 1202 if other in self.conflicts:
1203 1203 self.conflicts.remove(other)
1204 1204 return self.conflicts
1205 1205
1206 1206
1207 1207 def makecommands(rules):
1208 1208 """Returns a list of commands consumable by histedit --commands based on
1209 1209 our list of rules"""
1210 1210 commands = []
1211 1211 for rule in rules:
1212 1212 commands.append(b'%s %s\n' % (rule.action, rule.ctx))
1213 1213 return commands
1214 1214
1215 1215
1216 1216 def addln(win, y, x, line, color=None):
1217 1217 """Add a line to the given window left padding but 100% filled with
1218 1218 whitespace characters, so that the color appears on the whole line"""
1219 1219 maxy, maxx = win.getmaxyx()
1220 1220 length = maxx - 1 - x
1221 1221 line = bytes(line).ljust(length)[:length]
1222 1222 if y < 0:
1223 1223 y = maxy + y
1224 1224 if x < 0:
1225 1225 x = maxx + x
1226 1226 if color:
1227 1227 win.addstr(y, x, line, color)
1228 1228 else:
1229 1229 win.addstr(y, x, line)
1230 1230
1231 1231
1232 1232 def _trunc_head(line, n):
1233 1233 if len(line) <= n:
1234 1234 return line
1235 1235 return b'> ' + line[-(n - 2) :]
1236 1236
1237 1237
1238 1238 def _trunc_tail(line, n):
1239 1239 if len(line) <= n:
1240 1240 return line
1241 1241 return line[: n - 2] + b' >'
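# e.g. _trunc_head(b'abcdefgh', 6) == b'> efgh' and _trunc_tail(b'abcdefgh', 6) == b'abcd >'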
1242 1242
1243 1243
1244 1244 class _chistedit_state:
1245 1245 def __init__(
1246 1246 self,
1247 1247 repo,
1248 1248 rules,
1249 1249 stdscr,
1250 1250 ):
1251 1251 self.repo = repo
1252 1252 self.rules = rules
1253 1253 self.stdscr = stdscr
1254 1254 self.later_on_top = repo.ui.configbool(
1255 1255 b'histedit', b'later-commits-first'
1256 1256 )
1257 1257 # The current item in display order, initialized to point to the top
1258 1258 # of the screen.
1259 1259 self.pos = 0
1260 1260 self.selected = None
1261 1261 self.mode = (MODE_INIT, MODE_INIT)
1262 1262 self.page_height = None
1263 1263 self.modes = {
1264 1264 MODE_RULES: {
1265 1265 b'line_offset': 0,
1266 1266 },
1267 1267 MODE_PATCH: {
1268 1268 b'line_offset': 0,
1269 1269 },
1270 1270 }
1271 1271
1272 1272 def render_commit(self, win):
1273 1273 """Renders the commit window that shows the log of the current selected
1274 1274 commit"""
1275 1275 rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
1276 1276
1277 1277 ctx = rule.ctx
1278 1278 win.box()
1279 1279
1280 1280 maxy, maxx = win.getmaxyx()
1281 1281 length = maxx - 3
1282 1282
1283 1283 line = b"changeset: %d:%s" % (ctx.rev(), ctx.hex()[:12])
1284 1284 win.addstr(1, 1, line[:length])
1285 1285
1286 1286 line = b"user: %s" % ctx.user()
1287 1287 win.addstr(2, 1, line[:length])
1288 1288
1289 1289 bms = self.repo.nodebookmarks(ctx.node())
1290 1290 line = b"bookmark: %s" % b' '.join(bms)
1291 1291 win.addstr(3, 1, line[:length])
1292 1292
1293 1293 line = b"summary: %s" % stringutil.firstline(ctx.description())
1294 1294 win.addstr(4, 1, line[:length])
1295 1295
1296 1296 line = b"files: "
1297 1297 win.addstr(5, 1, line)
1298 1298 fnx = 1 + len(line)
1299 1299 fnmaxx = length - fnx + 1
1300 1300 y = 5
1301 1301 fnmaxn = maxy - (1 + y) - 1
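# fnmaxn is the number of rows left for the file list; when it overflows,
# the remaining names are collapsed onto one truncated line.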
1302 1302 files = ctx.files()
1303 1303 for i, line1 in enumerate(files):
1304 1304 if len(files) > fnmaxn and i == fnmaxn - 1:
1305 1305 win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))
1306 1306 y = y + 1
1307 1307 break
1308 1308 win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
1309 1309 y = y + 1
1310 1310
1311 1311 conflicts = rule.conflicts
1312 1312 if len(conflicts) > 0:
1313 1313 conflictstr = b','.join(map(lambda r: r.ctx.hex()[:12], conflicts))
1314 1314 conflictstr = b"changed files overlap with %s" % conflictstr
1315 1315 else:
1316 1316 conflictstr = b'no overlap'
1317 1317
1318 1318 win.addstr(y, 1, conflictstr[:length])
1319 1319 win.noutrefresh()
1320 1320
1321 1321 def helplines(self):
1322 1322 if self.mode[0] == MODE_PATCH:
1323 1323 help = b"""\
1324 1324 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
1325 1325 pgup: prev page, space/pgdn: next page, c: commit, q: abort
1326 1326 """
1327 1327 else:
1328 1328 help = b"""\
1329 1329 ?: help, k/up: move up, j/down: move down, space: select, v: view patch
1330 1330 d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
1331 1331 pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
1332 1332 """
1333 1333 if self.later_on_top:
1334 1334 help += b"Newer commits are shown above older commits.\n"
1335 1335 else:
1336 1336 help += b"Older commits are shown above newer commits.\n"
1337 1337 return help.splitlines()
1338 1338
1339 1339 def render_help(self, win):
1340 1340 maxy, maxx = win.getmaxyx()
1341 1341 for y, line in enumerate(self.helplines()):
1342 1342 if y >= maxy:
1343 1343 break
1344 1344 addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
1345 1345 win.noutrefresh()
1346 1346
1347 1347 def layout(self):
1348 1348 maxy, maxx = self.stdscr.getmaxyx()
1349 1349 helplen = len(self.helplines())
1350 1350 mainlen = maxy - helplen - 12
1351 1351 if mainlen < 1:
1352 1352 raise error.Abort(
1353 1353 _(b"terminal dimensions %d by %d too small for curses histedit")
1354 1354 % (maxy, maxx),
1355 1355 hint=_(
1356 1356 b"enlarge your terminal or use --config ui.interface=text"
1357 1357 ),
1358 1358 )
1359 1359 return {
1360 1360 b'commit': (12, maxx),
1361 1361 b'help': (helplen, maxx),
1362 1362 b'main': (mainlen, maxx),
1363 1363 }
1364 1364
1365 1365 def display_pos_to_rule_pos(self, display_pos):
1366 1366 """Converts a position in display order to rule order.
1367 1367
1368 1368 The `display_pos` is the order from the top in display order, not
1369 1369 considering which items are currently visible on the screen. Thus,
1370 1370 `display_pos=0` is the item at the top (possibly after scrolling to
1371 1371 the top).
1372 1372 """
1373 1373 if self.later_on_top:
1374 1374 return len(self.rules) - 1 - display_pos
1375 1375 else:
1376 1376 return display_pos
1377 1377
1378 1378 def render_rules(self, rulesscr):
1379 1379 start = self.modes[MODE_RULES][b'line_offset']
1380 1380
1381 1381 conflicts = [r.ctx for r in self.rules if r.conflicts]
1382 1382 if len(conflicts) > 0:
1383 1383 line = b"potential conflict in %s" % b','.join(
1384 1384 map(pycompat.bytestr, conflicts)
1385 1385 )
1386 1386 addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))
1387 1387
1388 1388 for display_pos in range(start, len(self.rules)):
1389 1389 y = display_pos - start
1390 1390 if y < 0 or y >= self.page_height:
1391 1391 continue
1392 1392 rule_pos = self.display_pos_to_rule_pos(display_pos)
1393 1393 rule = self.rules[rule_pos]
1394 1394 if len(rule.conflicts) > 0:
1395 1395 rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
1396 1396 else:
1397 1397 rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)
1398 1398
1399 1399 if display_pos == self.selected:
1400 1400 rollcolor = COLOR_ROLL_SELECTED
1401 1401 addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
1402 1402 elif display_pos == self.pos:
1403 1403 rollcolor = COLOR_ROLL_CURRENT
1404 1404 addln(
1405 1405 rulesscr,
1406 1406 y,
1407 1407 2,
1408 1408 rule,
1409 1409 curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,
1410 1410 )
1411 1411 else:
1412 1412 rollcolor = COLOR_ROLL
1413 1413 addln(rulesscr, y, 2, rule)
1414 1414
1415 1415 if rule.action == b'roll':
1416 1416 rulesscr.addstr(
1417 1417 y,
1418 1418 2 + len(rule.prefix),
1419 1419 rule.desc,
1420 1420 curses.color_pair(rollcolor),
1421 1421 )
1422 1422
1423 1423 rulesscr.noutrefresh()
1424 1424
1425 1425 def render_string(self, win, output, diffcolors=False):
1426 1426 maxy, maxx = win.getmaxyx()
1427 1427 length = min(maxy - 1, len(output))
1428 1428 for y in range(0, length):
1429 1429 line = output[y]
1430 1430 if diffcolors:
1431 1431 if line and line[0] == b'+':
1432 1432 win.addstr(
1433 1433 y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE)
1434 1434 )
1435 1435 elif line and line[0] == b'-':
1436 1436 win.addstr(
1437 1437 y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE)
1438 1438 )
1439 1439 elif line.startswith(b'@@ '):
1440 1440 win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))
1441 1441 else:
1442 1442 win.addstr(y, 0, line)
1443 1443 else:
1444 1444 win.addstr(y, 0, line)
1445 1445 win.noutrefresh()
1446 1446
1447 1447 def render_patch(self, win):
1448 1448 start = self.modes[MODE_PATCH][b'line_offset']
1449 1449 content = self.modes[MODE_PATCH][b'patchcontents']
1450 1450 self.render_string(win, content[start:], diffcolors=True)
1451 1451
1452 1452 def event(self, ch):
1453 1453 """Change state based on the current character input
1454 1454
1455 1455 This takes the current state and, based on the character input from
1456 1456 the user, changes the state.
1457 1457 """
1458 1458 oldpos = self.pos
1459 1459
1460 1460 if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
1461 1461 return E_RESIZE
1462 1462
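# The digit keys all share the single b'0' entry in KEYTABLE (the 'goto'
# action), so collapse '0'..'9' before the lookup.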
1463 1463 lookup_ch = ch
1464 1464 if ch is not None and b'0' <= ch <= b'9':
1465 1465 lookup_ch = b'0'
1466 1466
1467 1467 curmode, prevmode = self.mode
1468 1468 action = KEYTABLE[curmode].get(
1469 1469 lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
1470 1470 )
1471 1471 if action is None:
1472 1472 return
1473 1473 if action in (b'down', b'move-down'):
1474 1474 newpos = min(oldpos + 1, len(self.rules) - 1)
1475 1475 self.move_cursor(oldpos, newpos)
1476 1476 if self.selected is not None or action == b'move-down':
1477 1477 self.swap(oldpos, newpos)
1478 1478 elif action in (b'up', b'move-up'):
1479 1479 newpos = max(0, oldpos - 1)
1480 1480 self.move_cursor(oldpos, newpos)
1481 1481 if self.selected is not None or action == b'move-up':
1482 1482 self.swap(oldpos, newpos)
1483 1483 elif action == b'next-action':
1484 1484 self.cycle_action(oldpos, next=True)
1485 1485 elif action == b'prev-action':
1486 1486 self.cycle_action(oldpos, next=False)
1487 1487 elif action == b'select':
1488 1488 self.selected = oldpos if self.selected is None else None
1489 1489 self.make_selection(self.selected)
1490 1490 elif action == b'goto' and int(ch) < len(self.rules) <= 10:
1491 1491 newrule = next((r for r in self.rules if r.origpos == int(ch)))
1492 1492 self.move_cursor(oldpos, newrule.pos)
1493 1493 if self.selected is not None:
1494 1494 self.swap(oldpos, newrule.pos)
1495 1495 elif action.startswith(b'action-'):
1496 1496 self.change_action(oldpos, action[7:])
1497 1497 elif action == b'showpatch':
1498 1498 self.change_mode(MODE_PATCH if curmode != MODE_PATCH else prevmode)
1499 1499 elif action == b'help':
1500 1500 self.change_mode(MODE_HELP if curmode != MODE_HELP else prevmode)
1501 1501 elif action == b'quit':
1502 1502 return E_QUIT
1503 1503 elif action == b'histedit':
1504 1504 return E_HISTEDIT
1505 1505 elif action == b'page-down':
1506 1506 return E_PAGEDOWN
1507 1507 elif action == b'page-up':
1508 1508 return E_PAGEUP
1509 1509 elif action == b'line-down':
1510 1510 return E_LINEDOWN
1511 1511 elif action == b'line-up':
1512 1512 return E_LINEUP
1513 1513
1514 1514 def patch_contents(self):
1515 1515 repo = self.repo
1516 1516 rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
1517 1517 displayer = logcmdutil.changesetdisplayer(
1518 1518 repo.ui,
1519 1519 repo,
1520 1520 {b"patch": True, b"template": b"status"},
1521 1521 buffered=True,
1522 1522 )
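# buffered=True makes the displayer collect its output in displayer.hunk
# (keyed by revision) instead of writing it to the ui.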
1523 1523 overrides = {(b'ui', b'verbose'): True}
1524 1524 with repo.ui.configoverride(overrides, source=b'histedit'):
1525 1525 displayer.show(rule.ctx)
1526 1526 displayer.close()
1527 1527 return displayer.hunk[rule.ctx.rev()].splitlines()
1528 1528
1529 1529 def move_cursor(self, oldpos, newpos):
1530 1530 """Change the rule/changeset that the cursor is pointing to, regardless of
1531 1531 current mode (you can switch between patches from the view patch window)."""
1532 1532 self.pos = newpos
1533 1533
1534 1534 mode, _ = self.mode
1535 1535 if mode == MODE_RULES:
1536 1536 # Scroll through the list by updating the view for MODE_RULES, so that
1537 1537 # even if we are not currently viewing the rules, switching back will
1538 1538 # result in the cursor's rule being visible.
1539 1539 modestate = self.modes[MODE_RULES]
1540 1540 if newpos < modestate[b'line_offset']:
1541 1541 modestate[b'line_offset'] = newpos
1542 1542 elif newpos > modestate[b'line_offset'] + self.page_height - 1:
1543 1543 modestate[b'line_offset'] = newpos - self.page_height + 1
1544 1544
1545 1545 # Reset the patch view region to the top of the new patch.
1546 1546 self.modes[MODE_PATCH][b'line_offset'] = 0
1547 1547
1548 1548 def change_mode(self, mode):
1549 1549 curmode, _ = self.mode
1550 1550 self.mode = (mode, curmode)
1551 1551 if mode == MODE_PATCH:
1552 1552 self.modes[MODE_PATCH][b'patchcontents'] = self.patch_contents()
1553 1553
1554 1554 def make_selection(self, pos):
1555 1555 self.selected = pos
1556 1556
1557 1557 def swap(self, oldpos, newpos):
1558 1558 """Swap two positions and calculate necessary conflicts in
1559 1559 O(|newpos-oldpos|) time"""
1560 1560 old_rule_pos = self.display_pos_to_rule_pos(oldpos)
1561 1561 new_rule_pos = self.display_pos_to_rule_pos(newpos)
1562 1562
1563 1563 rules = self.rules
1564 1564 assert 0 <= old_rule_pos < len(rules) and 0 <= new_rule_pos < len(rules)
1565 1565
1566 1566 rules[old_rule_pos], rules[new_rule_pos] = (
1567 1567 rules[new_rule_pos],
1568 1568 rules[old_rule_pos],
1569 1569 )
1570 1570
1571 1571 # TODO: swap should not know about histeditrule's internals
1572 1572 rules[new_rule_pos].pos = new_rule_pos
1573 1573 rules[old_rule_pos].pos = old_rule_pos
1574 1574
1575 1575 start = min(old_rule_pos, new_rule_pos)
1576 1576 end = max(old_rule_pos, new_rule_pos)
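# Re-check conflicts only for the rules between the two swapped positions;
# rules outside this range are unaffected, which keeps swap O(|newpos-oldpos|).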
1577 for r in pycompat.xrange(start, end + 1):
1577 for r in range(start, end + 1):
1578 1578 rules[new_rule_pos].checkconflicts(rules[r])
1579 1579 rules[old_rule_pos].checkconflicts(rules[r])
1580 1580
1581 1581 if self.selected:
1582 1582 self.make_selection(newpos)
1583 1583
1584 1584 def change_action(self, pos, action):
1585 1585 """Change the action state on the given position to the new action"""
1586 1586 assert 0 <= pos < len(self.rules)
1587 1587 self.rules[pos].action = action
1588 1588
1589 1589 def cycle_action(self, pos, next=False):
1590 1590 """Changes the action state the next or the previous action from
1591 1591 the action list"""
1592 1592 assert 0 <= pos < len(self.rules)
1593 1593 current = self.rules[pos].action
1594 1594
1595 1595 assert current in KEY_LIST
1596 1596
1597 1597 index = KEY_LIST.index(current)
1598 1598 if next:
1599 1599 index += 1
1600 1600 else:
1601 1601 index -= 1
1602 1602 self.change_action(pos, KEY_LIST[index % len(KEY_LIST)])
1603 1603
1604 1604 def change_view(self, delta, unit):
1605 1605 """Change the region of whatever is being viewed (a patch or the list of
1606 1606 changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
1607 1607 mode, _ = self.mode
1608 1608 if mode != MODE_PATCH:
1609 1609 return
1610 1610 mode_state = self.modes[mode]
1611 1611 num_lines = len(mode_state[b'patchcontents'])
1612 1612 page_height = self.page_height
1613 1613 unit = page_height if unit == b'page' else 1
1614 1614 num_pages = 1 + (num_lines - 1) // page_height
1615 1615 max_offset = (num_pages - 1) * page_height
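# Clamp the new offset so scrolling stops at the start of the last page.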
1616 1616 newline = mode_state[b'line_offset'] + delta * unit
1617 1617 mode_state[b'line_offset'] = max(0, min(max_offset, newline))
1618 1618
1619 1619
1620 1620 def _chisteditmain(repo, rules, stdscr):
1621 1621 try:
1622 1622 curses.use_default_colors()
1623 1623 except curses.error:
1624 1624 pass
1625 1625
1626 1626 # initialize color pattern
1627 1627 curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
1628 1628 curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
1629 1629 curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
1630 1630 curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
1631 1631 curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
1632 1632 curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
1633 1633 curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
1634 1634 curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
1635 1635 curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
1636 1636 curses.init_pair(
1637 1637 COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
1638 1638 )
1639 1639 curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
1640 1640
1641 1641 # don't display the cursor
1642 1642 try:
1643 1643 curses.curs_set(0)
1644 1644 except curses.error:
1645 1645 pass
1646 1646
1647 1647 def drawvertwin(size, y, x):
1648 1648 win = curses.newwin(size[0], size[1], y, x)
1649 1649 y += size[0]
1650 1650 return win, y, x
1651 1651
1652 1652 state = _chistedit_state(repo, rules, stdscr)
1653 1653
1654 1654 # eventloop
1655 1655 ch = None
1656 1656 stdscr.clear()
1657 1657 stdscr.refresh()
1658 1658 while True:
1659 1659 oldmode, unused = state.mode
1660 1660 if oldmode == MODE_INIT:
1661 1661 state.change_mode(MODE_RULES)
1662 1662 e = state.event(ch)
1663 1663
1664 1664 if e == E_QUIT:
1665 1665 return False
1666 1666 if e == E_HISTEDIT:
1667 1667 return state.rules
1668 1668 else:
1669 1669 if e == E_RESIZE:
1670 1670 size = screen_size()
1671 1671 if size != stdscr.getmaxyx():
1672 1672 curses.resizeterm(*size)
1673 1673
1674 1674 sizes = state.layout()
1675 1675 curmode, unused = state.mode
1676 1676 if curmode != oldmode:
1677 1677 state.page_height = sizes[b'main'][0]
1678 1678 # Adjust the view to fit the current screen size.
1679 1679 state.move_cursor(state.pos, state.pos)
1680 1680
1681 1681 # Pack the windows against the top, each pane spread across the
1682 1682 # full width of the screen.
1683 1683 y, x = (0, 0)
1684 1684 helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
1685 1685 mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
1686 1686 commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
1687 1687
1688 1688 if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
1689 1689 if e == E_PAGEDOWN:
1690 1690 state.change_view(+1, b'page')
1691 1691 elif e == E_PAGEUP:
1692 1692 state.change_view(-1, b'page')
1693 1693 elif e == E_LINEDOWN:
1694 1694 state.change_view(+1, b'line')
1695 1695 elif e == E_LINEUP:
1696 1696 state.change_view(-1, b'line')
1697 1697
1698 1698 # start rendering
1699 1699 commitwin.erase()
1700 1700 helpwin.erase()
1701 1701 mainwin.erase()
1702 1702 if curmode == MODE_PATCH:
1703 1703 state.render_patch(mainwin)
1704 1704 elif curmode == MODE_HELP:
1705 1705 state.render_string(mainwin, __doc__.strip().splitlines())
1706 1706 else:
1707 1707 state.render_rules(mainwin)
1708 1708 state.render_commit(commitwin)
1709 1709 state.render_help(helpwin)
1710 1710 curses.doupdate()
1711 1711 # done rendering
1712 1712 ch = encoding.strtolocal(stdscr.getkey())
1713 1713
1714 1714
1715 1715 def _chistedit(ui, repo, freeargs, opts):
1716 1716 """interactively edit changeset history via a curses interface
1717 1717
1718 1718 Provides an ncurses interface to histedit. Press ? in chistedit mode
1719 1719 to see an extensive help. Requires python-curses to be installed."""
1720 1720
1721 1721 if curses is None:
1722 1722 raise error.Abort(_(b"Python curses library required"))
1723 1723
1724 1724 # disable color
1725 1725 ui._colormode = None
1726 1726
1727 1727 try:
1728 1728 keep = opts.get(b'keep')
1729 1729 revs = opts.get(b'rev', [])[:]
1730 1730 cmdutil.checkunfinished(repo)
1731 1731 cmdutil.bailifchanged(repo)
1732 1732
1733 1733 revs.extend(freeargs)
1734 1734 if not revs:
1735 1735 defaultrev = destutil.desthistedit(ui, repo)
1736 1736 if defaultrev is not None:
1737 1737 revs.append(defaultrev)
1738 1738 if len(revs) != 1:
1739 1739 raise error.InputError(
1740 1740 _(b'histedit requires exactly one ancestor revision')
1741 1741 )
1742 1742
1743 1743 rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
1744 1744 if len(rr) != 1:
1745 1745 raise error.InputError(
1746 1746 _(
1747 1747 b'The specified revisions must have '
1748 1748 b'exactly one common root'
1749 1749 )
1750 1750 )
1751 1751 root = rr[0].node()
1752 1752
1753 1753 topmost = repo.dirstate.p1()
1754 1754 revs = between(repo, root, topmost, keep)
1755 1755 if not revs:
1756 1756 raise error.InputError(
1757 1757 _(b'%s is not an ancestor of working directory') % short(root)
1758 1758 )
1759 1759
1760 1760 rules = []
1761 1761 for i, r in enumerate(revs):
1762 1762 rules.append(histeditrule(ui, repo[r], i))
1763 1763 with util.with_lc_ctype():
1764 1764 rc = curses.wrapper(functools.partial(_chisteditmain, repo, rules))
1765 1765 curses.echo()
1766 1766 curses.endwin()
1767 1767 if rc is False:
1768 1768 ui.write(_(b"histedit aborted\n"))
1769 1769 return 0
1770 1770 if type(rc) is list:
1771 1771 ui.status(_(b"performing changes\n"))
1772 1772 rules = makecommands(rc)
1773 1773 with repo.vfs(b'chistedit', b'w+') as fp:
1774 1774 for r in rules:
1775 1775 fp.write(r)
1776 1776 opts[b'commands'] = fp.name
1777 1777 return _texthistedit(ui, repo, freeargs, opts)
1778 1778 except KeyboardInterrupt:
1779 1779 pass
1780 1780 return -1
1781 1781
1782 1782
1783 1783 @command(
1784 1784 b'histedit',
1785 1785 [
1786 1786 (
1787 1787 b'',
1788 1788 b'commands',
1789 1789 b'',
1790 1790 _(b'read history edits from the specified file'),
1791 1791 _(b'FILE'),
1792 1792 ),
1793 1793 (b'c', b'continue', False, _(b'continue an edit already in progress')),
1794 1794 (b'', b'edit-plan', False, _(b'edit remaining actions list')),
1795 1795 (
1796 1796 b'k',
1797 1797 b'keep',
1798 1798 False,
1799 1799 _(b"don't strip old nodes after edit is complete"),
1800 1800 ),
1801 1801 (b'', b'abort', False, _(b'abort an edit in progress')),
1802 1802 (b'o', b'outgoing', False, _(b'changesets not found in destination')),
1803 1803 (
1804 1804 b'f',
1805 1805 b'force',
1806 1806 False,
1807 1807 _(b'force outgoing even for unrelated repositories'),
1808 1808 ),
1809 1809 (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')),
1810 1810 ]
1811 1811 + cmdutil.formatteropts,
1812 1812 _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
1813 1813 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
1814 1814 )
1815 1815 def histedit(ui, repo, *freeargs, **opts):
1816 1816 """interactively edit changeset history
1817 1817
1818 1818 This command lets you edit a linear series of changesets (up to
1819 1819 and including the working directory, which should be clean).
1820 1820 You can:
1821 1821
1822 1822 - `pick` to [re]order a changeset
1823 1823
1824 1824 - `drop` to omit a changeset
1825 1825
1826 1826 - `mess` to reword the changeset commit message
1827 1827
1828 1828 - `fold` to combine it with the preceding changeset (using the later date)
1829 1829
1830 1830 - `roll` like fold, but discarding this commit's description and date
1831 1831
1832 1832 - `edit` to edit this changeset (preserving date)
1833 1833
1834 1834 - `base` to check out a changeset and apply further changesets from there
1835 1835
1836 1836 There are a number of ways to select the root changeset:
1837 1837
1838 1838 - Specify ANCESTOR directly
1839 1839
1840 1840 - Use --outgoing -- it will be the first linear changeset not
1841 1841 included in destination. (See :hg:`help config.paths.default-push`)
1842 1842
1843 1843 - Otherwise, the value from the "histedit.defaultrev" config option
1844 1844 is used as a revset to select the base revision when ANCESTOR is not
1845 1845 specified. The first revision returned by the revset is used. By
1846 1846 default, this selects the editable history that is unique to the
1847 1847 ancestry of the working directory.
1848 1848
1849 1849 .. container:: verbose
1850 1850
1851 1851 If you use --outgoing, this command will abort if there are ambiguous
1852 1852 outgoing revisions. For example, if there are multiple branches
1853 1853 containing outgoing revisions.
1854 1854
1855 1855 Use "min(outgoing() and ::.)" or similar revset specification
1856 1856 instead of --outgoing to specify edit target revision exactly in
1857 1857 such ambiguous situation. See :hg:`help revsets` for detail about
1858 1858 selecting revisions.
1859 1859
1860 1860 .. container:: verbose
1861 1861
1862 1862 Examples:
1863 1863
1864 1864 - A number of changes have been made.
1865 1865 Revision 3 is no longer needed.
1866 1866
1867 1867 Start history editing from revision 3::
1868 1868
1869 1869 hg histedit -r 3
1870 1870
1871 1871 An editor opens, containing the list of revisions,
1872 1872 with specific actions specified::
1873 1873
1874 1874 pick 5339bf82f0ca 3 Zworgle the foobar
1875 1875 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1876 1876 pick 0a9639fcda9d 5 Morgify the cromulancy
1877 1877
1878 1878 Additional information about the possible actions
1879 1879 to take appears below the list of revisions.
1880 1880
1881 1881 To remove revision 3 from the history,
1882 1882 its action (at the beginning of the relevant line)
1883 1883 is changed to 'drop'::
1884 1884
1885 1885 drop 5339bf82f0ca 3 Zworgle the foobar
1886 1886 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1887 1887 pick 0a9639fcda9d 5 Morgify the cromulancy
1888 1888
1889 1889 - A number of changes have been made.
1890 1890 Revision 2 and 4 need to be swapped.
1891 1891
1892 1892 Start history editing from revision 2::
1893 1893
1894 1894 hg histedit -r 2
1895 1895
1896 1896 An editor opens, containing the list of revisions,
1897 1897 with specific actions specified::
1898 1898
1899 1899 pick 252a1af424ad 2 Blorb a morgwazzle
1900 1900 pick 5339bf82f0ca 3 Zworgle the foobar
1901 1901 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1902 1902
1903 1903 To swap revision 2 and 4, its lines are swapped
1904 1904 in the editor::
1905 1905
1906 1906 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1907 1907 pick 5339bf82f0ca 3 Zworgle the foobar
1908 1908 pick 252a1af424ad 2 Blorb a morgwazzle
1909 1909
1910 1910 Returns 0 on success, 1 if user intervention is required (not only
1911 1911 for intentional "edit" command, but also for resolving unexpected
1912 1912 conflicts).
1913 1913 """
1914 1914 opts = pycompat.byteskwargs(opts)
1915 1915
1916 1916 # kludge: _chistedit only works for starting an edit, not aborting
1917 1917 # or continuing, so fall back to regular _texthistedit for those
1918 1918 # operations.
1919 1919 if ui.interface(b'histedit') == b'curses' and _getgoal(opts) == goalnew:
1920 1920 return _chistedit(ui, repo, freeargs, opts)
1921 1921 return _texthistedit(ui, repo, freeargs, opts)
1922 1922
1923 1923
1924 1924 def _texthistedit(ui, repo, freeargs, opts):
1925 1925 state = histeditstate(repo)
1926 1926 with repo.wlock() as wlock, repo.lock() as lock:
1927 1927 state.wlock = wlock
1928 1928 state.lock = lock
1929 1929 _histedit(ui, repo, state, freeargs, opts)
1930 1930
1931 1931
1932 1932 goalcontinue = b'continue'
1933 1933 goalabort = b'abort'
1934 1934 goaleditplan = b'edit-plan'
1935 1935 goalnew = b'new'
1936 1936
1937 1937
1938 1938 def _getgoal(opts):
1939 1939 if opts.get(b'continue'):
1940 1940 return goalcontinue
1941 1941 if opts.get(b'abort'):
1942 1942 return goalabort
1943 1943 if opts.get(b'edit_plan'):
1944 1944 return goaleditplan
1945 1945 return goalnew
1946 1946
1947 1947
1948 1948 def _readfile(ui, path):
1949 1949 if path == b'-':
1950 1950 with ui.timeblockedsection(b'histedit'):
1951 1951 return ui.fin.read()
1952 1952 else:
1953 1953 with open(path, b'rb') as f:
1954 1954 return f.read()
1955 1955
1956 1956
1957 1957 def _validateargs(ui, repo, freeargs, opts, goal, rules, revs):
1958 1958 # TODO only abort if we try to histedit mq patches, not just
1959 1959 # blanket if mq patches are applied somewhere
1960 1960 mq = getattr(repo, 'mq', None)
1961 1961 if mq and mq.applied:
1962 1962 raise error.StateError(_(b'source has mq patches applied'))
1963 1963
1964 1964 # basic argument incompatibility processing
1965 1965 outg = opts.get(b'outgoing')
1966 1966 editplan = opts.get(b'edit_plan')
1967 1967 abort = opts.get(b'abort')
1968 1968 force = opts.get(b'force')
1969 1969 if force and not outg:
1970 1970 raise error.InputError(_(b'--force only allowed with --outgoing'))
1971 1971 if goal == b'continue':
1972 1972 if any((outg, abort, revs, freeargs, rules, editplan)):
1973 1973 raise error.InputError(_(b'no arguments allowed with --continue'))
1974 1974 elif goal == b'abort':
1975 1975 if any((outg, revs, freeargs, rules, editplan)):
1976 1976 raise error.InputError(_(b'no arguments allowed with --abort'))
1977 1977 elif goal == b'edit-plan':
1978 1978 if any((outg, revs, freeargs)):
1979 1979 raise error.InputError(
1980 1980 _(b'only --commands argument allowed with --edit-plan')
1981 1981 )
1982 1982 else:
1983 1983 if outg:
1984 1984 if revs:
1985 1985 raise error.InputError(
1986 1986 _(b'no revisions allowed with --outgoing')
1987 1987 )
1988 1988 if len(freeargs) > 1:
1989 1989 raise error.InputError(
1990 1990 _(b'only one repo argument allowed with --outgoing')
1991 1991 )
1992 1992 else:
1993 1993 revs.extend(freeargs)
1994 1994 if len(revs) == 0:
1995 1995 defaultrev = destutil.desthistedit(ui, repo)
1996 1996 if defaultrev is not None:
1997 1997 revs.append(defaultrev)
1998 1998
1999 1999 if len(revs) != 1:
2000 2000 raise error.InputError(
2001 2001 _(b'histedit requires exactly one ancestor revision')
2002 2002 )
2003 2003
2004 2004
2005 2005 def _histedit(ui, repo, state, freeargs, opts):
2006 2006 fm = ui.formatter(b'histedit', opts)
2007 2007 fm.startitem()
2008 2008 goal = _getgoal(opts)
2009 2009 revs = opts.get(b'rev', [])
2010 2010 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
2011 2011 rules = opts.get(b'commands', b'')
2012 2012 state.keep = opts.get(b'keep', False)
2013 2013
2014 2014 _validateargs(ui, repo, freeargs, opts, goal, rules, revs)
2015 2015
2016 2016 hastags = False
2017 2017 if revs:
2018 2018 revs = logcmdutil.revrange(repo, revs)
2019 2019 ctxs = [repo[rev] for rev in revs]
2020 2020 for ctx in ctxs:
2021 2021 tags = [tag for tag in ctx.tags() if tag != b'tip']
2022 2022 if not hastags:
2023 2023 hastags = len(tags)
2024 2024 if hastags:
2025 2025 if ui.promptchoice(
2026 2026 _(
2027 2027 b'warning: tags associated with the given'
2028 2028 b' changeset will be lost after histedit.\n'
2029 2029 b'do you want to continue (yN)? $$ &Yes $$ &No'
2030 2030 ),
2031 2031 default=1,
2032 2032 ):
2033 2033 raise error.CanceledError(_(b'histedit cancelled\n'))
2034 2034 # rebuild state
2035 2035 if goal == goalcontinue:
2036 2036 state.read()
2037 2037 state = bootstrapcontinue(ui, state, opts)
2038 2038 elif goal == goaleditplan:
2039 2039 _edithisteditplan(ui, repo, state, rules)
2040 2040 return
2041 2041 elif goal == goalabort:
2042 2042 _aborthistedit(ui, repo, state, nobackup=nobackup)
2043 2043 return
2044 2044 else:
2045 2045 # goal == goalnew
2046 2046 _newhistedit(ui, repo, state, revs, freeargs, opts)
2047 2047
2048 2048 _continuehistedit(ui, repo, state)
2049 2049 _finishhistedit(ui, repo, state, fm)
2050 2050 fm.end()
2051 2051
2052 2052
2053 2053 def _continuehistedit(ui, repo, state):
2054 2054 """This function runs after either:
2055 2055 - bootstrapcontinue (if the goal is 'continue')
2056 2056 - _newhistedit (if the goal is 'new')
2057 2057 """
2058 2058 # preprocess rules so that we can hide inner folds from the user
2059 2059 # and only show one editor
2060 2060 actions = state.actions[:]
2061 2061 for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])):
2062 2062 if action.verb == b'fold' and nextact and nextact.verb == b'fold':
2063 2063 state.actions[idx].__class__ = _multifold
2064 2064
2065 2065 # Force an initial state file write, so the user can run --abort/continue
2066 2066 # even if there's an exception before the first transaction serialize.
2067 2067 state.write()
2068 2068
2069 2069 tr = None
2070 2070 # Don't use singletransaction by default since it rolls the entire
2071 2071 # transaction back if an unexpected exception happens (like a
2072 2072 # pretxncommit hook throws, or the user aborts the commit msg editor).
2073 2073 if ui.configbool(b"histedit", b"singletransaction"):
2074 2074 # Don't use a 'with' for the transaction, since actions may close
2075 2075 # and reopen a transaction. For example, if the action executes an
2076 2076 # external process it may choose to commit the transaction first.
2077 2077 tr = repo.transaction(b'histedit')
2078 2078 progress = ui.makeprogress(
2079 2079 _(b"editing"), unit=_(b'changes'), total=len(state.actions)
2080 2080 )
2081 2081 with progress, util.acceptintervention(tr):
2082 2082 while state.actions:
2083 2083 state.write(tr=tr)
2084 2084 actobj = state.actions[0]
2085 2085 progress.increment(item=actobj.torule())
2086 2086 ui.debug(
2087 2087 b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule())
2088 2088 )
2089 2089 parentctx, replacement_ = actobj.run()
2090 2090 state.parentctxnode = parentctx.node()
2091 2091 state.replacements.extend(replacement_)
2092 2092 state.actions.pop(0)
2093 2093
2094 2094 state.write()
2095 2095
2096 2096
2097 2097 def _finishhistedit(ui, repo, state, fm):
2098 2098 """This action runs when histedit is finishing its session"""
2099 2099 mergemod.update(repo[state.parentctxnode])
2100 2100
2101 2101 mapping, tmpnodes, created, ntm = processreplacement(state)
2102 2102 if mapping:
2103 2103 for prec, succs in mapping.items():
2104 2104 if not succs:
2105 2105 ui.debug(b'histedit: %s is dropped\n' % short(prec))
2106 2106 else:
2107 2107 ui.debug(
2108 2108 b'histedit: %s is replaced by %s\n'
2109 2109 % (short(prec), short(succs[0]))
2110 2110 )
2111 2111 if len(succs) > 1:
2112 2112 m = b'histedit: %s'
2113 2113 for n in succs[1:]:
2114 2114 ui.debug(m % short(n))
2115 2115
2116 2116 if not state.keep:
2117 2117 if mapping:
2118 2118 movetopmostbookmarks(repo, state.topmost, ntm)
2119 2119 # TODO update mq state
2120 2120 else:
2121 2121 mapping = {}
2122 2122
2123 2123 for n in tmpnodes:
2124 2124 if n in repo:
2125 2125 mapping[n] = ()
2126 2126
2127 2127 # remove entries about unknown nodes
2128 2128 has_node = repo.unfiltered().changelog.index.has_node
2129 2129 mapping = {
2130 2130 k: v
2131 2131 for k, v in mapping.items()
2132 2132 if has_node(k) and all(has_node(n) for n in v)
2133 2133 }
2134 2134 scmutil.cleanupnodes(repo, mapping, b'histedit')
2135 2135 hf = fm.hexfunc
2136 2136 fl = fm.formatlist
2137 2137 fd = fm.formatdict
2138 2138 nodechanges = fd(
2139 2139 {
2140 2140 hf(oldn): fl([hf(n) for n in newn], name=b'node')
2141 2141 for oldn, newn in mapping.items()
2142 2142 },
2143 2143 key=b"oldnode",
2144 2144 value=b"newnodes",
2145 2145 )
2146 2146 fm.data(nodechanges=nodechanges)
2147 2147
2148 2148 state.clear()
2149 2149 if os.path.exists(repo.sjoin(b'undo')):
2150 2150 os.unlink(repo.sjoin(b'undo'))
2151 2151 if repo.vfs.exists(b'histedit-last-edit.txt'):
2152 2152 repo.vfs.unlink(b'histedit-last-edit.txt')
2153 2153
2154 2154
2155 2155 def _aborthistedit(ui, repo, state, nobackup=False):
2156 2156 try:
2157 2157 state.read()
2158 2158 __, leafs, tmpnodes, __ = processreplacement(state)
2159 2159 ui.debug(b'restore wc to old parent %s\n' % short(state.topmost))
2160 2160
2161 2161 # Recover our old commits if necessary
2162 2162 if not state.topmost in repo and state.backupfile:
2163 2163 backupfile = repo.vfs.join(state.backupfile)
2164 2164 f = hg.openpath(ui, backupfile)
2165 2165 gen = exchange.readbundle(ui, f, backupfile)
2166 2166 with repo.transaction(b'histedit.abort') as tr:
2167 2167 bundle2.applybundle(
2168 2168 repo,
2169 2169 gen,
2170 2170 tr,
2171 2171 source=b'histedit',
2172 2172 url=b'bundle:' + backupfile,
2173 2173 )
2174 2174
2175 2175 os.remove(backupfile)
2176 2176
2177 2177 # check whether we should update away
2178 2178 if repo.unfiltered().revs(
2179 2179 b'parents() and (%n or %ln::)',
2180 2180 state.parentctxnode,
2181 2181 leafs | tmpnodes,
2182 2182 ):
2183 2183 hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
2184 2184 cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
2185 2185 cleanupnode(ui, repo, leafs, nobackup=nobackup)
2186 2186 except Exception:
2187 2187 if state.inprogress():
2188 2188 ui.warn(
2189 2189 _(
2190 2190 b'warning: encountered an exception during histedit '
2191 2191 b'--abort; the repository may not have been completely '
2192 2192 b'cleaned up\n'
2193 2193 )
2194 2194 )
2195 2195 raise
2196 2196 finally:
2197 2197 state.clear()
2198 2198
2199 2199
2200 2200 def hgaborthistedit(ui, repo):
2201 2201 state = histeditstate(repo)
2202 2202 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
2203 2203 with repo.wlock() as wlock, repo.lock() as lock:
2204 2204 state.wlock = wlock
2205 2205 state.lock = lock
2206 2206 _aborthistedit(ui, repo, state, nobackup=nobackup)
2207 2207
2208 2208
2209 2209 def _edithisteditplan(ui, repo, state, rules):
2210 2210 state.read()
2211 2211 if not rules:
2212 2212 comment = geteditcomment(
2213 2213 ui, short(state.parentctxnode), short(state.topmost)
2214 2214 )
2215 2215 rules = ruleeditor(repo, ui, state.actions, comment)
2216 2216 else:
2217 2217 rules = _readfile(ui, rules)
2218 2218 actions = parserules(rules, state)
2219 2219 ctxs = [repo[act.node] for act in state.actions if act.node]
2220 2220 warnverifyactions(ui, repo, actions, state, ctxs)
2221 2221 state.actions = actions
2222 2222 state.write()
2223 2223
2224 2224
2225 2225 def _newhistedit(ui, repo, state, revs, freeargs, opts):
2226 2226 outg = opts.get(b'outgoing')
2227 2227 rules = opts.get(b'commands', b'')
2228 2228 force = opts.get(b'force')
2229 2229
2230 2230 cmdutil.checkunfinished(repo)
2231 2231 cmdutil.bailifchanged(repo)
2232 2232
2233 2233 topmost = repo.dirstate.p1()
2234 2234 if outg:
2235 2235 if freeargs:
2236 2236 remote = freeargs[0]
2237 2237 else:
2238 2238 remote = None
2239 2239 root = findoutgoing(ui, repo, remote, force, opts)
2240 2240 else:
2241 2241 rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
2242 2242 if len(rr) != 1:
2243 2243 raise error.InputError(
2244 2244 _(
2245 2245 b'The specified revisions must have '
2246 2246 b'exactly one common root'
2247 2247 )
2248 2248 )
2249 2249 root = rr[0].node()
2250 2250
2251 2251 revs = between(repo, root, topmost, state.keep)
2252 2252 if not revs:
2253 2253 raise error.InputError(
2254 2254 _(b'%s is not an ancestor of working directory') % short(root)
2255 2255 )
2256 2256
2257 2257 ctxs = [repo[r] for r in revs]
2258 2258
2259 2259 wctx = repo[None]
2260 2260 # Please don't ask me why `ancestors` is this value. I figured it
2261 2261 # out with print-debugging, not by actually understanding what the
2262 2262 # merge code is doing. :(
2263 2263 ancs = [repo[b'.']]
2264 2264 # Sniff-test to make sure we won't collide with untracked files in
2265 2265 # the working directory. If we don't do this, we can get a
2266 2266 # collision after we've started histedit and backing out gets ugly
2267 2267 # for everyone, especially the user.
2268 2268 for c in [ctxs[0].p1()] + ctxs:
2269 2269 try:
2270 2270 mergemod.calculateupdates(
2271 2271 repo,
2272 2272 wctx,
2273 2273 c,
2274 2274 ancs,
2275 2275 # These parameters were determined by print-debugging
2276 2276 # what happens later on inside histedit.
2277 2277 branchmerge=False,
2278 2278 force=False,
2279 2279 acceptremote=False,
2280 2280 followcopies=False,
2281 2281 )
2282 2282 except error.Abort:
2283 2283 raise error.StateError(
2284 2284 _(
2285 2285 b"untracked files in working directory conflict with files in %s"
2286 2286 )
2287 2287 % c
2288 2288 )
2289 2289
2290 2290 if not rules:
2291 2291 comment = geteditcomment(ui, short(root), short(topmost))
2292 2292 actions = [pick(state, r) for r in revs]
2293 2293 rules = ruleeditor(repo, ui, actions, comment)
2294 2294 else:
2295 2295 rules = _readfile(ui, rules)
2296 2296 actions = parserules(rules, state)
2297 2297 warnverifyactions(ui, repo, actions, state, ctxs)
2298 2298
2299 2299 parentctxnode = repo[root].p1().node()
2300 2300
2301 2301 state.parentctxnode = parentctxnode
2302 2302 state.actions = actions
2303 2303 state.topmost = topmost
2304 2304 state.replacements = []
2305 2305
2306 2306 ui.log(
2307 2307 b"histedit",
2308 2308 b"%d actions to histedit\n",
2309 2309 len(actions),
2310 2310 histedit_num_actions=len(actions),
2311 2311 )
2312 2312
2313 2313 # Create a backup so we can always abort completely.
2314 2314 backupfile = None
2315 2315 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2316 2316 backupfile = repair.backupbundle(
2317 2317 repo, [parentctxnode], [topmost], root, b'histedit'
2318 2318 )
2319 2319 state.backupfile = backupfile
2320 2320
2321 2321
2322 2322 def _getsummary(ctx):
2323 2323 return stringutil.firstline(ctx.description())
2324 2324
2325 2325
2326 2326 def bootstrapcontinue(ui, state, opts):
2327 2327 repo = state.repo
2328 2328
2329 2329 ms = mergestatemod.mergestate.read(repo)
2330 2330 mergeutil.checkunresolved(ms)
2331 2331
2332 2332 if state.actions:
2333 2333 actobj = state.actions.pop(0)
2334 2334
2335 2335 if _isdirtywc(repo):
2336 2336 actobj.continuedirty()
2337 2337 if _isdirtywc(repo):
2338 2338 abortdirty()
2339 2339
2340 2340 parentctx, replacements = actobj.continueclean()
2341 2341
2342 2342 state.parentctxnode = parentctx.node()
2343 2343 state.replacements.extend(replacements)
2344 2344
2345 2345 return state
2346 2346
2347 2347
2348 2348 def between(repo, old, new, keep):
2349 2349 """select and validate the set of revision to edit
2350 2350
2351 2351 When keep is false, the specified set can't have children."""
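# The revset '%n::%n' selects the DAG range old::new: old, new, and every
# changeset that is both a descendant of old and an ancestor of new.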
2352 2352 revs = repo.revs(b'%n::%n', old, new)
2353 2353 if revs and not keep:
2354 2354 rewriteutil.precheck(repo, revs, b'edit')
2355 2355 if repo.revs(b'(%ld) and merge()', revs):
2356 2356 raise error.StateError(
2357 2357 _(b'cannot edit history that contains merges')
2358 2358 )
2359 2359 return pycompat.maplist(repo.changelog.node, revs)
2360 2360
2361 2361
2362 2362 def ruleeditor(repo, ui, actions, editcomment=b""):
2363 2363 """open an editor to edit rules
2364 2364
2365 2365 rules are in the format [ [act, ctx], ...] like in state.rules
2366 2366 """
2367 2367 if repo.ui.configbool(b"experimental", b"histedit.autoverb"):
2368 2368 newact = util.sortdict()
2369 2369 for act in actions:
2370 2370 ctx = repo[act.node]
2371 2371 summary = _getsummary(ctx)
2372 2372 fword = summary.split(b' ', 1)[0].lower()
2373 2373 added = False
2374 2374
2375 2375 # if it doesn't end with the special character '!' just skip this
2376 2376 if fword.endswith(b'!'):
2377 2377 fword = fword[:-1]
2378 2378 if fword in primaryactions | secondaryactions | tertiaryactions:
2379 2379 act.verb = fword
2380 2380 # get the target summary
2381 2381 tsum = summary[len(fword) + 1 :].lstrip()
2382 2382 # safe but slow: reverse iterate over the actions so we
2383 2383 # don't clash on two commits having the same summary
2384 2384 for na, l in reversed(list(newact.items())):
2385 2385 actx = repo[na.node]
2386 2386 asum = _getsummary(actx)
2387 2387 if asum == tsum:
2388 2388 added = True
2389 2389 l.append(act)
2390 2390 break
2391 2391
2392 2392 if not added:
2393 2393 newact[act] = []
2394 2394
2395 2395 # copy over and flatten the new list
2396 2396 actions = []
2397 2397 for na, l in newact.items():
2398 2398 actions.append(na)
2399 2399 actions += l
2400 2400
2401 2401 rules = b'\n'.join([act.torule() for act in actions])
2402 2402 rules += b'\n\n'
2403 2403 rules += editcomment
2404 2404 rules = ui.edit(
2405 2405 rules,
2406 2406 ui.username(),
2407 2407 {b'prefix': b'histedit'},
2408 2408 repopath=repo.path,
2409 2409 action=b'histedit',
2410 2410 )
2411 2411
2412 2412 # Save edit rules in .hg/histedit-last-edit.txt in case
2413 2413 # the user needs to ask for help after something
2414 2414 # surprising happens.
2415 2415 with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:
2416 2416 f.write(rules)
2417 2417
2418 2418 return rules
2419 2419
2420 2420
2421 2421 def parserules(rules, state):
2422 2422 """Read the histedit rules string and return list of action objects"""
2423 2423 rules = [
2424 2424 l
2425 2425 for l in (r.strip() for r in rules.splitlines())
2426 2426 if l and not l.startswith(b'#')
2427 2427 ]
2428 2428 actions = []
2429 2429 for r in rules:
2430 2430 if b' ' not in r:
2431 2431 raise error.ParseError(_(b'malformed line "%s"') % r)
2432 2432 verb, rest = r.split(b' ', 1)
2433 2433
2434 2434 if verb not in actiontable:
2435 2435 raise error.ParseError(_(b'unknown action "%s"') % verb)
2436 2436
2437 2437 action = actiontable[verb].fromrule(state, rest)
2438 2438 actions.append(action)
2439 2439 return actions
2440 2440
2441 2441
2442 2442 def warnverifyactions(ui, repo, actions, state, ctxs):
2443 2443 try:
2444 2444 verifyactions(actions, state, ctxs)
2445 2445 except error.ParseError:
2446 2446 if repo.vfs.exists(b'histedit-last-edit.txt'):
2447 2447 ui.warn(
2448 2448 _(
2449 2449 b'warning: histedit rules saved '
2450 2450 b'to: .hg/histedit-last-edit.txt\n'
2451 2451 )
2452 2452 )
2453 2453 raise
2454 2454
2455 2455
2456 2456 def verifyactions(actions, state, ctxs):
2457 2457 """Verify that there exists exactly one action per given changeset and
2458 2458 other constraints.
2459 2459
2460 2460 Will abort if there are too many or too few rules, a malformed rule,
2461 2461 or a rule on a changeset outside of the user-given range.
2462 2462 """
2463 2463 expected = {c.node() for c in ctxs}
2464 2464 seen = set()
2465 2465 prev = None
2466 2466
2467 2467 if actions and actions[0].verb in [b'roll', b'fold']:
2468 2468 raise error.ParseError(
2469 2469 _(b'first changeset cannot use verb "%s"') % actions[0].verb
2470 2470 )
2471 2471
2472 2472 for action in actions:
2473 2473 action.verify(prev, expected, seen)
2474 2474 prev = action
2475 2475 if action.node is not None:
2476 2476 seen.add(action.node)
2477 2477 missing = sorted(expected - seen) # sort to stabilize output
2478 2478
2479 2479 if state.repo.ui.configbool(b'histedit', b'dropmissing'):
2480 2480 if len(actions) == 0:
2481 2481 raise error.ParseError(
2482 2482 _(b'no rules provided'),
2483 2483 hint=_(b'use strip extension to remove commits'),
2484 2484 )
2485 2485
2486 2486 drops = [drop(state, n) for n in missing]
2487 2487 # put them at the beginning so they execute immediately and
2488 2488 # don't show in the edit-plan in the future
2489 2489 actions[:0] = drops
2490 2490 elif missing:
2491 2491 raise error.ParseError(
2492 2492 _(b'missing rules for changeset %s') % short(missing[0]),
2493 2493 hint=_(
2494 2494 b'use "drop %s" to discard, see also: '
2495 2495 b"'hg help -e histedit.config'"
2496 2496 )
2497 2497 % short(missing[0]),
2498 2498 )
2499 2499
2500 2500
2501 2501 def adjustreplacementsfrommarkers(repo, oldreplacements):
2502 2502 """Adjust replacements from obsolescence markers
2503 2503
2504 2504 Replacements structure is originally generated based on
2505 2505 histedit's state and does not account for changes that are
2506 2506 not recorded there. This function fixes that by adding
2507 2507 data read from obsolescence markers"""
2508 2508 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2509 2509 return oldreplacements
2510 2510
2511 2511 unfi = repo.unfiltered()
2512 2512 get_rev = unfi.changelog.index.get_rev
2513 2513 obsstore = repo.obsstore
2514 2514 newreplacements = list(oldreplacements)
2515 2515 oldsuccs = [r[1] for r in oldreplacements]
2516 2516 # successors that have already been added to succstocheck once
2517 2517 seensuccs = set().union(
2518 2518 *oldsuccs
2519 2519 ) # create a set from an iterable of tuples
2520 2520 succstocheck = list(seensuccs)
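# Walk the successor graph: a successor that was itself obsoleted
# contributes its own successors as further replacements.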
2521 2521 while succstocheck:
2522 2522 n = succstocheck.pop()
2523 2523 missing = get_rev(n) is None
2524 2524 markers = obsstore.successors.get(n, ())
2525 2525 if missing and not markers:
2526 2526 # dead end, mark it as such
2527 2527 newreplacements.append((n, ()))
2528 2528 for marker in markers:
2529 2529 nsuccs = marker[1]
2530 2530 newreplacements.append((n, nsuccs))
2531 2531 for nsucc in nsuccs:
2532 2532 if nsucc not in seensuccs:
2533 2533 seensuccs.add(nsucc)
2534 2534 succstocheck.append(nsucc)
2535 2535
2536 2536 return newreplacements
2537 2537
2538 2538
2539 2539 def processreplacement(state):
2540 2540 """process the list of replacements to return
2541 2541
2542 2542 1) the final mapping between original and created nodes
2543 2543 2) the list of temporary nodes created by histedit
2544 2544 3) the list of new commits created by histedit"""
2545 2545 replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
2546 2546 allsuccs = set()
2547 2547 replaced = set()
2548 2548 fullmapping = {}
2549 2549 # initialize basic set
2550 2550 # fullmapping records all operations recorded in replacement
2551 2551 for rep in replacements:
2552 2552 allsuccs.update(rep[1])
2553 2553 replaced.add(rep[0])
2554 2554 fullmapping.setdefault(rep[0], set()).update(rep[1])
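# Successors that are never themselves replaced are the new surviving
# commits; nodes that appear as both successor and replaced are temporary
# intermediates.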
2555 2555 new = allsuccs - replaced
2556 2556 tmpnodes = allsuccs & replaced
2557 2557 # Reduce fullmapping into a direct relation between original nodes
2558 2558 # and the final nodes created during history editing
2559 2559 # Dropped changesets are replaced by an empty list
2560 2560 toproceed = set(fullmapping)
2561 2561 final = {}
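# Repeatedly substitute intermediate successors with their own final
# successors until every entry maps to surviving nodes only.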
2562 2562 while toproceed:
2563 2563 for x in list(toproceed):
2564 2564 succs = fullmapping[x]
2565 2565 for s in list(succs):
2566 2566 if s in toproceed:
2567 2567 # non final node with unknown closure
2568 2568 # We can't process this now
2569 2569 break
2570 2570 elif s in final:
2571 2571 # non final node, replace with closure
2572 2572 succs.remove(s)
2573 2573 succs.update(final[s])
2574 2574 else:
2575 2575 final[x] = succs
2576 2576 toproceed.remove(x)
2577 2577 # remove tmpnodes from final mapping
2578 2578 for n in tmpnodes:
2579 2579 del final[n]
2580 2580 # we expect all changes involved in final to exist in the repo
2581 2581 # turn each `final` value into a list (topologically sorted)
2582 2582 get_rev = state.repo.changelog.index.get_rev
2583 2583 for prec, succs in final.items():
2584 2584 final[prec] = sorted(succs, key=get_rev)
2585 2585
2586 2586 # compute the topmost element (necessary for bookmarks)
2587 2587 if new:
2588 2588 newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
2589 2589 elif not final:
2590 2590 # Nothing was rewritten at all. We won't need `newtopmost`:
2591 2591 # it is the same as `oldtopmost`, and callers of `processreplacement` know it.
2592 2592 newtopmost = None
2593 2593 else:
2594 2594 # every changeset was dropped. The newtopmost is the parent of the root.
2595 2595 r = state.repo.changelog.rev
2596 2596 newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()
2597 2597
2598 2598 return final, tmpnodes, new, newtopmost
2599 2599
2600 2600
2601 2601 def movetopmostbookmarks(repo, oldtopmost, newtopmost):
2602 2602 """Move bookmark from oldtopmost to newly created topmost
2603 2603
2604 2604 This is arguably a feature and we may only want that for the active
2605 2605 bookmark. But the behavior is kept compatible with the old version for now.
2606 2606 """
2607 2607 if not oldtopmost or not newtopmost:
2608 2608 return
2609 2609 oldbmarks = repo.nodebookmarks(oldtopmost)
2610 2610 if oldbmarks:
2611 2611 with repo.lock(), repo.transaction(b'histedit') as tr:
2612 2612 marks = repo._bookmarks
2613 2613 changes = []
2614 2614 for name in oldbmarks:
2615 2615 changes.append((name, newtopmost))
2616 2616 marks.applychanges(repo, tr, changes)
2617 2617
2618 2618
2619 2619 def cleanupnode(ui, repo, nodes, nobackup=False):
2620 2620 """strip a group of nodes from the repository
2621 2621
2622 2622 The set of nodes to strip may contain unknown nodes."""
2623 2623 with repo.lock():
2624 2624 # do not let filtering get in the way of the cleanse
2625 2625 # we should probably get rid of obsolescence markers created during the
2626 2626 # histedit, but we currently do not have such information.
2627 2627 repo = repo.unfiltered()
2628 2628 # Find all nodes that need to be stripped
2629 2629 # (we use %lr instead of %ln to silently ignore unknown items)
2630 2630 has_node = repo.changelog.index.has_node
2631 2631 nodes = sorted(n for n in nodes if has_node(n))
2632 2632 roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
2633 2633 if roots:
2634 2634 backup = not nobackup
2635 2635 repair.strip(ui, repo, roots, backup=backup)
2636 2636
2637 2637
2638 2638 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
2639 2639 if isinstance(nodelist, bytes):
2640 2640 nodelist = [nodelist]
2641 2641 state = histeditstate(repo)
2642 2642 if state.inprogress():
2643 2643 state.read()
2644 2644 histedit_nodes = {
2645 2645 action.node for action in state.actions if action.node
2646 2646 }
2647 2647 common_nodes = histedit_nodes & set(nodelist)
2648 2648 if common_nodes:
2649 2649 raise error.Abort(
2650 2650 _(b"histedit in progress, can't strip %s")
2651 2651 % b', '.join(short(x) for x in common_nodes)
2652 2652 )
2653 2653 return orig(ui, repo, nodelist, *args, **kwargs)
2654 2654
2655 2655
2656 2656 extensions.wrapfunction(repair, b'strip', stripwrapper)
2657 2657
2658 2658
2659 2659 def summaryhook(ui, repo):
2660 2660 state = histeditstate(repo)
2661 2661 if not state.inprogress():
2662 2662 return
2663 2663 state.read()
2664 2664 if state.actions:
2665 2665 # i18n: column positioning for "hg summary"
2666 2666 ui.write(
2667 2667 _(b'hist: %s (histedit --continue)\n')
2668 2668 % (
2669 2669 ui.label(_(b'%d remaining'), b'histedit.remaining')
2670 2670 % len(state.actions)
2671 2671 )
2672 2672 )
2673 2673
2674 2674
2675 2675 def extsetup(ui):
2676 2676 cmdutil.summaryhooks.add(b'histedit', summaryhook)
2677 2677 statemod.addunfinished(
2678 2678 b'histedit',
2679 2679 fname=b'histedit-state',
2680 2680 allowcommit=True,
2681 2681 continueflag=True,
2682 2682 abortfunc=hgaborthistedit,
2683 2683 )
@@ -1,4314 +1,4310 b''
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help COMMAND` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 file creations or deletions. This behavior can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 makes them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60
61 61 This extension used to provide a strip command. This command now lives
62 62 in the strip extension.
63 63 '''
64 64
65 65
66 66 import errno
67 67 import os
68 68 import re
69 69 import shutil
70 70 import sys
71 71 from mercurial.i18n import _
72 72 from mercurial.node import (
73 73 bin,
74 74 hex,
75 75 nullrev,
76 76 short,
77 77 )
78 78 from mercurial.pycompat import (
79 79 delattr,
80 80 getattr,
81 81 open,
82 82 )
83 83 from mercurial import (
84 84 cmdutil,
85 85 commands,
86 86 dirstateguard,
87 87 encoding,
88 88 error,
89 89 extensions,
90 90 hg,
91 91 localrepo,
92 92 lock as lockmod,
93 93 logcmdutil,
94 94 patch as patchmod,
95 95 phases,
96 96 pycompat,
97 97 registrar,
98 98 revsetlang,
99 99 scmutil,
100 100 smartset,
101 101 strip,
102 102 subrepoutil,
103 103 util,
104 104 vfs as vfsmod,
105 105 )
106 106 from mercurial.utils import (
107 107 dateutil,
108 108 stringutil,
109 109 urlutil,
110 110 )
111 111
112 112 release = lockmod.release
113 113 seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))]
114 114
115 115 cmdtable = {}
116 116 command = registrar.command(cmdtable)
117 117 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
118 118 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
119 119 # be specifying the version(s) of Mercurial they are tested with, or
120 120 # leave the attribute unspecified.
121 121 testedwith = b'ships-with-hg-core'
122 122
123 123 configtable = {}
124 124 configitem = registrar.configitem(configtable)
125 125
126 126 configitem(
127 127 b'mq',
128 128 b'git',
129 129 default=b'auto',
130 130 )
131 131 configitem(
132 132 b'mq',
133 133 b'keepchanges',
134 134 default=False,
135 135 )
136 136 configitem(
137 137 b'mq',
138 138 b'plain',
139 139 default=False,
140 140 )
141 141 configitem(
142 142 b'mq',
143 143 b'secret',
144 144 default=False,
145 145 )
146 146
147 147 # force load strip extension formerly included in mq and import some utility
148 148 try:
149 149 extensions.find(b'strip')
150 150 except KeyError:
151 151 # note: load is lazy so we could avoid the try-except,
152 152 # but I (marmoute) prefer this explicit code.
153 153 class dummyui:
154 154 def debug(self, msg):
155 155 pass
156 156
157 157 def log(self, event, msgfmt, *msgargs, **opts):
158 158 pass
159 159
160 160 extensions.load(dummyui(), b'strip', b'')
161 161
162 162 strip = strip.strip
163 163
164 164
165 165 def checksubstate(repo, baserev=None):
166 166 """return list of subrepos at a different revision than substate.
167 167 Abort if any subrepos have uncommitted changes."""
168 168 inclsubs = []
169 169 wctx = repo[None]
170 170 if baserev:
171 171 bctx = repo[baserev]
172 172 else:
173 173 bctx = wctx.p1()
174 174 for s in sorted(wctx.substate):
175 175 wctx.sub(s).bailifchanged(True)
176 176 if s not in bctx.substate or bctx.sub(s).dirty():
177 177 inclsubs.append(s)
178 178 return inclsubs
179 179
180 180
181 181 # Patch names look like unix-file names.
182 182 # They must be joinable with the queue directory and result in the patch path.
183 183 normname = util.normpath
184 184
185 185
186 186 class statusentry:
187 187 def __init__(self, node, name):
188 188 self.node, self.name = node, name
189 189
190 190 def __bytes__(self):
191 191 return hex(self.node) + b':' + self.name
192 192
193 193 __str__ = encoding.strmethod(__bytes__)
194 194 __repr__ = encoding.strmethod(__bytes__)
195 195
196 196
197 197 # The order of the headers in 'hg export' HG patches:
198 198 HGHEADERS = [
199 199 # '# HG changeset patch',
200 200 b'# User ',
201 201 b'# Date ',
202 202 b'# ',
203 203 b'# Branch ',
204 204 b'# Node ID ',
205 205 b'# Parent ', # can occur twice for merges - but that is not relevant for mq
206 206 ]
207 207 # The order of headers in plain 'mail style' patches:
208 208 PLAINHEADERS = {
209 209 b'from': 0,
210 210 b'date': 1,
211 211 b'subject': 2,
212 212 }
213 213
214 214
215 215 def inserthgheader(lines, header, value):
216 216 """Assuming lines contains a HG patch header, add a header line with value.
217 217 >>> try: inserthgheader([], b'# Date ', b'z')
218 218 ... except ValueError as inst: print("oops")
219 219 oops
220 220 >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
221 221 ['# HG changeset patch', '# Date z']
222 222 >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
223 223 ['# HG changeset patch', '# Date z', '']
224 224 >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
225 225 ['# HG changeset patch', '# User y', '# Date z']
226 226 >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
227 227 ... b'# User ', b'z')
228 228 ['# HG changeset patch', '# Date x', '# User z']
229 229 >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
230 230 ['# HG changeset patch', '# Date z']
231 231 >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
232 232 ... b'# Date ', b'z')
233 233 ['# HG changeset patch', '# Date z', '', '# Date y']
234 234 >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
235 235 ... b'# Date ', b'z')
236 236 ['# HG changeset patch', '# Date z', '# Parent y']
237 237 """
238 238 start = lines.index(b'# HG changeset patch') + 1
239 239 newindex = HGHEADERS.index(header)
240 240 bestpos = len(lines)
241 241 for i in range(start, len(lines)):
242 242 line = lines[i]
243 243 if not line.startswith(b'# '):
244 244 bestpos = min(bestpos, i)
245 245 break
246 246 for lineindex, h in enumerate(HGHEADERS):
247 247 if line.startswith(h):
248 248 if lineindex == newindex:
249 249 lines[i] = header + value
250 250 return lines
251 251 if lineindex > newindex:
252 252 bestpos = min(bestpos, i)
253 253 break # next line
254 254 lines.insert(bestpos, header + value)
255 255 return lines
256 256
257 257
258 258 def insertplainheader(lines, header, value):
259 259 """For lines containing a plain patch header, add a header line with value.
260 260 >>> insertplainheader([], b'Date', b'z')
261 261 ['Date: z']
262 262 >>> insertplainheader([b''], b'Date', b'z')
263 263 ['Date: z', '']
264 264 >>> insertplainheader([b'x'], b'Date', b'z')
265 265 ['Date: z', '', 'x']
266 266 >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
267 267 ['From: y', 'Date: z', '', 'x']
268 268 >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
269 269 [' date : x', 'From: z', '']
270 270 >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
271 271 ['Date: z', '', 'Date: y']
272 272 >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
273 273 ['From: y', 'foo: bar', 'DATE: z', '', 'x']
274 274 """
275 275 newprio = PLAINHEADERS[header.lower()]
276 276 bestpos = len(lines)
277 277 for i, line in enumerate(lines):
278 278 if b':' in line:
279 279 lheader = line.split(b':', 1)[0].strip().lower()
280 280 lprio = PLAINHEADERS.get(lheader, newprio + 1)
281 281 if lprio == newprio:
282 282 lines[i] = b'%s: %s' % (header, value)
283 283 return lines
284 284 if lprio > newprio and i < bestpos:
285 285 bestpos = i
286 286 else:
287 287 if line:
288 288 lines.insert(i, b'')
289 289 if i < bestpos:
290 290 bestpos = i
291 291 break
292 292 lines.insert(bestpos, b'%s: %s' % (header, value))
293 293 return lines
294 294
295 295
296 296 class patchheader:
297 297 def __init__(self, pf, plainmode=False):
298 298 def eatdiff(lines):
299 299 while lines:
300 300 l = lines[-1]
301 301 if (
302 302 l.startswith(b"diff -")
303 303 or l.startswith(b"Index:")
304 304 or l.startswith(b"===========")
305 305 ):
306 306 del lines[-1]
307 307 else:
308 308 break
309 309
310 310 def eatempty(lines):
311 311 while lines:
312 312 if not lines[-1].strip():
313 313 del lines[-1]
314 314 else:
315 315 break
316 316
317 317 message = []
318 318 comments = []
319 319 user = None
320 320 date = None
321 321 parent = None
322 322 format = None
323 323 subject = None
324 324 branch = None
325 325 nodeid = None
326 326 diffstart = 0
327 327
328 328 for line in open(pf, b'rb'):
329 329 line = line.rstrip()
330 330 if line.startswith(b'diff --git') or (
331 331 diffstart and line.startswith(b'+++ ')
332 332 ):
333 333 diffstart = 2
334 334 break
335 335 diffstart = 0 # reset
336 336 if line.startswith(b"--- "):
337 337 diffstart = 1
338 338 continue
339 339 elif format == b"hgpatch":
340 340 # parse values when importing the result of an hg export
341 341 if line.startswith(b"# User "):
342 342 user = line[7:]
343 343 elif line.startswith(b"# Date "):
344 344 date = line[7:]
345 345 elif line.startswith(b"# Parent "):
346 346 parent = line[9:].lstrip() # handle double trailing space
347 347 elif line.startswith(b"# Branch "):
348 348 branch = line[9:]
349 349 elif line.startswith(b"# Node ID "):
350 350 nodeid = line[10:]
351 351 elif not line.startswith(b"# ") and line:
352 352 message.append(line)
353 353 format = None
354 354 elif line == b'# HG changeset patch':
355 355 message = []
356 356 format = b"hgpatch"
357 357 elif format != b"tagdone" and (
358 358 line.startswith(b"Subject: ") or line.startswith(b"subject: ")
359 359 ):
360 360 subject = line[9:]
361 361 format = b"tag"
362 362 elif format != b"tagdone" and (
363 363 line.startswith(b"From: ") or line.startswith(b"from: ")
364 364 ):
365 365 user = line[6:]
366 366 format = b"tag"
367 367 elif format != b"tagdone" and (
368 368 line.startswith(b"Date: ") or line.startswith(b"date: ")
369 369 ):
370 370 date = line[6:]
371 371 format = b"tag"
372 372 elif format == b"tag" and line == b"":
373 373 # when looking for tags (subject: from: etc) they
374 374 # end once you find a blank line in the source
375 375 format = b"tagdone"
376 376 elif message or line:
377 377 message.append(line)
378 378 comments.append(line)
379 379
380 380 eatdiff(message)
381 381 eatdiff(comments)
382 382 # Remember the exact starting line of the patch diffs before consuming
383 383 # empty lines, for external use by TortoiseHg and others
384 384 self.diffstartline = len(comments)
385 385 eatempty(message)
386 386 eatempty(comments)
387 387
388 388 # make sure message isn't empty
389 389 if format and format.startswith(b"tag") and subject:
390 390 message.insert(0, subject)
391 391
392 392 self.message = message
393 393 self.comments = comments
394 394 self.user = user
395 395 self.date = date
396 396 self.parent = parent
397 397 # nodeid and branch are for external use by TortoiseHg and others
398 398 self.nodeid = nodeid
399 399 self.branch = branch
400 400 self.haspatch = diffstart > 1
401 401 self.plainmode = (
402 402 plainmode
403 403 or b'# HG changeset patch' not in self.comments
404 404 and any(
405 405 c.startswith(b'Date: ') or c.startswith(b'From: ')
406 406 for c in self.comments
407 407 )
408 408 )
409 409
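For orientation, the parsing loop above consumes `hg export`-style headers such as the sketch below; every field value here is a placeholder invented for illustration, not data from this changeset:

    # Hypothetical input that patchheader() would accept:
    sample = b'\n'.join([
        b'# HG changeset patch',
        b'# User Alice <alice@example.invalid>',
        b'# Date 0 0',
        b'# Node ID 0123456789abcdef0123456789abcdef01234567',
        b'# Parent  89abcdef0123456789abcdef0123456789abcdef',
        b'fix a bug',
        b'',
        b'diff --git a/foo b/foo',
    ])
    # Written to a file and handed to patchheader(), this should yield
    # format == b'hgpatch', haspatch == True, message == [b'fix a bug'].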
410 410 def setuser(self, user):
411 411 try:
412 412 inserthgheader(self.comments, b'# User ', user)
413 413 except ValueError:
414 414 if self.plainmode:
415 415 insertplainheader(self.comments, b'From', user)
416 416 else:
417 417 tmp = [b'# HG changeset patch', b'# User ' + user]
418 418 self.comments = tmp + self.comments
419 419 self.user = user
420 420
421 421 def setdate(self, date):
422 422 try:
423 423 inserthgheader(self.comments, b'# Date ', date)
424 424 except ValueError:
425 425 if self.plainmode:
426 426 insertplainheader(self.comments, b'Date', date)
427 427 else:
428 428 tmp = [b'# HG changeset patch', b'# Date ' + date]
429 429 self.comments = tmp + self.comments
430 430 self.date = date
431 431
432 432 def setparent(self, parent):
433 433 try:
434 434 inserthgheader(self.comments, b'# Parent ', parent)
435 435 except ValueError:
436 436 if not self.plainmode:
437 437 tmp = [b'# HG changeset patch', b'# Parent ' + parent]
438 438 self.comments = tmp + self.comments
439 439 self.parent = parent
440 440
441 441 def setmessage(self, message):
442 442 if self.comments:
443 443 self._delmsg()
444 444 self.message = [message]
445 445 if message:
446 446 if self.plainmode and self.comments and self.comments[-1]:
447 447 self.comments.append(b'')
448 448 self.comments.append(message)
449 449
450 450 def __bytes__(self):
451 451 s = b'\n'.join(self.comments).rstrip()
452 452 if not s:
453 453 return b''
454 454 return s + b'\n\n'
455 455
456 456 __str__ = encoding.strmethod(__bytes__)
457 457
458 458 def _delmsg(self):
459 459 """Remove existing message, keeping the rest of the comments fields.
460 460 If comments contains 'subject: ', message will prepend
461 461 the field and a blank line."""
462 462 if self.message:
463 463 subj = b'subject: ' + self.message[0].lower()
464 for i in pycompat.xrange(len(self.comments)):
464 for i in range(len(self.comments)):
465 465 if subj == self.comments[i].lower():
466 466 del self.comments[i]
467 467 self.message = self.message[2:]
468 468 break
469 469 ci = 0
470 470 for mi in self.message:
471 471 while mi != self.comments[ci]:
472 472 ci += 1
473 473 del self.comments[ci]
474 474
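This hunk is one of the commit's `pycompat.xrange` -> `range` substitutions: the shim existed for py2/py3 portability, and on Python 3 it was, by all indications, simply an alias for the built-in `range`, which is already lazy, so the swap is behavior-preserving. A minimal sketch:

    # On Python 3 pycompat effectively did `xrange = range`, so call
    # sites may use the builtin directly; neither materializes a list.
    comments = [b'subject: x', b'', b'body']
    xrange = range  # what the py3 branch of pycompat provided
    assert list(xrange(len(comments))) == list(range(len(comments)))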
475 475
476 476 def newcommit(repo, phase, *args, **kwargs):
477 477 """helper dedicated to ensure a commit respect mq.secret setting
478 478
479 479 It should be used instead of repo.commit inside the mq source for operations
480 480 creating new changesets.
481 481 """
482 482 repo = repo.unfiltered()
483 483 if phase is None:
484 484 if repo.ui.configbool(b'mq', b'secret'):
485 485 phase = phases.secret
486 486 overrides = {(b'ui', b'allowemptycommit'): True}
487 487 if phase is not None:
488 488 overrides[(b'phases', b'new-commit')] = phase
489 489 with repo.ui.configoverride(overrides, b'mq'):
490 490 repo.ui.setconfig(b'ui', b'allowemptycommit', True)
491 491 return repo.commit(*args, **kwargs)
492 492
493 493
494 494 class AbortNoCleanup(error.Abort):
495 495 pass
496 496
497 497
498 498 class queue:
499 499 def __init__(self, ui, baseui, path, patchdir=None):
500 500 self.basepath = path
501 501 try:
502 502 with open(os.path.join(path, b'patches.queue'), 'rb') as fh:
503 503 cur = fh.read().rstrip()
504 504
505 505 if not cur:
506 506 curpath = os.path.join(path, b'patches')
507 507 else:
508 508 curpath = os.path.join(path, b'patches-' + cur)
509 509 except IOError:
510 510 curpath = os.path.join(path, b'patches')
511 511 self.path = patchdir or curpath
512 512 self.opener = vfsmod.vfs(self.path)
513 513 self.ui = ui
514 514 self.baseui = baseui
515 515 self.applieddirty = False
516 516 self.seriesdirty = False
517 517 self.added = []
518 518 self.seriespath = b"series"
519 519 self.statuspath = b"status"
520 520 self.guardspath = b"guards"
521 521 self.activeguards = None
522 522 self.guardsdirty = False
523 523 # Handle mq.git as a bool with extended values
524 524 gitmode = ui.config(b'mq', b'git').lower()
525 525 boolmode = stringutil.parsebool(gitmode)
526 526 if boolmode is not None:
527 527 if boolmode:
528 528 gitmode = b'yes'
529 529 else:
530 530 gitmode = b'no'
531 531 self.gitmode = gitmode
532 532 # deprecated config: mq.plain
533 533 self.plainmode = ui.configbool(b'mq', b'plain')
534 534 self.checkapplied = True
535 535
536 536 @util.propertycache
537 537 def applied(self):
538 538 def parselines(lines):
539 539 for l in lines:
540 540 entry = l.split(b':', 1)
541 541 if len(entry) > 1:
542 542 n, name = entry
543 543 yield statusentry(bin(n), name)
544 544 elif l.strip():
545 545 self.ui.warn(
546 546 _(b'malformed mq status line: %s\n')
547 547 % stringutil.pprint(entry)
548 548 )
549 549 # else we ignore empty lines
550 550
551 551 try:
552 552 lines = self.opener.read(self.statuspath).splitlines()
553 553 return list(parselines(lines))
554 554 except IOError as e:
555 555 if e.errno == errno.ENOENT:
556 556 return []
557 557 raise
558 558
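As `parselines` shows, each line of the on-disk status file pairs a hex changeset node with a patch name, joined by a colon; `statusentry.__bytes__` writes the same shape back. A self-contained sketch of that round-trip, with a made-up node and name:

    from binascii import hexlify, unhexlify

    # One status line per applied patch: b'<40-hex-node>:<patch name>'
    line = b'0123456789abcdef0123456789abcdef01234567:fix-foo.diff'
    n, name = line.split(b':', 1)
    node = unhexlify(n)  # what mercurial.node.bin() does above
    assert hexlify(node) + b':' + name == line  # statusentry.__bytes__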
559 559 @util.propertycache
560 560 def fullseries(self):
561 561 try:
562 562 return self.opener.read(self.seriespath).splitlines()
563 563 except IOError as e:
564 564 if e.errno == errno.ENOENT:
565 565 return []
566 566 raise
567 567
568 568 @util.propertycache
569 569 def series(self):
570 570 self.parseseries()
571 571 return self.series
572 572
573 573 @util.propertycache
574 574 def seriesguards(self):
575 575 self.parseseries()
576 576 return self.seriesguards
577 577
578 578 def invalidate(self):
579 579 for a in 'applied fullseries series seriesguards'.split():
580 580 if a in self.__dict__:
581 581 delattr(self, a)
582 582 self.applieddirty = False
583 583 self.seriesdirty = False
584 584 self.guardsdirty = False
585 585 self.activeguards = None
586 586
587 587 def diffopts(self, opts=None, patchfn=None, plain=False):
588 588 """Return diff options tweaked for this mq use, possibly upgrading to
589 589 git format, and possibly plain and without lossy options."""
590 590 diffopts = patchmod.difffeatureopts(
591 591 self.ui,
592 592 opts,
593 593 git=True,
594 594 whitespace=not plain,
595 595 formatchanging=not plain,
596 596 )
597 597 if self.gitmode == b'auto':
598 598 diffopts.upgrade = True
599 599 elif self.gitmode == b'keep':
600 600 pass
601 601 elif self.gitmode in (b'yes', b'no'):
602 602 diffopts.git = self.gitmode == b'yes'
603 603 else:
604 604 raise error.Abort(
605 605 _(b'mq.git option can be auto/keep/yes/no, got %s')
606 606 % self.gitmode
607 607 )
608 608 if patchfn:
609 609 diffopts = self.patchopts(diffopts, patchfn)
610 610 return diffopts
611 611
612 612 def patchopts(self, diffopts, *patches):
613 613 """Return a copy of input diff options with git set to true if
614 614 referenced patch is a git patch and should be preserved as such.
615 615 """
616 616 diffopts = diffopts.copy()
617 617 if not diffopts.git and self.gitmode == b'keep':
618 618 for patchfn in patches:
619 619 patchf = self.opener(patchfn, b'r')
620 620 # if the patch was a git patch, refresh it as a git patch
621 621 diffopts.git = any(
622 622 line.startswith(b'diff --git') for line in patchf
623 623 )
624 624 patchf.close()
625 625 return diffopts
626 626
627 627 def join(self, *p):
628 628 return os.path.join(self.path, *p)
629 629
630 630 def findseries(self, patch):
631 631 def matchpatch(l):
632 632 l = l.split(b'#', 1)[0]
633 633 return l.strip() == patch
634 634
635 635 for index, l in enumerate(self.fullseries):
636 636 if matchpatch(l):
637 637 return index
638 638 return None
639 639
640 640 guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
641 641
642 642 def parseseries(self):
643 643 self.series = []
644 644 self.seriesguards = []
645 645 for l in self.fullseries:
646 646 h = l.find(b'#')
647 647 if h == -1:
648 648 patch = l
649 649 comment = b''
650 650 elif h == 0:
651 651 continue
652 652 else:
653 653 patch = l[:h]
654 654 comment = l[h:]
655 655 patch = patch.strip()
656 656 if patch:
657 657 if patch in self.series:
658 658 raise error.Abort(
659 659 _(b'%s appears more than once in %s')
660 660 % (patch, self.join(self.seriespath))
661 661 )
662 662 self.series.append(patch)
663 663 self.seriesguards.append(self.guard_re.findall(comment))
664 664
665 665 def checkguard(self, guard):
666 666 if not guard:
667 667 return _(b'guard cannot be an empty string')
668 668 bad_chars = b'# \t\r\n\f'
669 669 first = guard[0]
670 670 if first in b'-+':
671 671 return _(b'guard %r starts with invalid character: %r') % (
672 672 guard,
673 673 first,
674 674 )
675 675 for c in bad_chars:
676 676 if c in guard:
677 677 return _(b'invalid character in guard %r: %r') % (guard, c)
678 678
679 679 def setactive(self, guards):
680 680 for guard in guards:
681 681 bad = self.checkguard(guard)
682 682 if bad:
683 683 raise error.Abort(bad)
684 684 guards = sorted(set(guards))
685 685 self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
686 686 self.activeguards = guards
687 687 self.guardsdirty = True
688 688
689 689 def active(self):
690 690 if self.activeguards is None:
691 691 self.activeguards = []
692 692 try:
693 693 guards = self.opener.read(self.guardspath).split()
694 694 except IOError as err:
695 695 if err.errno != errno.ENOENT:
696 696 raise
697 697 guards = []
698 698 for i, guard in enumerate(guards):
699 699 bad = self.checkguard(guard)
700 700 if bad:
701 701 self.ui.warn(
702 702 b'%s:%d: %s\n'
703 703 % (self.join(self.guardspath), i + 1, bad)
704 704 )
705 705 else:
706 706 self.activeguards.append(guard)
707 707 return self.activeguards
708 708
709 709 def setguards(self, idx, guards):
710 710 for g in guards:
711 711 if len(g) < 2:
712 712 raise error.Abort(_(b'guard %r too short') % g)
713 713 if g[0] not in b'-+':
714 714 raise error.Abort(_(b'guard %r starts with invalid char') % g)
715 715 bad = self.checkguard(g[1:])
716 716 if bad:
717 717 raise error.Abort(bad)
718 718 drop = self.guard_re.sub(b'', self.fullseries[idx])
719 719 self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
720 720 self.parseseries()
721 721 self.seriesdirty = True
722 722
723 723 def pushable(self, idx):
724 724 if isinstance(idx, bytes):
725 725 idx = self.series.index(idx)
726 726 patchguards = self.seriesguards[idx]
727 727 if not patchguards:
728 728 return True, None
729 729 guards = self.active()
730 730 exactneg = [
731 731 g for g in patchguards if g.startswith(b'-') and g[1:] in guards
732 732 ]
733 733 if exactneg:
734 734 return False, stringutil.pprint(exactneg[0])
735 735 pos = [g for g in patchguards if g.startswith(b'+')]
736 736 exactpos = [g for g in pos if g[1:] in guards]
737 737 if pos:
738 738 if exactpos:
739 739 return True, stringutil.pprint(exactpos[0])
740 740 return False, b' '.join([stringutil.pprint(p) for p in pos])
741 741 return True, b''
742 742
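The decision rule implemented above: an exactly matching negative guard always blocks a patch, and when any positive guards exist, at least one of them must be active. A simplified standalone restatement, with hypothetical guard names:

    def simplified_pushable(patchguards, active):
        # mirrors queue.pushable() above, minus the explanation strings
        if any(g.startswith(b'-') and g[1:] in active for g in patchguards):
            return False  # an active negative guard blocks the patch
        pos = [g for g in patchguards if g.startswith(b'+')]
        if pos:
            return any(g[1:] in active for g in pos)
        return True  # no positive guards: pushable by default

    assert simplified_pushable([b'+stable'], {b'stable'})
    assert not simplified_pushable([b'+stable'], set())
    assert not simplified_pushable([b'-broken'], {b'broken'})
    assert simplified_pushable([b'-broken'], set())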
743 743 def explainpushable(self, idx, all_patches=False):
744 744 if all_patches:
745 745 write = self.ui.write
746 746 else:
747 747 write = self.ui.warn
748 748
749 749 if all_patches or self.ui.verbose:
750 750 if isinstance(idx, bytes):
751 751 idx = self.series.index(idx)
752 752 pushable, why = self.pushable(idx)
753 753 if all_patches and pushable:
754 754 if why is None:
755 755 write(
756 756 _(b'allowing %s - no guards in effect\n')
757 757 % self.series[idx]
758 758 )
759 759 else:
760 760 if not why:
761 761 write(
762 762 _(b'allowing %s - no matching negative guards\n')
763 763 % self.series[idx]
764 764 )
765 765 else:
766 766 write(
767 767 _(b'allowing %s - guarded by %s\n')
768 768 % (self.series[idx], why)
769 769 )
770 770 if not pushable:
771 771 if why:
772 772 write(
773 773 _(b'skipping %s - guarded by %s\n')
774 774 % (self.series[idx], why)
775 775 )
776 776 else:
777 777 write(
778 778 _(b'skipping %s - no matching guards\n')
779 779 % self.series[idx]
780 780 )
781 781
782 782 def savedirty(self):
783 783 def writelist(items, path):
784 784 fp = self.opener(path, b'wb')
785 785 for i in items:
786 786 fp.write(b"%s\n" % i)
787 787 fp.close()
788 788
789 789 if self.applieddirty:
790 790 writelist(map(bytes, self.applied), self.statuspath)
791 791 self.applieddirty = False
792 792 if self.seriesdirty:
793 793 writelist(self.fullseries, self.seriespath)
794 794 self.seriesdirty = False
795 795 if self.guardsdirty:
796 796 writelist(self.activeguards, self.guardspath)
797 797 self.guardsdirty = False
798 798 if self.added:
799 799 qrepo = self.qrepo()
800 800 if qrepo:
801 801 qrepo[None].add(f for f in self.added if f not in qrepo[None])
802 802 self.added = []
803 803
804 804 def removeundo(self, repo):
805 805 undo = repo.sjoin(b'undo')
806 806 if not os.path.exists(undo):
807 807 return
808 808 try:
809 809 os.unlink(undo)
810 810 except OSError as inst:
811 811 self.ui.warn(
812 812 _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
813 813 )
814 814
815 815 def backup(self, repo, files, copy=False):
816 816 # backup local changes in --force case
817 817 for f in sorted(files):
818 818 absf = repo.wjoin(f)
819 819 if os.path.lexists(absf):
820 820 absorig = scmutil.backuppath(self.ui, repo, f)
821 821 self.ui.note(
822 822 _(b'saving current version of %s as %s\n')
823 823 % (f, os.path.relpath(absorig))
824 824 )
825 825
826 826 if copy:
827 827 util.copyfile(absf, absorig)
828 828 else:
829 829 util.rename(absf, absorig)
830 830
831 831 def printdiff(
832 832 self,
833 833 repo,
834 834 diffopts,
835 835 node1,
836 836 node2=None,
837 837 files=None,
838 838 fp=None,
839 839 changes=None,
840 840 opts=None,
841 841 ):
842 842 if opts is None:
843 843 opts = {}
844 844 stat = opts.get(b'stat')
845 845 m = scmutil.match(repo[node1], files, opts)
846 846 logcmdutil.diffordiffstat(
847 847 self.ui,
848 848 repo,
849 849 diffopts,
850 850 repo[node1],
851 851 repo[node2],
852 852 m,
853 853 changes,
854 854 stat,
855 855 fp,
856 856 )
857 857
858 858 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
859 859 # first try just applying the patch
860 860 (err, n) = self.apply(
861 861 repo, [patch], update_status=False, strict=True, merge=rev
862 862 )
863 863
864 864 if err == 0:
865 865 return (err, n)
866 866
867 867 if n is None:
868 868 raise error.Abort(_(b"apply failed for patch %s") % patch)
869 869
870 870 self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch)
871 871
872 872 # apply failed, strip away that rev and merge.
873 873 hg.clean(repo, head)
874 874 strip(self.ui, repo, [n], update=False, backup=False)
875 875
876 876 ctx = repo[rev]
877 877 ret = hg.merge(ctx, remind=False)
878 878 if ret:
879 879 raise error.Abort(_(b"update returned %d") % ret)
880 880 n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
881 881 if n is None:
882 882 raise error.Abort(_(b"repo commit failed"))
883 883 try:
884 884 ph = patchheader(mergeq.join(patch), self.plainmode)
885 885 except Exception:
886 886 raise error.Abort(_(b"unable to read %s") % patch)
887 887
888 888 diffopts = self.patchopts(diffopts, patch)
889 889 patchf = self.opener(patch, b"w")
890 890 comments = bytes(ph)
891 891 if comments:
892 892 patchf.write(comments)
893 893 self.printdiff(repo, diffopts, head, n, fp=patchf)
894 894 patchf.close()
895 895 self.removeundo(repo)
896 896 return (0, n)
897 897
898 898 def qparents(self, repo, rev=None):
899 899 """return the mq handled parent or p1
900 900
901 901 In some cases where mq ends up being the parent of a merge, the
902 902 appropriate parent may be p2
903 903 (e.g. an in-progress merge started with mq disabled).
904 904
905 905 If no parents are managed by mq, p1 is returned.
906 906 """
907 907 if rev is None:
908 908 (p1, p2) = repo.dirstate.parents()
909 909 if p2 == repo.nullid:
910 910 return p1
911 911 if not self.applied:
912 912 return None
913 913 return self.applied[-1].node
914 914 p1, p2 = repo.changelog.parents(rev)
915 915 if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
916 916 return p2
917 917 return p1
918 918
919 919 def mergepatch(self, repo, mergeq, series, diffopts):
920 920 if not self.applied:
921 921 # each of the patches merged in will have two parents. This
922 922 # can confuse the qrefresh, qdiff, and strip code because it
923 923 # needs to know which parent is actually in the patch queue.
924 924 # So, we insert a merge marker with only one parent. This way
925 925 # the first patch in the queue is never a merge patch
926 926 #
927 927 pname = b".hg.patches.merge.marker"
928 928 n = newcommit(repo, None, b'[mq]: merge marker', force=True)
929 929 self.removeundo(repo)
930 930 self.applied.append(statusentry(n, pname))
931 931 self.applieddirty = True
932 932
933 933 head = self.qparents(repo)
934 934
935 935 for patch in series:
936 936 patch = mergeq.lookup(patch, strict=True)
937 937 if not patch:
938 938 self.ui.warn(_(b"patch %s does not exist\n") % patch)
939 939 return (1, None)
940 940 pushable, reason = self.pushable(patch)
941 941 if not pushable:
942 942 self.explainpushable(patch, all_patches=True)
943 943 continue
944 944 info = mergeq.isapplied(patch)
945 945 if not info:
946 946 self.ui.warn(_(b"patch %s is not applied\n") % patch)
947 947 return (1, None)
948 948 rev = info[1]
949 949 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
950 950 if head:
951 951 self.applied.append(statusentry(head, patch))
952 952 self.applieddirty = True
953 953 if err:
954 954 return (err, head)
955 955 self.savedirty()
956 956 return (0, head)
957 957
958 958 def patch(self, repo, patchfile):
959 959 """Apply patchfile to the working directory.
960 960 patchfile: name of patch file"""
961 961 files = set()
962 962 try:
963 963 fuzz = patchmod.patch(
964 964 self.ui, repo, patchfile, strip=1, files=files, eolmode=None
965 965 )
966 966 return (True, list(files), fuzz)
967 967 except Exception as inst:
968 968 self.ui.note(stringutil.forcebytestr(inst) + b'\n')
969 969 if not self.ui.verbose:
970 970 self.ui.warn(_(b"patch failed, unable to continue (try -v)\n"))
971 971 self.ui.traceback()
972 972 return (False, list(files), False)
973 973
974 974 def apply(
975 975 self,
976 976 repo,
977 977 series,
978 978 list=False,
979 979 update_status=True,
980 980 strict=False,
981 981 patchdir=None,
982 982 merge=None,
983 983 all_files=None,
984 984 tobackup=None,
985 985 keepchanges=False,
986 986 ):
987 987 wlock = lock = tr = None
988 988 try:
989 989 wlock = repo.wlock()
990 990 lock = repo.lock()
991 991 tr = repo.transaction(b"qpush")
992 992 try:
993 993 ret = self._apply(
994 994 repo,
995 995 series,
996 996 list,
997 997 update_status,
998 998 strict,
999 999 patchdir,
1000 1000 merge,
1001 1001 all_files=all_files,
1002 1002 tobackup=tobackup,
1003 1003 keepchanges=keepchanges,
1004 1004 )
1005 1005 tr.close()
1006 1006 self.savedirty()
1007 1007 return ret
1008 1008 except AbortNoCleanup:
1009 1009 tr.close()
1010 1010 self.savedirty()
1011 1011 raise
1012 1012 except: # re-raises
1013 1013 try:
1014 1014 tr.abort()
1015 1015 finally:
1016 1016 self.invalidate()
1017 1017 raise
1018 1018 finally:
1019 1019 release(tr, lock, wlock)
1020 1020 self.removeundo(repo)
1021 1021
1022 1022 def _apply(
1023 1023 self,
1024 1024 repo,
1025 1025 series,
1026 1026 list=False,
1027 1027 update_status=True,
1028 1028 strict=False,
1029 1029 patchdir=None,
1030 1030 merge=None,
1031 1031 all_files=None,
1032 1032 tobackup=None,
1033 1033 keepchanges=False,
1034 1034 ):
1035 1035 """returns (error, hash)
1036 1036
1037 1037 error = 1 for unable to read, 2 for patch failed, 3 for patch
1038 1038 fuzz. tobackup is None or a set of files to backup before they
1039 1039 are modified by a patch.
1040 1040 """
1041 1041 # TODO unify with commands.py
1042 1042 if not patchdir:
1043 1043 patchdir = self.path
1044 1044 err = 0
1045 1045 n = None
1046 1046 for patchname in series:
1047 1047 pushable, reason = self.pushable(patchname)
1048 1048 if not pushable:
1049 1049 self.explainpushable(patchname, all_patches=True)
1050 1050 continue
1051 1051 self.ui.status(_(b"applying %s\n") % patchname)
1052 1052 pf = os.path.join(patchdir, patchname)
1053 1053
1054 1054 try:
1055 1055 ph = patchheader(self.join(patchname), self.plainmode)
1056 1056 except IOError:
1057 1057 self.ui.warn(_(b"unable to read %s\n") % patchname)
1058 1058 err = 1
1059 1059 break
1060 1060
1061 1061 message = ph.message
1062 1062 if not message:
1063 1063 # The commit message should not be translated
1064 1064 message = b"imported patch %s\n" % patchname
1065 1065 else:
1066 1066 if list:
1067 1067 # The commit message should not be translated
1068 1068 message.append(b"\nimported patch %s" % patchname)
1069 1069 message = b'\n'.join(message)
1070 1070
1071 1071 if ph.haspatch:
1072 1072 if tobackup:
1073 1073 touched = patchmod.changedfiles(self.ui, repo, pf)
1074 1074 touched = set(touched) & tobackup
1075 1075 if touched and keepchanges:
1076 1076 raise AbortNoCleanup(
1077 1077 _(b"conflicting local changes found"),
1078 1078 hint=_(b"did you forget to qrefresh?"),
1079 1079 )
1080 1080 self.backup(repo, touched, copy=True)
1081 1081 tobackup = tobackup - touched
1082 1082 (patcherr, files, fuzz) = self.patch(repo, pf)
1083 1083 if all_files is not None:
1084 1084 all_files.update(files)
1085 1085 patcherr = not patcherr
1086 1086 else:
1087 1087 self.ui.warn(_(b"patch %s is empty\n") % patchname)
1088 1088 patcherr, files, fuzz = 0, [], 0
1089 1089
1090 1090 if merge and files:
1091 1091 # Mark as removed/merged and update dirstate parent info
1092 1092 with repo.dirstate.parentchange():
1093 1093 for f in files:
1094 1094 repo.dirstate.update_file_p1(f, p1_tracked=True)
1095 1095 p1 = repo.dirstate.p1()
1096 1096 repo.setparents(p1, merge)
1097 1097
1098 1098 if all_files and b'.hgsubstate' in all_files:
1099 1099 wctx = repo[None]
1100 1100 pctx = repo[b'.']
1101 1101 overwrite = False
1102 1102 mergedsubstate = subrepoutil.submerge(
1103 1103 repo, pctx, wctx, wctx, overwrite
1104 1104 )
1105 1105 files += mergedsubstate.keys()
1106 1106
1107 1107 match = scmutil.matchfiles(repo, files or [])
1108 1108 oldtip = repo.changelog.tip()
1109 1109 n = newcommit(
1110 1110 repo, None, message, ph.user, ph.date, match=match, force=True
1111 1111 )
1112 1112 if repo.changelog.tip() == oldtip:
1113 1113 raise error.Abort(
1114 1114 _(b"qpush exactly duplicates child changeset")
1115 1115 )
1116 1116 if n is None:
1117 1117 raise error.Abort(_(b"repository commit failed"))
1118 1118
1119 1119 if update_status:
1120 1120 self.applied.append(statusentry(n, patchname))
1121 1121
1122 1122 if patcherr:
1123 1123 self.ui.warn(
1124 1124 _(b"patch failed, rejects left in working directory\n")
1125 1125 )
1126 1126 err = 2
1127 1127 break
1128 1128
1129 1129 if fuzz and strict:
1130 1130 self.ui.warn(_(b"fuzz found when applying patch, stopping\n"))
1131 1131 err = 3
1132 1132 break
1133 1133 return (err, n)
1134 1134
1135 1135 def _cleanup(self, patches, numrevs, keep=False):
1136 1136 if not keep:
1137 1137 r = self.qrepo()
1138 1138 if r:
1139 1139 r[None].forget(patches)
1140 1140 for p in patches:
1141 1141 try:
1142 1142 os.unlink(self.join(p))
1143 1143 except OSError as inst:
1144 1144 if inst.errno != errno.ENOENT:
1145 1145 raise
1146 1146
1147 1147 qfinished = []
1148 1148 if numrevs:
1149 1149 qfinished = self.applied[:numrevs]
1150 1150 del self.applied[:numrevs]
1151 1151 self.applieddirty = True
1152 1152
1153 1153 unknown = []
1154 1154
1155 1155 sortedseries = []
1156 1156 for p in patches:
1157 1157 idx = self.findseries(p)
1158 1158 if idx is None:
1159 1159 sortedseries.append((-1, p))
1160 1160 else:
1161 1161 sortedseries.append((idx, p))
1162 1162
1163 1163 sortedseries.sort(reverse=True)
1164 1164 for (i, p) in sortedseries:
1165 1165 if i != -1:
1166 1166 del self.fullseries[i]
1167 1167 else:
1168 1168 unknown.append(p)
1169 1169
1170 1170 if unknown:
1171 1171 if numrevs:
1172 1172 rev = {entry.name: entry.node for entry in qfinished}
1173 1173 for p in unknown:
1174 1174 msg = _(b'revision %s refers to unknown patches: %s\n')
1175 1175 self.ui.warn(msg % (short(rev[p]), p))
1176 1176 else:
1177 1177 msg = _(b'unknown patches: %s\n')
1178 1178 raise error.Abort(b''.join(msg % p for p in unknown))
1179 1179
1180 1180 self.parseseries()
1181 1181 self.seriesdirty = True
1182 1182 return [entry.node for entry in qfinished]
1183 1183
1184 1184 def _revpatches(self, repo, revs):
1185 1185 firstrev = repo[self.applied[0].node].rev()
1186 1186 patches = []
1187 1187 for i, rev in enumerate(revs):
1188 1188
1189 1189 if rev < firstrev:
1190 1190 raise error.Abort(_(b'revision %d is not managed') % rev)
1191 1191
1192 1192 ctx = repo[rev]
1193 1193 base = self.applied[i].node
1194 1194 if ctx.node() != base:
1195 1195 msg = _(b'cannot delete revision %d above applied patches')
1196 1196 raise error.Abort(msg % rev)
1197 1197
1198 1198 patch = self.applied[i].name
1199 1199 for fmt in (b'[mq]: %s', b'imported patch %s'):
1200 1200 if ctx.description() == fmt % patch:
1201 1201 msg = _(b'patch %s finalized without changeset message\n')
1202 1202 repo.ui.status(msg % patch)
1203 1203 break
1204 1204
1205 1205 patches.append(patch)
1206 1206 return patches
1207 1207
1208 1208 def finish(self, repo, revs):
1209 1209 # Manually trigger phase computation to ensure phasedefaults is
1210 1210 # executed before we remove the patches.
1211 1211 repo._phasecache
1212 1212 patches = self._revpatches(repo, sorted(revs))
1213 1213 qfinished = self._cleanup(patches, len(patches))
1214 1214 if qfinished and repo.ui.configbool(b'mq', b'secret'):
1215 1215 # only use this logic when the secret option is added
1216 1216 oldqbase = repo[qfinished[0]]
1217 1217 tphase = phases.newcommitphase(repo.ui)
1218 1218 if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
1219 1219 with repo.transaction(b'qfinish') as tr:
1220 1220 phases.advanceboundary(repo, tr, tphase, qfinished)
1221 1221
1222 1222 def delete(self, repo, patches, opts):
1223 1223 if not patches and not opts.get(b'rev'):
1224 1224 raise error.Abort(
1225 1225 _(b'qdelete requires at least one revision or patch name')
1226 1226 )
1227 1227
1228 1228 realpatches = []
1229 1229 for patch in patches:
1230 1230 patch = self.lookup(patch, strict=True)
1231 1231 info = self.isapplied(patch)
1232 1232 if info:
1233 1233 raise error.Abort(_(b"cannot delete applied patch %s") % patch)
1234 1234 if patch not in self.series:
1235 1235 raise error.Abort(_(b"patch %s not in series file") % patch)
1236 1236 if patch not in realpatches:
1237 1237 realpatches.append(patch)
1238 1238
1239 1239 numrevs = 0
1240 1240 if opts.get(b'rev'):
1241 1241 if not self.applied:
1242 1242 raise error.Abort(_(b'no patches applied'))
1243 1243 revs = logcmdutil.revrange(repo, opts.get(b'rev'))
1244 1244 revs.sort()
1245 1245 revpatches = self._revpatches(repo, revs)
1246 1246 realpatches += revpatches
1247 1247 numrevs = len(revpatches)
1248 1248
1249 1249 self._cleanup(realpatches, numrevs, opts.get(b'keep'))
1250 1250
1251 1251 def checktoppatch(self, repo):
1252 1252 '''check that working directory is at qtip'''
1253 1253 if self.applied:
1254 1254 top = self.applied[-1].node
1255 1255 patch = self.applied[-1].name
1256 1256 if repo.dirstate.p1() != top:
1257 1257 raise error.Abort(_(b"working directory revision is not qtip"))
1258 1258 return top, patch
1259 1259 return None, None
1260 1260
1261 1261 def putsubstate2changes(self, substatestate, changes):
1262 1262 if isinstance(changes, list):
1263 1263 mar = changes[:3]
1264 1264 else:
1265 1265 mar = (changes.modified, changes.added, changes.removed)
1266 1266 if any((b'.hgsubstate' in files for files in mar)):
1267 1267 return # already listed
1268 1268 # not yet listed
1269 1269 if substatestate.added or not substatestate.any_tracked:
1270 1270 mar[1].append(b'.hgsubstate')
1271 1271 elif substatestate.removed:
1272 1272 mar[2].append(b'.hgsubstate')
1273 1273 else: # modified
1274 1274 mar[0].append(b'.hgsubstate')
1275 1275
1276 1276 def checklocalchanges(self, repo, force=False, refresh=True):
1277 1277 excsuffix = b''
1278 1278 if refresh:
1279 1279 excsuffix = b', qrefresh first'
1280 1280 # plain versions for i18n tool to detect them
1281 1281 _(b"local changes found, qrefresh first")
1282 1282 _(b"local changed subrepos found, qrefresh first")
1283 1283
1284 1284 s = repo.status()
1285 1285 if not force:
1286 1286 cmdutil.checkunfinished(repo)
1287 1287 if s.modified or s.added or s.removed or s.deleted:
1288 1288 _(b"local changes found") # i18n tool detection
1289 1289 raise error.Abort(_(b"local changes found" + excsuffix))
1290 1290 if checksubstate(repo):
1291 1291 _(b"local changed subrepos found") # i18n tool detection
1292 1292 raise error.Abort(
1293 1293 _(b"local changed subrepos found" + excsuffix)
1294 1294 )
1295 1295 else:
1296 1296 cmdutil.checkunfinished(repo, skipmerge=True)
1297 1297 return s
1298 1298
1299 1299 _reserved = (b'series', b'status', b'guards', b'.', b'..')
1300 1300
1301 1301 def checkreservedname(self, name):
1302 1302 if name in self._reserved:
1303 1303 raise error.Abort(
1304 1304 _(b'"%s" cannot be used as the name of a patch') % name
1305 1305 )
1306 1306 if name != name.strip():
1307 1307 # whitespace is stripped by parseseries()
1308 1308 raise error.Abort(
1309 1309 _(b'patch name cannot begin or end with whitespace')
1310 1310 )
1311 1311 for prefix in (b'.hg', b'.mq'):
1312 1312 if name.startswith(prefix):
1313 1313 raise error.Abort(
1314 1314 _(b'patch name cannot begin with "%s"') % prefix
1315 1315 )
1316 1316 for c in (b'#', b':', b'\r', b'\n'):
1317 1317 if c in name:
1318 1318 raise error.Abort(
1319 1319 _(b'%r cannot be used in the name of a patch')
1320 1320 % pycompat.bytestr(c)
1321 1321 )
1322 1322
1323 1323 def checkpatchname(self, name, force=False):
1324 1324 self.checkreservedname(name)
1325 1325 if not force and os.path.exists(self.join(name)):
1326 1326 if os.path.isdir(self.join(name)):
1327 1327 raise error.Abort(
1328 1328 _(b'"%s" already exists as a directory') % name
1329 1329 )
1330 1330 else:
1331 1331 raise error.Abort(_(b'patch "%s" already exists') % name)
1332 1332
1333 1333 def makepatchname(self, title, fallbackname):
1334 1334 """Return a suitable filename for title, adding a suffix to make
1335 1335 it unique in the existing list"""
1336 1336 namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
1337 1337 namebase = namebase[:75] # avoid too long name (issue5117)
1338 1338 if namebase:
1339 1339 try:
1340 1340 self.checkreservedname(namebase)
1341 1341 except error.Abort:
1342 1342 namebase = fallbackname
1343 1343 else:
1344 1344 namebase = fallbackname
1345 1345 name = namebase
1346 1346 i = 0
1347 1347 while True:
1348 1348 if name not in self.fullseries:
1349 1349 try:
1350 1350 self.checkpatchname(name)
1351 1351 break
1352 1352 except error.Abort:
1353 1353 pass
1354 1354 i += 1
1355 1355 name = b'%s__%d' % (namebase, i)
1356 1356 return name
1357 1357
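The normalization above lowercases the title, collapses each run of whitespace, punctuation, and underscores into a single underscore, strips underscores at the ends, and truncates to 75 bytes (issue5117). A quick illustration with made-up titles:

    import re

    def namebase(title):
        # same normalization steps as makepatchname() above
        return re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')[:75]

    assert namebase(b'Fix the (very) old bug!!') == b'fix_the_very_old_bug'
    assert namebase(b'  --- ') == b''  # empty result falls back to fallbackname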
1358 1358 def checkkeepchanges(self, keepchanges, force):
1359 1359 if force and keepchanges:
1360 1360 raise error.Abort(_(b'cannot use both --force and --keep-changes'))
1361 1361
1362 1362 def new(self, repo, patchfn, *pats, **opts):
1363 1363 """options:
1364 1364 msg: a string or a no-argument function returning a string
1365 1365 """
1366 1366 opts = pycompat.byteskwargs(opts)
1367 1367 msg = opts.get(b'msg')
1368 1368 edit = opts.get(b'edit')
1369 1369 editform = opts.get(b'editform', b'mq.qnew')
1370 1370 user = opts.get(b'user')
1371 1371 date = opts.get(b'date')
1372 1372 if date:
1373 1373 date = dateutil.parsedate(date)
1374 1374 diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True)
1375 1375 if opts.get(b'checkname', True):
1376 1376 self.checkpatchname(patchfn)
1377 1377 inclsubs = checksubstate(repo)
1378 1378 if inclsubs:
1379 1379 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1380 1380 if opts.get(b'include') or opts.get(b'exclude') or pats:
1381 1381 # detect missing files in pats
1382 1382 def badfn(f, msg):
1383 1383 if f != b'.hgsubstate': # .hgsubstate is auto-created
1384 1384 raise error.Abort(b'%s: %s' % (f, msg))
1385 1385
1386 1386 match = scmutil.match(repo[None], pats, opts, badfn=badfn)
1387 1387 changes = repo.status(match=match)
1388 1388 else:
1389 1389 changes = self.checklocalchanges(repo, force=True)
1390 1390 commitfiles = list(inclsubs)
1391 1391 commitfiles.extend(changes.modified)
1392 1392 commitfiles.extend(changes.added)
1393 1393 commitfiles.extend(changes.removed)
1394 1394 match = scmutil.matchfiles(repo, commitfiles)
1395 1395 if len(repo[None].parents()) > 1:
1396 1396 raise error.Abort(_(b'cannot manage merge changesets'))
1397 1397 self.checktoppatch(repo)
1398 1398 insert = self.fullseriesend()
1399 1399 with repo.wlock():
1400 1400 try:
1401 1401 # if patch file write fails, abort early
1402 1402 p = self.opener(patchfn, b"w")
1403 1403 except IOError as e:
1404 1404 raise error.Abort(
1405 1405 _(b'cannot write patch "%s": %s')
1406 1406 % (patchfn, encoding.strtolocal(e.strerror))
1407 1407 )
1408 1408 try:
1409 1409 defaultmsg = b"[mq]: %s" % patchfn
1410 1410 editor = cmdutil.getcommiteditor(editform=editform)
1411 1411 if edit:
1412 1412
1413 1413 def finishdesc(desc):
1414 1414 if desc.rstrip():
1415 1415 return desc
1416 1416 else:
1417 1417 return defaultmsg
1418 1418
1419 1419 # i18n: this message is shown in editor with "HG: " prefix
1420 1420 extramsg = _(b'Leave message empty to use default message.')
1421 1421 editor = cmdutil.getcommiteditor(
1422 1422 finishdesc=finishdesc,
1423 1423 extramsg=extramsg,
1424 1424 editform=editform,
1425 1425 )
1426 1426 commitmsg = msg
1427 1427 else:
1428 1428 commitmsg = msg or defaultmsg
1429 1429
1430 1430 n = newcommit(
1431 1431 repo,
1432 1432 None,
1433 1433 commitmsg,
1434 1434 user,
1435 1435 date,
1436 1436 match=match,
1437 1437 force=True,
1438 1438 editor=editor,
1439 1439 )
1440 1440 if n is None:
1441 1441 raise error.Abort(_(b"repo commit failed"))
1442 1442 try:
1443 1443 self.fullseries[insert:insert] = [patchfn]
1444 1444 self.applied.append(statusentry(n, patchfn))
1445 1445 self.parseseries()
1446 1446 self.seriesdirty = True
1447 1447 self.applieddirty = True
1448 1448 nctx = repo[n]
1449 1449 ph = patchheader(self.join(patchfn), self.plainmode)
1450 1450 if user:
1451 1451 ph.setuser(user)
1452 1452 if date:
1453 1453 ph.setdate(b'%d %d' % date)
1454 1454 ph.setparent(hex(nctx.p1().node()))
1455 1455 msg = nctx.description().strip()
1456 1456 if msg == defaultmsg.strip():
1457 1457 msg = b''
1458 1458 ph.setmessage(msg)
1459 1459 p.write(bytes(ph))
1460 1460 if commitfiles:
1461 1461 parent = self.qparents(repo, n)
1462 1462 if inclsubs:
1463 1463 self.putsubstate2changes(substatestate, changes)
1464 1464 chunks = patchmod.diff(
1465 1465 repo,
1466 1466 node1=parent,
1467 1467 node2=n,
1468 1468 changes=changes,
1469 1469 opts=diffopts,
1470 1470 )
1471 1471 for chunk in chunks:
1472 1472 p.write(chunk)
1473 1473 p.close()
1474 1474 r = self.qrepo()
1475 1475 if r:
1476 1476 r[None].add([patchfn])
1477 1477 except: # re-raises
1478 1478 repo.rollback()
1479 1479 raise
1480 1480 except Exception:
1481 1481 patchpath = self.join(patchfn)
1482 1482 try:
1483 1483 os.unlink(patchpath)
1484 1484 except OSError:
1485 1485 self.ui.warn(_(b'error unlinking %s\n') % patchpath)
1486 1486 raise
1487 1487 self.removeundo(repo)
1488 1488
1489 1489 def isapplied(self, patch):
1490 1490 """returns (index, rev, patch)"""
1491 1491 for i, a in enumerate(self.applied):
1492 1492 if a.name == patch:
1493 1493 return (i, a.node, a.name)
1494 1494 return None
1495 1495
1496 1496 # if the exact patch name does not exist, we try a few
1497 1497 # variations. If strict is passed, we try only #1
1498 1498 #
1499 1499 # 1) a number (as string) to indicate an offset in the series file
1500 1500 # 2) a unique substring of a patch name
1501 1501 # 3) patchname[-+]num to indicate an offset in the series file
1502 1502 def lookup(self, patch, strict=False):
1503 1503 def partialname(s):
1504 1504 if s in self.series:
1505 1505 return s
1506 1506 matches = [x for x in self.series if s in x]
1507 1507 if len(matches) > 1:
1508 1508 self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s)
1509 1509 for m in matches:
1510 1510 self.ui.warn(b' %s\n' % m)
1511 1511 return None
1512 1512 if matches:
1513 1513 return matches[0]
1514 1514 if self.series and self.applied:
1515 1515 if s == b'qtip':
1516 1516 return self.series[self.seriesend(True) - 1]
1517 1517 if s == b'qbase':
1518 1518 return self.series[0]
1519 1519 return None
1520 1520
1521 1521 if patch in self.series:
1522 1522 return patch
1523 1523
1524 1524 if not os.path.isfile(self.join(patch)):
1525 1525 try:
1526 1526 sno = int(patch)
1527 1527 except (ValueError, OverflowError):
1528 1528 pass
1529 1529 else:
1530 1530 if -len(self.series) <= sno < len(self.series):
1531 1531 return self.series[sno]
1532 1532
1533 1533 if not strict:
1534 1534 res = partialname(patch)
1535 1535 if res:
1536 1536 return res
1537 1537 minus = patch.rfind(b'-')
1538 1538 if minus >= 0:
1539 1539 res = partialname(patch[:minus])
1540 1540 if res:
1541 1541 i = self.series.index(res)
1542 1542 try:
1543 1543 off = int(patch[minus + 1 :] or 1)
1544 1544 except (ValueError, OverflowError):
1545 1545 pass
1546 1546 else:
1547 1547 if i - off >= 0:
1548 1548 return self.series[i - off]
1549 1549 plus = patch.rfind(b'+')
1550 1550 if plus >= 0:
1551 1551 res = partialname(patch[:plus])
1552 1552 if res:
1553 1553 i = self.series.index(res)
1554 1554 try:
1555 1555 off = int(patch[plus + 1 :] or 1)
1556 1556 except (ValueError, OverflowError):
1557 1557 pass
1558 1558 else:
1559 1559 if i + off < len(self.series):
1560 1560 return self.series[i + off]
1561 1561 raise error.Abort(_(b"patch %s not in series") % patch)
1562 1562
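Putting the three variations from the comment above together, assuming a hypothetical series file listing a.diff, b.diff, c.diff with all three applied:

    # q.lookup(b'1')        -> b'b.diff'  (offset into the series file)
    # q.lookup(b'c')        -> b'c.diff'  (unique substring of a patch name)
    # q.lookup(b'c.diff-1') -> b'b.diff'  (name-N: N patches earlier)
    # q.lookup(b'b.diff+1') -> b'c.diff'  (name+N: N patches later)
    # q.lookup(b'qtip')     -> b'c.diff'  (last applied; b'qbase' -> b'a.diff')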
1563 1563 def push(
1564 1564 self,
1565 1565 repo,
1566 1566 patch=None,
1567 1567 force=False,
1568 1568 list=False,
1569 1569 mergeq=None,
1570 1570 all=False,
1571 1571 move=False,
1572 1572 exact=False,
1573 1573 nobackup=False,
1574 1574 keepchanges=False,
1575 1575 ):
1576 1576 self.checkkeepchanges(keepchanges, force)
1577 1577 diffopts = self.diffopts()
1578 1578 with repo.wlock():
1579 1579 heads = []
1580 1580 for hs in repo.branchmap().iterheads():
1581 1581 heads.extend(hs)
1582 1582 if not heads:
1583 1583 heads = [repo.nullid]
1584 1584 if repo.dirstate.p1() not in heads and not exact:
1585 1585 self.ui.status(_(b"(working directory not at a head)\n"))
1586 1586
1587 1587 if not self.series:
1588 1588 self.ui.warn(_(b'no patches in series\n'))
1589 1589 return 0
1590 1590
1591 1591 # Suppose our series file is: A B C and the current 'top'
1592 1592 # patch is B. qpush C should be performed (moving forward)
1594 1594 # qpush B is a NOP (no change); qpush A is an error (can't
1594 1594 # go backwards with qpush)
1595 1595 if patch:
1596 1596 patch = self.lookup(patch)
1597 1597 info = self.isapplied(patch)
1598 1598 if info and info[0] >= len(self.applied) - 1:
1599 1599 self.ui.warn(
1600 1600 _(b'qpush: %s is already at the top\n') % patch
1601 1601 )
1602 1602 return 0
1603 1603
1604 1604 pushable, reason = self.pushable(patch)
1605 1605 if pushable:
1606 1606 if self.series.index(patch) < self.seriesend():
1607 1607 raise error.Abort(
1608 1608 _(b"cannot push to a previous patch: %s") % patch
1609 1609 )
1610 1610 else:
1611 1611 if reason:
1612 1612 reason = _(b'guarded by %s') % reason
1613 1613 else:
1614 1614 reason = _(b'no matching guards')
1615 1615 self.ui.warn(
1616 1616 _(b"cannot push '%s' - %s\n") % (patch, reason)
1617 1617 )
1618 1618 return 1
1619 1619 elif all:
1620 1620 patch = self.series[-1]
1621 1621 if self.isapplied(patch):
1622 1622 self.ui.warn(_(b'all patches are currently applied\n'))
1623 1623 return 0
1624 1624
1625 1625 # Following the above example, starting at 'top' of B:
1626 1626 # qpush should be performed (pushes C), but a subsequent
1627 1627 # qpush without an argument is an error (nothing to
1628 1628 # apply). This allows a loop of "...while hg qpush..." to
1629 1629 # work as it detects an error when done
1630 1630 start = self.seriesend()
1631 1631 if start == len(self.series):
1632 1632 self.ui.warn(_(b'patch series already fully applied\n'))
1633 1633 return 1
1634 1634 if not force and not keepchanges:
1635 1635 self.checklocalchanges(repo, refresh=self.applied)
1636 1636
1637 1637 if exact:
1638 1638 if keepchanges:
1639 1639 raise error.Abort(
1640 1640 _(b"cannot use --exact and --keep-changes together")
1641 1641 )
1642 1642 if move:
1643 1643 raise error.Abort(
1644 1644 _(b'cannot use --exact and --move together')
1645 1645 )
1646 1646 if self.applied:
1647 1647 raise error.Abort(
1648 1648 _(b'cannot push --exact with applied patches')
1649 1649 )
1650 1650 root = self.series[start]
1651 1651 target = patchheader(self.join(root), self.plainmode).parent
1652 1652 if not target:
1653 1653 raise error.Abort(
1654 1654 _(b"%s does not have a parent recorded") % root
1655 1655 )
1656 1656 if not repo[target] == repo[b'.']:
1657 1657 hg.update(repo, target)
1658 1658
1659 1659 if move:
1660 1660 if not patch:
1661 1661 raise error.Abort(_(b"please specify the patch to move"))
1662 1662 for fullstart, rpn in enumerate(self.fullseries):
1663 1663 # strip markers for patch guards
1664 1664 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1665 1665 break
1666 1666 for i, rpn in enumerate(self.fullseries[fullstart:]):
1667 1667 # strip markers for patch guards
1668 1668 if self.guard_re.split(rpn, 1)[0] == patch:
1669 1669 break
1670 1670 index = fullstart + i
1671 1671 assert index < len(self.fullseries)
1672 1672 fullpatch = self.fullseries[index]
1673 1673 del self.fullseries[index]
1674 1674 self.fullseries.insert(fullstart, fullpatch)
1675 1675 self.parseseries()
1676 1676 self.seriesdirty = True
1677 1677
1678 1678 self.applieddirty = True
1679 1679 if start > 0:
1680 1680 self.checktoppatch(repo)
1681 1681 if not patch:
1682 1682 patch = self.series[start]
1683 1683 end = start + 1
1684 1684 else:
1685 1685 end = self.series.index(patch, start) + 1
1686 1686
1687 1687 tobackup = set()
1688 1688 if (not nobackup and force) or keepchanges:
1689 1689 status = self.checklocalchanges(repo, force=True)
1690 1690 if keepchanges:
1691 1691 tobackup.update(
1692 1692 status.modified
1693 1693 + status.added
1694 1694 + status.removed
1695 1695 + status.deleted
1696 1696 )
1697 1697 else:
1698 1698 tobackup.update(status.modified + status.added)
1699 1699
1700 1700 s = self.series[start:end]
1701 1701 all_files = set()
1702 1702 try:
1703 1703 if mergeq:
1704 1704 ret = self.mergepatch(repo, mergeq, s, diffopts)
1705 1705 else:
1706 1706 ret = self.apply(
1707 1707 repo,
1708 1708 s,
1709 1709 list,
1710 1710 all_files=all_files,
1711 1711 tobackup=tobackup,
1712 1712 keepchanges=keepchanges,
1713 1713 )
1714 1714 except AbortNoCleanup:
1715 1715 raise
1716 1716 except: # re-raises
1717 1717 self.ui.warn(_(b'cleaning up working directory...\n'))
1718 1718 cmdutil.revert(
1719 1719 self.ui,
1720 1720 repo,
1721 1721 repo[b'.'],
1722 1722 no_backup=True,
1723 1723 )
1724 1724 # only remove unknown files that we know we touched or
1725 1725 # created while patching
1726 1726 for f in all_files:
1727 1727 if f not in repo.dirstate:
1728 1728 repo.wvfs.unlinkpath(f, ignoremissing=True)
1729 1729 self.ui.warn(_(b'done\n'))
1730 1730 raise
1731 1731
1732 1732 if not self.applied:
1733 1733 return ret[0]
1734 1734 top = self.applied[-1].name
1735 1735 if ret[0] and ret[0] > 1:
1736 1736 msg = _(b"errors during apply, please fix and qrefresh %s\n")
1737 1737 self.ui.write(msg % top)
1738 1738 else:
1739 1739 self.ui.write(_(b"now at: %s\n") % top)
1740 1740 return ret[0]
1741 1741
1742 1742 def pop(
1743 1743 self,
1744 1744 repo,
1745 1745 patch=None,
1746 1746 force=False,
1747 1747 update=True,
1748 1748 all=False,
1749 1749 nobackup=False,
1750 1750 keepchanges=False,
1751 1751 ):
1752 1752 self.checkkeepchanges(keepchanges, force)
1753 1753 with repo.wlock():
1754 1754 if patch:
1755 1755 # index, rev, patch
1756 1756 info = self.isapplied(patch)
1757 1757 if not info:
1758 1758 patch = self.lookup(patch)
1759 1759 info = self.isapplied(patch)
1760 1760 if not info:
1761 1761 raise error.Abort(_(b"patch %s is not applied") % patch)
1762 1762
1763 1763 if not self.applied:
1764 1764 # Allow qpop -a to work repeatedly,
1765 1765 # but not qpop without an argument
1766 1766 self.ui.warn(_(b"no patches applied\n"))
1767 1767 return not all
1768 1768
1769 1769 if all:
1770 1770 start = 0
1771 1771 elif patch:
1772 1772 start = info[0] + 1
1773 1773 else:
1774 1774 start = len(self.applied) - 1
1775 1775
1776 1776 if start >= len(self.applied):
1777 1777 self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
1778 1778 return
1779 1779
1780 1780 if not update:
1781 1781 parents = repo.dirstate.parents()
1782 1782 rr = [x.node for x in self.applied]
1783 1783 for p in parents:
1784 1784 if p in rr:
1785 1785 self.ui.warn(_(b"qpop: forcing dirstate update\n"))
1786 1786 update = True
1787 1787 else:
1788 1788 parents = [p.node() for p in repo[None].parents()]
1789 1789 update = any(
1790 1790 entry.node in parents for entry in self.applied[start:]
1791 1791 )
1792 1792
1793 1793 tobackup = set()
1794 1794 if update:
1795 1795 s = self.checklocalchanges(repo, force=force or keepchanges)
1796 1796 if force:
1797 1797 if not nobackup:
1798 1798 tobackup.update(s.modified + s.added)
1799 1799 elif keepchanges:
1800 1800 tobackup.update(
1801 1801 s.modified + s.added + s.removed + s.deleted
1802 1802 )
1803 1803
1804 1804 self.applieddirty = True
1805 1805 end = len(self.applied)
1806 1806 rev = self.applied[start].node
1807 1807
1808 1808 try:
1809 1809 heads = repo.changelog.heads(rev)
1810 1810 except error.LookupError:
1811 1811 node = short(rev)
1812 1812 raise error.Abort(_(b'trying to pop unknown node %s') % node)
1813 1813
1814 1814 if heads != [self.applied[-1].node]:
1815 1815 raise error.Abort(
1816 1816 _(
1817 1817 b"popping would remove a revision not "
1818 1818 b"managed by this patch queue"
1819 1819 )
1820 1820 )
1821 1821 if not repo[self.applied[-1].node].mutable():
1822 1822 raise error.Abort(
1823 1823 _(b"popping would remove a public revision"),
1824 1824 hint=_(b"see 'hg help phases' for details"),
1825 1825 )
1826 1826
1827 1827 # we know there are no local changes, so we can perform a
1828 1828 # simplified form of hg.update.
1829 1829 if update:
1830 1830 qp = self.qparents(repo, rev)
1831 1831 ctx = repo[qp]
1832 1832 st = repo.status(qp, b'.')
1833 1833 m, a, r, d = st.modified, st.added, st.removed, st.deleted
1834 1834 if d:
1835 1835 raise error.Abort(_(b"deletions found between repo revs"))
1836 1836
1837 1837 tobackup = set(a + m + r) & tobackup
1838 1838 if keepchanges and tobackup:
1839 1839 raise error.Abort(_(b"local changes found, qrefresh first"))
1840 1840 self.backup(repo, tobackup)
1841 1841 with repo.dirstate.parentchange():
1842 1842 for f in a:
1843 1843 repo.wvfs.unlinkpath(f, ignoremissing=True)
1844 1844 repo.dirstate.update_file(
1845 1845 f, p1_tracked=False, wc_tracked=False
1846 1846 )
1847 1847 for f in m + r:
1848 1848 fctx = ctx[f]
1849 1849 repo.wwrite(f, fctx.data(), fctx.flags())
1850 1850 repo.dirstate.update_file(
1851 1851 f, p1_tracked=True, wc_tracked=True
1852 1852 )
1853 1853 repo.setparents(qp, repo.nullid)
1854 1854 for patch in reversed(self.applied[start:end]):
1855 1855 self.ui.status(_(b"popping %s\n") % patch.name)
1856 1856 del self.applied[start:end]
1857 1857 strip(self.ui, repo, [rev], update=False, backup=False)
1858 1858 for s, state in repo[b'.'].substate.items():
1859 1859 repo[b'.'].sub(s).get(state)
1860 1860 if self.applied:
1861 1861 self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
1862 1862 else:
1863 1863 self.ui.write(_(b"patch queue now empty\n"))
1864 1864
1865 1865 def diff(self, repo, pats, opts):
1866 1866 top, patch = self.checktoppatch(repo)
1867 1867 if not top:
1868 1868 self.ui.write(_(b"no patches applied\n"))
1869 1869 return
1870 1870 qp = self.qparents(repo, top)
1871 1871 if opts.get(b'reverse'):
1872 1872 node1, node2 = None, qp
1873 1873 else:
1874 1874 node1, node2 = qp, None
1875 1875 diffopts = self.diffopts(opts, patch)
1876 1876 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1877 1877
1878 1878 def refresh(self, repo, pats=None, **opts):
1879 1879 opts = pycompat.byteskwargs(opts)
1880 1880 if not self.applied:
1881 1881 self.ui.write(_(b"no patches applied\n"))
1882 1882 return 1
1883 1883 msg = opts.get(b'msg', b'').rstrip()
1884 1884 edit = opts.get(b'edit')
1885 1885 editform = opts.get(b'editform', b'mq.qrefresh')
1886 1886 newuser = opts.get(b'user')
1887 1887 newdate = opts.get(b'date')
1888 1888 if newdate:
1889 1889 newdate = b'%d %d' % dateutil.parsedate(newdate)
1890 1890 wlock = repo.wlock()
1891 1891
1892 1892 try:
1893 1893 self.checktoppatch(repo)
1894 1894 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1895 1895 if repo.changelog.heads(top) != [top]:
1896 1896 raise error.Abort(
1897 1897 _(b"cannot qrefresh a revision with children")
1898 1898 )
1899 1899 if not repo[top].mutable():
1900 1900 raise error.Abort(
1901 1901 _(b"cannot qrefresh public revision"),
1902 1902 hint=_(b"see 'hg help phases' for details"),
1903 1903 )
1904 1904
1905 1905 cparents = repo.changelog.parents(top)
1906 1906 patchparent = self.qparents(repo, top)
1907 1907
1908 1908 inclsubs = checksubstate(repo, patchparent)
1909 1909 if inclsubs:
1910 1910 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1911 1911
1912 1912 ph = patchheader(self.join(patchfn), self.plainmode)
1913 1913 diffopts = self.diffopts(
1914 1914 {b'git': opts.get(b'git')}, patchfn, plain=True
1915 1915 )
1916 1916 if newuser:
1917 1917 ph.setuser(newuser)
1918 1918 if newdate:
1919 1919 ph.setdate(newdate)
1920 1920 ph.setparent(hex(patchparent))
1921 1921
1922 1922 # only commit new patch when write is complete
1923 1923 patchf = self.opener(patchfn, b'w', atomictemp=True)
1924 1924
1925 1925 # update the dirstate in place, strip off the qtip commit
1926 1926 # and then commit.
1927 1927 #
1928 1928 # this should really read:
1929 1929 # st = repo.status(top, patchparent)
1930 1930 # but we do it backwards to take advantage of manifest/changelog
1931 1931 # caching against the next repo.status call
1932 1932 st = repo.status(patchparent, top)
1933 1933 mm, aa, dd = st.modified, st.added, st.removed
1934 1934 ctx = repo[top]
1935 1935 aaa = aa[:]
1936 1936 match1 = scmutil.match(repo[None], pats, opts)
1937 1937 # in short mode, we only diff the files already included in
1938 1938 # the patch plus any specified files
1939 1939 if opts.get(b'short'):
1940 1940 # if amending a patch, we start with existing
1941 1941 # files plus specified files - unfiltered
1942 1942 match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
1943 1943 # filter with include/exclude options
1944 1944 match1 = scmutil.match(repo[None], opts=opts)
1945 1945 else:
1946 1946 match = scmutil.matchall(repo)
1947 1947 stb = repo.status(match=match)
1948 1948 m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted
1949 1949 mm = set(mm)
1950 1950 aa = set(aa)
1951 1951 dd = set(dd)
1952 1952
1953 1953 # we might end up with files that were added between
1954 1954 # qtip and the dirstate parent, but then changed in the
1955 1955 # local dirstate. in this case, we want them to only
1956 1956 # show up in the added section
1957 1957 for x in m:
1958 1958 if x not in aa:
1959 1959 mm.add(x)
1960 1960 # we might end up with files added by the local dirstate that
1961 1961 # were deleted by the patch. In this case, they should only
1962 1962 # show up in the changed section.
1963 1963 for x in a:
1964 1964 if x in dd:
1965 1965 dd.remove(x)
1966 1966 mm.add(x)
1967 1967 else:
1968 1968 aa.add(x)
1969 1969 # make sure any files deleted in the local dirstate
1970 1970 # are not in the add or change column of the patch
1971 1971 forget = []
1972 1972 for x in d + r:
1973 1973 if x in aa:
1974 1974 aa.remove(x)
1975 1975 forget.append(x)
1976 1976 continue
1977 1977 else:
1978 1978 mm.discard(x)
1979 1979 dd.add(x)
1980 1980
1981 1981 m = list(mm)
1982 1982 r = list(dd)
1983 1983 a = list(aa)
1984 1984
1985 1985 # create 'match' that includes the files to be recommitted.
1986 1986 # apply match1 via repo.status to ensure correct case handling.
1987 1987 st = repo.status(patchparent, match=match1)
1988 1988 cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted
1989 1989 allmatches = set(cm + ca + cr + cd)
1990 1990 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1991 1991
1992 1992 files = set(inclsubs)
1993 1993 for x in refreshchanges:
1994 1994 files.update(x)
1995 1995 match = scmutil.matchfiles(repo, files)
1996 1996
1997 1997 bmlist = repo[top].bookmarks()
1998 1998
1999 1999 with repo.dirstate.parentchange():
2000 2000 # XXX do we actually need the dirstateguard
2001 2001 dsguard = None
2002 2002 try:
2003 2003 dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
2004 2004 if diffopts.git or diffopts.upgrade:
2005 2005 copies = {}
2006 2006 for dst in a:
2007 2007 src = repo.dirstate.copied(dst)
2008 2008 # during qfold, the source file for copies may
2009 2009 # be removed. Treat this as a simple add.
2010 2010 if src is not None and src in repo.dirstate:
2011 2011 copies.setdefault(src, []).append(dst)
2012 2012 repo.dirstate.update_file(
2013 2013 dst, p1_tracked=False, wc_tracked=True
2014 2014 )
2015 2015 # remember the copies between patchparent and qtip
2016 2016 for dst in aaa:
2017 2017 src = ctx[dst].copysource()
2018 2018 if src:
2019 2019 copies.setdefault(src, []).extend(
2020 2020 copies.get(dst, [])
2021 2021 )
2022 2022 if dst in a:
2023 2023 copies[src].append(dst)
2024 2024 # we can't copy a file created by the patch itself
2025 2025 if dst in copies:
2026 2026 del copies[dst]
2027 2027 for src, dsts in copies.items():
2028 2028 for dst in dsts:
2029 2029 repo.dirstate.copy(src, dst)
2030 2030 else:
2031 2031 for dst in a:
2032 2032 repo.dirstate.update_file(
2033 2033 dst, p1_tracked=False, wc_tracked=True
2034 2034 )
2035 2035 # Drop useless copy information
2036 2036 for f in list(repo.dirstate.copies()):
2037 2037 repo.dirstate.copy(None, f)
2038 2038 for f in r:
2039 2039 repo.dirstate.update_file_p1(f, p1_tracked=True)
2040 2040 # if the patch excludes a modified file, mark that
2041 2041 # file with mtime=0 so status can see it.
2042 2042 mm = []
2043 for i in pycompat.xrange(len(m) - 1, -1, -1):
2043 for i in range(len(m) - 1, -1, -1):
2044 2044 if not match1(m[i]):
2045 2045 mm.append(m[i])
2046 2046 del m[i]
2047 2047 for f in m:
2048 2048 repo.dirstate.update_file_p1(f, p1_tracked=True)
2049 2049 for f in mm:
2050 2050 repo.dirstate.update_file_p1(f, p1_tracked=True)
2051 2051 for f in forget:
2052 2052 repo.dirstate.update_file_p1(f, p1_tracked=False)
2053 2053
2054 2054 user = ph.user or ctx.user()
2055 2055
2056 2056 oldphase = repo[top].phase()
2057 2057
2058 2058 # assumes strip can roll itself back if interrupted
2059 2059 repo.setparents(*cparents)
2060 2060 self.applied.pop()
2061 2061 self.applieddirty = True
2062 2062 strip(self.ui, repo, [top], update=False, backup=False)
2063 2063 dsguard.close()
2064 2064 finally:
2065 2065 release(dsguard)
2066 2066
2067 2067 try:
2068 2068 # might be nice to attempt to roll back strip after this
2069 2069
2070 2070 defaultmsg = b"[mq]: %s" % patchfn
2071 2071 editor = cmdutil.getcommiteditor(editform=editform)
2072 2072 if edit:
2073 2073
2074 2074 def finishdesc(desc):
2075 2075 if desc.rstrip():
2076 2076 ph.setmessage(desc)
2077 2077 return desc
2078 2078 return defaultmsg
2079 2079
2080 2080 # i18n: this message is shown in editor with "HG: " prefix
2081 2081 extramsg = _(b'Leave message empty to use default message.')
2082 2082 editor = cmdutil.getcommiteditor(
2083 2083 finishdesc=finishdesc,
2084 2084 extramsg=extramsg,
2085 2085 editform=editform,
2086 2086 )
2087 2087 message = msg or b"\n".join(ph.message)
2088 2088 elif not msg:
2089 2089 if not ph.message:
2090 2090 message = defaultmsg
2091 2091 else:
2092 2092 message = b"\n".join(ph.message)
2093 2093 else:
2094 2094 message = msg
2095 2095 ph.setmessage(msg)
2096 2096
2097 2097 # Ensure we create a new changeset in the same phase as
2098 2098 # the old one.
2099 2099 lock = tr = None
2100 2100 try:
2101 2101 lock = repo.lock()
2102 2102 tr = repo.transaction(b'mq')
2103 2103 n = newcommit(
2104 2104 repo,
2105 2105 oldphase,
2106 2106 message,
2107 2107 user,
2108 2108 ph.date,
2109 2109 match=match,
2110 2110 force=True,
2111 2111 editor=editor,
2112 2112 )
2113 2113 # only write patch after a successful commit
2114 2114 c = [list(x) for x in refreshchanges]
2115 2115 if inclsubs:
2116 2116 self.putsubstate2changes(substatestate, c)
2117 2117 chunks = patchmod.diff(
2118 2118 repo, patchparent, changes=c, opts=diffopts
2119 2119 )
2120 2120 comments = bytes(ph)
2121 2121 if comments:
2122 2122 patchf.write(comments)
2123 2123 for chunk in chunks:
2124 2124 patchf.write(chunk)
2125 2125 patchf.close()
2126 2126
2127 2127 marks = repo._bookmarks
2128 2128 marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
2129 2129 tr.close()
2130 2130
2131 2131 self.applied.append(statusentry(n, patchfn))
2132 2132 finally:
2133 2133 lockmod.release(tr, lock)
2134 2134 except: # re-raises
2135 2135 ctx = repo[cparents[0]]
2136 2136 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
2137 2137 self.savedirty()
2138 2138 self.ui.warn(
2139 2139 _(
2140 2140 b'qrefresh interrupted while patch was popped! '
2141 2141 b'(revert --all, qpush to recover)\n'
2142 2142 )
2143 2143 )
2144 2144 raise
2145 2145 finally:
2146 2146 wlock.release()
2147 2147 self.removeundo(repo)
2148 2148
2149 2149 def init(self, repo, create=False):
2150 2150 if not create and os.path.isdir(self.path):
2151 2151 raise error.Abort(_(b"patch queue directory already exists"))
2152 2152 try:
2153 2153 os.mkdir(self.path)
2154 2154 except OSError as inst:
2155 2155 if inst.errno != errno.EEXIST or not create:
2156 2156 raise
2157 2157 if create:
2158 2158 return self.qrepo(create=True)
2159 2159
2160 2160 def unapplied(self, repo, patch=None):
2161 2161 if patch and patch not in self.series:
2162 2162 raise error.Abort(_(b"patch %s is not in series file") % patch)
2163 2163 if not patch:
2164 2164 start = self.seriesend()
2165 2165 else:
2166 2166 start = self.series.index(patch) + 1
2167 2167 unapplied = []
2168 for i in pycompat.xrange(start, len(self.series)):
2168 for i in range(start, len(self.series)):
2169 2169 pushable, reason = self.pushable(i)
2170 2170 if pushable:
2171 2171 unapplied.append((i, self.series[i]))
2172 2172 self.explainpushable(i)
2173 2173 return unapplied
2174 2174
2175 2175 def qseries(
2176 2176 self,
2177 2177 repo,
2178 2178 missing=None,
2179 2179 start=0,
2180 2180 length=None,
2181 2181 status=None,
2182 2182 summary=False,
2183 2183 ):
2184 2184 def displayname(pfx, patchname, state):
2185 2185 if pfx:
2186 2186 self.ui.write(pfx)
2187 2187 if summary:
2188 2188 ph = patchheader(self.join(patchname), self.plainmode)
2189 2189 if ph.message:
2190 2190 msg = ph.message[0]
2191 2191 else:
2192 2192 msg = b''
2193 2193
2194 2194 if self.ui.formatted():
2195 2195 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
2196 2196 if width > 0:
2197 2197 msg = stringutil.ellipsis(msg, width)
2198 2198 else:
2199 2199 msg = b''
2200 2200 self.ui.write(patchname, label=b'qseries.' + state)
2201 2201 self.ui.write(b': ')
2202 2202 self.ui.write(msg, label=b'qseries.message.' + state)
2203 2203 else:
2204 2204 self.ui.write(patchname, label=b'qseries.' + state)
2205 2205 self.ui.write(b'\n')
2206 2206
2207 2207 applied = {p.name for p in self.applied}
2208 2208 if length is None:
2209 2209 length = len(self.series) - start
2210 2210 if not missing:
2211 2211 if self.ui.verbose:
2212 2212 idxwidth = len(b"%d" % (start + length - 1))
2213 for i in pycompat.xrange(start, start + length):
2213 for i in range(start, start + length):
2214 2214 patch = self.series[i]
2215 2215 if patch in applied:
2216 2216 char, state = b'A', b'applied'
2217 2217 elif self.pushable(i)[0]:
2218 2218 char, state = b'U', b'unapplied'
2219 2219 else:
2220 2220 char, state = b'G', b'guarded'
2221 2221 pfx = b''
2222 2222 if self.ui.verbose:
2223 2223 pfx = b'%*d %s ' % (idxwidth, i, char)
2224 2224 elif status and status != char:
2225 2225 continue
2226 2226 displayname(pfx, patch, state)
2227 2227 else:
2228 2228 msng_list = []
2229 2229 for root, dirs, files in os.walk(self.path):
2230 2230 d = root[len(self.path) + 1 :]
2231 2231 for f in files:
2232 2232 fl = os.path.join(d, f)
2233 2233 if (
2234 2234 fl not in self.series
2235 2235 and fl
2236 2236 not in (
2237 2237 self.statuspath,
2238 2238 self.seriespath,
2239 2239 self.guardspath,
2240 2240 )
2241 2241 and not fl.startswith(b'.')
2242 2242 ):
2243 2243 msng_list.append(fl)
2244 2244 for x in sorted(msng_list):
2245 2245 pfx = b'D ' if self.ui.verbose else b''
2246 2246 displayname(pfx, x, b'missing')
2247 2247
2248 2248 def issaveline(self, l):
2249 2249 return l.name == b'.hg.patches.save.line'
2251 2251
2252 2252 def qrepo(self, create=False):
2253 2253 ui = self.baseui.copy()
2254 2254 # copy back attributes set by ui.pager()
2255 2255 if self.ui.pageractive and not ui.pageractive:
2256 2256 ui.pageractive = self.ui.pageractive
2257 2257 # internal config: ui.formatted
2258 2258 ui.setconfig(
2259 2259 b'ui',
2260 2260 b'formatted',
2261 2261 self.ui.config(b'ui', b'formatted'),
2262 2262 b'mqpager',
2263 2263 )
2264 2264 ui.setconfig(
2265 2265 b'ui',
2266 2266 b'interactive',
2267 2267 self.ui.config(b'ui', b'interactive'),
2268 2268 b'mqpager',
2269 2269 )
2270 2270 if create or os.path.isdir(self.join(b".hg")):
2271 2271 return hg.repository(ui, path=self.path, create=create)
2272 2272
2273 2273 def restore(self, repo, rev, delete=None, qupdate=None):
2274 2274 desc = repo[rev].description().strip()
2275 2275 lines = desc.splitlines()
2276 2276 datastart = None
2277 2277 series = []
2278 2278 applied = []
2279 2279 qpp = None
2280 2280 for i, line in enumerate(lines):
2281 2281 if line == b'Patch Data:':
2282 2282 datastart = i + 1
2283 2283 elif line.startswith(b'Dirstate:'):
2284 2284 l = line.rstrip()
2285 2285 l = l[10:].split(b' ')
2286 2286 qpp = [bin(x) for x in l]
2287 2287 elif datastart is not None:
2288 2288 l = line.rstrip()
2289 2289 n, name = l.split(b':', 1)
2290 2290 if n:
2291 2291 applied.append(statusentry(bin(n), name))
2292 2292 else:
2293 2293 series.append(l)
2294 2294 if datastart is None:
2295 2295 self.ui.warn(_(b"no saved patch data found\n"))
2296 2296 return 1
2297 2297 self.ui.warn(_(b"restoring status: %s\n") % lines[0])
2298 2298 self.fullseries = series
2299 2299 self.applied = applied
2300 2300 self.parseseries()
2301 2301 self.seriesdirty = True
2302 2302 self.applieddirty = True
2303 2303 heads = repo.changelog.heads()
2304 2304 if delete:
2305 2305 if rev not in heads:
2306 2306 self.ui.warn(_(b"save entry has children, leaving it alone\n"))
2307 2307 else:
2308 2308 self.ui.warn(_(b"removing save entry %s\n") % short(rev))
2309 2309 pp = repo.dirstate.parents()
2310 2310 if rev in pp:
2311 2311 update = True
2312 2312 else:
2313 2313 update = False
2314 2314 strip(self.ui, repo, [rev], update=update, backup=False)
2315 2315 if qpp:
2316 2316 self.ui.warn(
2317 2317 _(b"saved queue repository parents: %s %s\n")
2318 2318 % (short(qpp[0]), short(qpp[1]))
2319 2319 )
2320 2320 if qupdate:
2321 2321 self.ui.status(_(b"updating queue directory\n"))
2322 2322 r = self.qrepo()
2323 2323 if not r:
2324 2324 self.ui.warn(_(b"unable to load queue repository\n"))
2325 2325 return 1
2326 2326 hg.clean(r, qpp[0])
2327 2327
2328 2328 def save(self, repo, msg=None):
2329 2329 if not self.applied:
2330 2330 self.ui.warn(_(b"save: no patches applied, exiting\n"))
2331 2331 return 1
2332 2332 if self.issaveline(self.applied[-1]):
2333 2333 self.ui.warn(_(b"status is already saved\n"))
2334 2334 return 1
2335 2335
2336 2336 if not msg:
2337 2337 msg = _(b"hg patches saved state")
2338 2338 else:
2339 2339 msg = b"hg patches: " + msg.rstrip(b'\r\n')
2340 2340 r = self.qrepo()
2341 2341 if r:
2342 2342 pp = r.dirstate.parents()
2343 2343 msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
2344 2344 msg += b"\n\nPatch Data:\n"
2345 2345 msg += b''.join(b'%s\n' % x for x in self.applied)
2346 2346 msg += b''.join(b':%s\n' % x for x in self.fullseries)
2347 2347 n = repo.commit(msg, force=True)
2348 2348 if not n:
2349 2349 self.ui.warn(_(b"repo commit failed\n"))
2350 2350 return 1
2351 2351 self.applied.append(statusentry(n, b'.hg.patches.save.line'))
2352 2352 self.applieddirty = True
2353 2353 self.removeundo(repo)
2354 2354
2355 2355 def fullseriesend(self):
2356 2356 if self.applied:
2357 2357 p = self.applied[-1].name
2358 2358 end = self.findseries(p)
2359 2359 if end is None:
2360 2360 return len(self.fullseries)
2361 2361 return end + 1
2362 2362 return 0
2363 2363
2364 2364 def seriesend(self, all_patches=False):
2365 2365 """If all_patches is False, return the index of the next pushable patch
2366 2366 in the series, or the series length. If all_patches is True, return the
2367 2367 index of the first patch past the last applied one.
2368 2368 """
2369 2369 end = 0
2370 2370
2371 2371 def nextpatch(start):
2372 2372 if all_patches or start >= len(self.series):
2373 2373 return start
2374 for i in pycompat.xrange(start, len(self.series)):
2374 for i in range(start, len(self.series)):
2375 2375 p, reason = self.pushable(i)
2376 2376 if p:
2377 2377 return i
2378 2378 self.explainpushable(i)
2379 2379 return len(self.series)
2380 2380
2381 2381 if self.applied:
2382 2382 p = self.applied[-1].name
2383 2383 try:
2384 2384 end = self.series.index(p)
2385 2385 except ValueError:
2386 2386 return 0
2387 2387 return nextpatch(end + 1)
2388 2388 return nextpatch(end)
2389 2389
2390 2390 def appliedname(self, index):
2391 2391 pname = self.applied[index].name
2392 2392 if not self.ui.verbose:
2393 2393 p = pname
2394 2394 else:
2395 2395 p = (b"%d" % self.series.index(pname)) + b" " + pname
2396 2396 return p
2397 2397
2398 2398 def qimport(
2399 2399 self,
2400 2400 repo,
2401 2401 files,
2402 2402 patchname=None,
2403 2403 rev=None,
2404 2404 existing=None,
2405 2405 force=None,
2406 2406 git=False,
2407 2407 ):
2408 2408 def checkseries(patchname):
2409 2409 if patchname in self.series:
2410 2410 raise error.Abort(
2411 2411 _(b'patch %s is already in the series file') % patchname
2412 2412 )
2413 2413
2414 2414 if rev:
2415 2415 if files:
2416 2416 raise error.Abort(
2417 2417 _(b'option "-r" not valid when importing files')
2418 2418 )
2419 2419 rev = logcmdutil.revrange(repo, rev)
2420 2420 rev.sort(reverse=True)
2421 2421 elif not files:
2422 2422 raise error.Abort(_(b'no files or revisions specified'))
2423 2423 if (len(files) > 1 or len(rev) > 1) and patchname:
2424 2424 raise error.Abort(
2425 2425 _(b'option "-n" not valid when importing multiple patches')
2426 2426 )
2427 2427 imported = []
2428 2428 if rev:
2429 2429 # If mq patches are applied, we can only import revisions
2430 2430 # that form a linear path to qbase.
2431 2431 # Otherwise, they should form a linear path to a head.
2432 2432 heads = repo.changelog.heads(repo.changelog.node(rev.first()))
2433 2433 if len(heads) > 1:
2434 2434 raise error.Abort(
2435 2435 _(b'revision %d is the root of more than one branch')
2436 2436 % rev.last()
2437 2437 )
2438 2438 if self.applied:
2439 2439 base = repo.changelog.node(rev.first())
2440 2440 if base in [n.node for n in self.applied]:
2441 2441 raise error.Abort(
2442 2442 _(b'revision %d is already managed') % rev.first()
2443 2443 )
2444 2444 if heads != [self.applied[-1].node]:
2445 2445 raise error.Abort(
2446 2446 _(b'revision %d is not the parent of the queue')
2447 2447 % rev.first()
2448 2448 )
2449 2449 base = repo.changelog.rev(self.applied[0].node)
2450 2450 lastparent = repo.changelog.parentrevs(base)[0]
2451 2451 else:
2452 2452 if heads != [repo.changelog.node(rev.first())]:
2453 2453 raise error.Abort(
2454 2454 _(b'revision %d has unmanaged children') % rev.first()
2455 2455 )
2456 2456 lastparent = None
2457 2457
2458 2458 diffopts = self.diffopts({b'git': git})
2459 2459 with repo.transaction(b'qimport') as tr:
2460 2460 for r in rev:
2461 2461 if not repo[r].mutable():
2462 2462 raise error.Abort(
2463 2463 _(b'revision %d is not mutable') % r,
2464 2464 hint=_(b"see 'hg help phases' " b'for details'),
2465 2465 )
2466 2466 p1, p2 = repo.changelog.parentrevs(r)
2467 2467 n = repo.changelog.node(r)
2468 2468 if p2 != nullrev:
2469 2469 raise error.Abort(
2470 2470 _(b'cannot import merge revision %d') % r
2471 2471 )
2472 2472 if lastparent and lastparent != r:
2473 2473 raise error.Abort(
2474 2474 _(b'revision %d is not the parent of %d')
2475 2475 % (r, lastparent)
2476 2476 )
2477 2477 lastparent = p1
2478 2478
2479 2479 if not patchname:
2480 2480 patchname = self.makepatchname(
2481 2481 repo[r].description().split(b'\n', 1)[0],
2482 2482 b'%d.diff' % r,
2483 2483 )
2484 2484 checkseries(patchname)
2485 2485 self.checkpatchname(patchname, force)
2486 2486 self.fullseries.insert(0, patchname)
2487 2487
2488 2488 with self.opener(patchname, b"w") as fp:
2489 2489 cmdutil.exportfile(repo, [n], fp, opts=diffopts)
2490 2490
2491 2491 se = statusentry(n, patchname)
2492 2492 self.applied.insert(0, se)
2493 2493
2494 2494 self.added.append(patchname)
2495 2495 imported.append(patchname)
2496 2496 patchname = None
2497 2497 if rev and repo.ui.configbool(b'mq', b'secret'):
2498 2498 # if we added anything with --rev, move the secret root
2499 2499 phases.retractboundary(repo, tr, phases.secret, [n])
2500 2500 self.parseseries()
2501 2501 self.applieddirty = True
2502 2502 self.seriesdirty = True
2503 2503
2504 2504 for i, filename in enumerate(files):
2505 2505 if existing:
2506 2506 if filename == b'-':
2507 2507 raise error.Abort(
2508 2508 _(b'-e is incompatible with import from -')
2509 2509 )
2510 2510 filename = normname(filename)
2511 2511 self.checkreservedname(filename)
2512 2512 if urlutil.url(filename).islocal():
2513 2513 originpath = self.join(filename)
2514 2514 if not os.path.isfile(originpath):
2515 2515 raise error.Abort(
2516 2516 _(b"patch %s does not exist") % filename
2517 2517 )
2518 2518
2519 2519 if patchname:
2520 2520 self.checkpatchname(patchname, force)
2521 2521
2522 2522 self.ui.write(
2523 2523 _(b'renaming %s to %s\n') % (filename, patchname)
2524 2524 )
2525 2525 util.rename(originpath, self.join(patchname))
2526 2526 else:
2527 2527 patchname = filename
2528 2528
2529 2529 else:
2530 2530 if filename == b'-' and not patchname:
2531 2531 raise error.Abort(
2532 2532 _(b'need --name to import a patch from -')
2533 2533 )
2534 2534 elif not patchname:
2535 2535 patchname = normname(
2536 2536 os.path.basename(filename.rstrip(b'/'))
2537 2537 )
2538 2538 self.checkpatchname(patchname, force)
2539 2539 try:
2540 2540 if filename == b'-':
2541 2541 text = self.ui.fin.read()
2542 2542 else:
2543 2543 fp = hg.openpath(self.ui, filename)
2544 2544 text = fp.read()
2545 2545 fp.close()
2546 2546 except (OSError, IOError):
2547 2547 raise error.Abort(_(b"unable to read file %s") % filename)
2548 2548 patchf = self.opener(patchname, b"w")
2549 2549 patchf.write(text)
2550 2550 patchf.close()
2551 2551 if not force:
2552 2552 checkseries(patchname)
2553 2553 if patchname not in self.series:
2554 2554 index = self.fullseriesend() + i
2555 2555 self.fullseries[index:index] = [patchname]
2556 2556 self.parseseries()
2557 2557 self.seriesdirty = True
2558 2558 self.ui.warn(_(b"adding %s to series file\n") % patchname)
2559 2559 self.added.append(patchname)
2560 2560 imported.append(patchname)
2561 2561 patchname = None
2562 2562
2563 2563 self.removeundo(repo)
2564 2564 return imported
2565 2565
2566 2566
2567 2567 def fixkeepchangesopts(ui, opts):
2568 2568 if (
2569 2569 not ui.configbool(b'mq', b'keepchanges')
2570 2570 or opts.get(b'force')
2571 2571 or opts.get(b'exact')
2572 2572 ):
2573 2573 return opts
2574 2574 opts = dict(opts)
2575 2575 opts[b'keep_changes'] = True
2576 2576 return opts
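# Illustration (assuming 'mq.keepchanges=True' is configured): a bare
# 'hg qpush' then behaves like 'hg qpush --keep-changes', while
# 'hg qpush --force' and 'hg qpush --exact' are left untouched.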
2577 2577
2578 2578
2579 2579 @command(
2580 2580 b"qdelete|qremove|qrm",
2581 2581 [
2582 2582 (b'k', b'keep', None, _(b'keep patch file')),
2583 2583 (
2584 2584 b'r',
2585 2585 b'rev',
2586 2586 [],
2587 2587 _(b'stop managing a revision (DEPRECATED)'),
2588 2588 _(b'REV'),
2589 2589 ),
2590 2590 ],
2591 2591 _(b'hg qdelete [-k] [PATCH]...'),
2592 2592 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2593 2593 )
2594 2594 def delete(ui, repo, *patches, **opts):
2595 2595 """remove patches from queue
2596 2596
2597 2597 The patches must not be applied, and at least one patch is required. Exact
2598 2598 patch identifiers must be given. With -k/--keep, the patch files are
2599 2599 preserved in the patch directory.
2600 2600
2601 2601 To stop managing a patch and move it into permanent history,
2602 2602 use the :hg:`qfinish` command."""
2603 2603 q = repo.mq
2604 2604 q.delete(repo, patches, pycompat.byteskwargs(opts))
2605 2605 q.savedirty()
2606 2606 return 0
2607 2607
2608 2608
2609 2609 @command(
2610 2610 b"qapplied",
2611 2611 [(b'1', b'last', None, _(b'show only the preceding applied patch'))]
2612 2612 + seriesopts,
2613 2613 _(b'hg qapplied [-1] [-s] [PATCH]'),
2614 2614 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2615 2615 )
2616 2616 def applied(ui, repo, patch=None, **opts):
2617 2617 """print the patches already applied
2618 2618
2619 2619 Returns 0 on success."""
2620 2620
2621 2621 q = repo.mq
2622 2622 opts = pycompat.byteskwargs(opts)
2623 2623
2624 2624 if patch:
2625 2625 if patch not in q.series:
2626 2626 raise error.Abort(_(b"patch %s is not in series file") % patch)
2627 2627 end = q.series.index(patch) + 1
2628 2628 else:
2629 2629 end = q.seriesend(True)
2630 2630
2631 2631 if opts.get(b'last') and not end:
2632 2632 ui.write(_(b"no patches applied\n"))
2633 2633 return 1
2634 2634 elif opts.get(b'last') and end == 1:
2635 2635 ui.write(_(b"only one patch applied\n"))
2636 2636 return 1
2637 2637 elif opts.get(b'last'):
2638 2638 start = end - 2
2639 2639 end = 1
2640 2640 else:
2641 2641 start = 0
2642 2642
2643 2643 q.qseries(
2644 2644 repo, length=end, start=start, status=b'A', summary=opts.get(b'summary')
2645 2645 )
2646 2646
2647 2647
2648 2648 @command(
2649 2649 b"qunapplied",
2650 2650 [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts,
2651 2651 _(b'hg qunapplied [-1] [-s] [PATCH]'),
2652 2652 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2653 2653 )
2654 2654 def unapplied(ui, repo, patch=None, **opts):
2655 2655 """print the patches not yet applied
2656 2656
2657 2657 Returns 0 on success."""
2658 2658
2659 2659 q = repo.mq
2660 2660 opts = pycompat.byteskwargs(opts)
2661 2661 if patch:
2662 2662 if patch not in q.series:
2663 2663 raise error.Abort(_(b"patch %s is not in series file") % patch)
2664 2664 start = q.series.index(patch) + 1
2665 2665 else:
2666 2666 start = q.seriesend(True)
2667 2667
2668 2668 if start == len(q.series) and opts.get(b'first'):
2669 2669 ui.write(_(b"all patches applied\n"))
2670 2670 return 1
2671 2671
2672 2672 if opts.get(b'first'):
2673 2673 length = 1
2674 2674 else:
2675 2675 length = None
2676 2676 q.qseries(
2677 2677 repo,
2678 2678 start=start,
2679 2679 length=length,
2680 2680 status=b'U',
2681 2681 summary=opts.get(b'summary'),
2682 2682 )
2683 2683
2684 2684
2685 2685 @command(
2686 2686 b"qimport",
2687 2687 [
2688 2688 (b'e', b'existing', None, _(b'import file in patch directory')),
2689 2689 (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')),
2690 2690 (b'f', b'force', None, _(b'overwrite existing files')),
2691 2691 (
2692 2692 b'r',
2693 2693 b'rev',
2694 2694 [],
2695 2695 _(b'place existing revisions under mq control'),
2696 2696 _(b'REV'),
2697 2697 ),
2698 2698 (b'g', b'git', None, _(b'use git extended diff format')),
2699 2699 (b'P', b'push', None, _(b'qpush after importing')),
2700 2700 ],
2701 2701 _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
2702 2702 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2703 2703 )
2704 2704 def qimport(ui, repo, *filename, **opts):
2705 2705 """import a patch or existing changeset
2706 2706
2707 2707 The patch is inserted into the series after the last applied
2708 2708 patch. If no patches have been applied, qimport prepends the patch
2709 2709 to the series.
2710 2710
2711 2711 The patch will have the same name as its source file unless you
2712 2712 give it a new one with -n/--name.
2713 2713
2714 2714 You can register an existing patch inside the patch directory with
2715 2715 the -e/--existing flag.
2716 2716
2717 2717 With -f/--force, an existing patch of the same name will be
2718 2718 overwritten.
2719 2719
2720 2720 An existing changeset may be placed under mq control with -r/--rev
2721 2721 (e.g. qimport --rev . -n patch will place the current revision
2722 2722 under mq control). With -g/--git, patches imported with --rev will
2723 2723 use the git diff format. See the diffs help topic for information
2724 2724 on why this is important for preserving rename/copy information
2725 2725 and permission changes. Use :hg:`qfinish` to remove changesets
2726 2726 from mq control.
2727 2727
2728 2728 To import a patch from standard input, pass - as the patch file.
2729 2729 When importing from standard input, a patch name must be specified
2730 2730 using the --name flag.
2731 2731
2732 2732 To import an existing patch while renaming it::
2733 2733
2734 2734 hg qimport -e existing-patch -n new-name
2735 2735
2736 2736 Returns 0 if import succeeded.
2737 2737 """
2738 2738 opts = pycompat.byteskwargs(opts)
2739 2739 with repo.lock(): # because this may move phases
2740 2740 q = repo.mq
2741 2741 try:
2742 2742 imported = q.qimport(
2743 2743 repo,
2744 2744 filename,
2745 2745 patchname=opts.get(b'name'),
2746 2746 existing=opts.get(b'existing'),
2747 2747 force=opts.get(b'force'),
2748 2748 rev=opts.get(b'rev'),
2749 2749 git=opts.get(b'git'),
2750 2750 )
2751 2751 finally:
2752 2752 q.savedirty()
2753 2753
2754 2754 if imported and opts.get(b'push') and not opts.get(b'rev'):
2755 2755 return q.push(repo, imported[-1])
2756 2756 return 0
2757 2757
2758 2758
2759 2759 def qinit(ui, repo, create):
2760 2760 """initialize a new queue repository
2761 2761
2762 2762 This command also creates a series file for ordering patches, and
2763 2763 an mq-specific .hgignore file in the queue repository, to exclude
2764 2764 the status and guards files (these contain mostly transient state).
2765 2765
2766 2766 Returns 0 if initialization succeeded."""
2767 2767 q = repo.mq
2768 2768 r = q.init(repo, create)
2769 2769 q.savedirty()
2770 2770 if r:
2771 2771 if not os.path.exists(r.wjoin(b'.hgignore')):
2772 2772 fp = r.wvfs(b'.hgignore', b'w')
2773 2773 fp.write(b'^\\.hg\n')
2774 2774 fp.write(b'^\\.mq\n')
2775 2775 fp.write(b'syntax: glob\n')
2776 2776 fp.write(b'status\n')
2777 2777 fp.write(b'guards\n')
2778 2778 fp.close()
2779 2779 if not os.path.exists(r.wjoin(b'series')):
2780 2780 r.wvfs(b'series', b'w').close()
2781 2781 r[None].add([b'.hgignore', b'series'])
2782 2782 commands.add(ui, r)
2783 2783 return 0
2784 2784
2785 2785
2786 2786 @command(
2787 2787 b"qinit",
2788 2788 [(b'c', b'create-repo', None, _(b'create queue repository'))],
2789 2789 _(b'hg qinit [-c]'),
2790 2790 helpcategory=command.CATEGORY_REPO_CREATION,
2791 2791 helpbasic=True,
2792 2792 )
2793 2793 def init(ui, repo, **opts):
2794 2794 """init a new queue repository (DEPRECATED)
2795 2795
2796 2796 The queue repository is unversioned by default. If
2797 2797 -c/--create-repo is specified, qinit will create a separate nested
2798 2798 repository for patches (qinit -c may also be run later to convert
2799 2799 an unversioned patch repository into a versioned one). You can use
2800 2800 qcommit to commit changes to this queue repository.
2801 2801
2802 2802 This command is deprecated. Without -c, it's implied by other relevant
2803 2803 commands. With -c, use :hg:`init --mq` instead."""
2804 2804 return qinit(ui, repo, create=opts.get('create_repo'))
2805 2805
2806 2806
2807 2807 @command(
2808 2808 b"qclone",
2809 2809 [
2810 2810 (b'', b'pull', None, _(b'use pull protocol to copy metadata')),
2811 2811 (
2812 2812 b'U',
2813 2813 b'noupdate',
2814 2814 None,
2815 2815 _(b'do not update the new working directories'),
2816 2816 ),
2817 2817 (
2818 2818 b'',
2819 2819 b'uncompressed',
2820 2820 None,
2821 2821 _(b'use uncompressed transfer (fast over LAN)'),
2822 2822 ),
2823 2823 (
2824 2824 b'p',
2825 2825 b'patches',
2826 2826 b'',
2827 2827 _(b'location of source patch repository'),
2828 2828 _(b'REPO'),
2829 2829 ),
2830 2830 ]
2831 2831 + cmdutil.remoteopts,
2832 2832 _(b'hg qclone [OPTION]... SOURCE [DEST]'),
2833 2833 helpcategory=command.CATEGORY_REPO_CREATION,
2834 2834 norepo=True,
2835 2835 )
2836 2836 def clone(ui, source, dest=None, **opts):
2837 2837 """clone main and patch repository at same time
2838 2838
2839 2839 If source is local, destination will have no patches applied. If
2840 2840 source is remote, this command cannot check whether patches are
2841 2841 applied in source, so it cannot guarantee that patches are not
2842 2842 applied in destination. If you clone a remote repository, make
2843 2843 sure beforehand that it has no patches applied.
2844 2844
2845 2845 The source patch repository is looked for in <src>/.hg/patches by
2846 2846 default. Use -p <url> to change it.
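
For example (hypothetical URLs, for illustration)::

hg qclone -p http://example.com/patches http://example.com/repo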
2847 2847
2848 2848 The patch directory must be a nested Mercurial repository, as
2849 2849 would be created by :hg:`init --mq`.
2850 2850
2851 2851 Return 0 on success.
2852 2852 """
2853 2853 opts = pycompat.byteskwargs(opts)
2854 2854
2855 2855 def patchdir(repo):
2856 2856 """compute a patch repo url from a repo object"""
2857 2857 url = repo.url()
2858 2858 if url.endswith(b'/'):
2859 2859 url = url[:-1]
2860 2860 return url + b'/.hg/patches'
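# e.g. b'http://example.com/repo/' -> b'http://example.com/repo/.hg/patches'
# (hypothetical URL, for illustration)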
2861 2861
2862 2862 # main repo (destination and sources)
2863 2863 if dest is None:
2864 2864 dest = hg.defaultdest(source)
2865 2865 __, source_path, __ = urlutil.get_clone_path(ui, source)
2866 2866 sr = hg.peer(ui, opts, source_path)
2867 2867
2868 2868 # patches repo (source only)
2869 2869 if opts.get(b'patches'):
2870 2870 __, patchespath, __ = urlutil.get_clone_path(ui, opts.get(b'patches'))
2871 2871 else:
2872 2872 patchespath = patchdir(sr)
2873 2873 try:
2874 2874 hg.peer(ui, opts, patchespath)
2875 2875 except error.RepoError:
2876 2876 raise error.Abort(
2877 2877 _(b'versioned patch repository not found (see init --mq)')
2878 2878 )
2879 2879 qbase, destrev = None, None
2880 2880 if sr.local():
2881 2881 repo = sr.local()
2882 2882 if repo.mq.applied and repo[repo.mq.applied[0].node].phase() != phases.secret:
2883 2883 qbase = repo.mq.applied[0].node
2884 2884 if not hg.islocal(dest):
2885 2885 heads = set(repo.heads())
2886 2886 destrev = list(heads.difference(repo.heads(qbase)))
2887 2887 destrev.append(repo.changelog.parents(qbase)[0])
2888 2888 elif sr.capable(b'lookup'):
2889 2889 try:
2890 2890 qbase = sr.lookup(b'qbase')
2891 2891 except error.RepoError:
2892 2892 pass
2893 2893
2894 2894 ui.note(_(b'cloning main repository\n'))
2895 2895 sr, dr = hg.clone(
2896 2896 ui,
2897 2897 opts,
2898 2898 sr.url(),
2899 2899 dest,
2900 2900 pull=opts.get(b'pull'),
2901 2901 revs=destrev,
2902 2902 update=False,
2903 2903 stream=opts.get(b'uncompressed'),
2904 2904 )
2905 2905
2906 2906 ui.note(_(b'cloning patch repository\n'))
2907 2907 hg.clone(
2908 2908 ui,
2909 2909 opts,
2910 2910 opts.get(b'patches') or patchdir(sr),
2911 2911 patchdir(dr),
2912 2912 pull=opts.get(b'pull'),
2913 2913 update=not opts.get(b'noupdate'),
2914 2914 stream=opts.get(b'uncompressed'),
2915 2915 )
2916 2916
2917 2917 if dr.local():
2918 2918 repo = dr.local()
2919 2919 if qbase:
2920 2920 ui.note(
2921 2921 _(
2922 2922 b'stripping applied patches from destination '
2923 2923 b'repository\n'
2924 2924 )
2925 2925 )
2926 2926 strip(ui, repo, [qbase], update=False, backup=None)
2927 2927 if not opts.get(b'noupdate'):
2928 2928 ui.note(_(b'updating destination repository\n'))
2929 2929 hg.update(repo, repo.changelog.tip())
2930 2930
2931 2931
2932 2932 @command(
2933 2933 b"qcommit|qci",
2934 2934 commands.table[b"commit|ci"][1],
2935 2935 _(b'hg qcommit [OPTION]... [FILE]...'),
2936 2936 helpcategory=command.CATEGORY_COMMITTING,
2937 2937 inferrepo=True,
2938 2938 )
2939 2939 def commit(ui, repo, *pats, **opts):
2940 2940 """commit changes in the queue repository (DEPRECATED)
2941 2941
2942 2942 This command is deprecated; use :hg:`commit --mq` instead."""
2943 2943 q = repo.mq
2944 2944 r = q.qrepo()
2945 2945 if not r:
2946 2946 raise error.Abort(b'no queue repository')
2947 2947 commands.commit(r.ui, r, *pats, **opts)
2948 2948
2949 2949
2950 2950 @command(
2951 2951 b"qseries",
2952 2952 [
2953 2953 (b'm', b'missing', None, _(b'print patches not in series')),
2954 2954 ]
2955 2955 + seriesopts,
2956 2956 _(b'hg qseries [-ms]'),
2957 2957 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2958 2958 )
2959 2959 def series(ui, repo, **opts):
2960 2960 """print the entire series file
2961 2961
2962 2962 Returns 0 on success."""
2963 2963 repo.mq.qseries(
2964 2964 repo, missing=opts.get('missing'), summary=opts.get('summary')
2965 2965 )
2966 2966 return 0
2967 2967
2968 2968
2969 2969 @command(
2970 2970 b"qtop",
2971 2971 seriesopts,
2972 2972 _(b'hg qtop [-s]'),
2973 2973 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2974 2974 )
2975 2975 def top(ui, repo, **opts):
2976 2976 """print the name of the current patch
2977 2977
2978 2978 Returns 0 on success."""
2979 2979 q = repo.mq
2980 2980 if q.applied:
2981 2981 t = q.seriesend(True)
2982 2982 else:
2983 2983 t = 0
2984 2984
2985 2985 if t:
2986 2986 q.qseries(
2987 2987 repo,
2988 2988 start=t - 1,
2989 2989 length=1,
2990 2990 status=b'A',
2991 2991 summary=opts.get('summary'),
2992 2992 )
2993 2993 else:
2994 2994 ui.write(_(b"no patches applied\n"))
2995 2995 return 1
2996 2996
2997 2997
2998 2998 @command(
2999 2999 b"qnext",
3000 3000 seriesopts,
3001 3001 _(b'hg qnext [-s]'),
3002 3002 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3003 3003 )
3004 3004 def next(ui, repo, **opts):
3005 3005 """print the name of the next pushable patch
3006 3006
3007 3007 Returns 0 on success."""
3008 3008 q = repo.mq
3009 3009 end = q.seriesend()
3010 3010 if end == len(q.series):
3011 3011 ui.write(_(b"all patches applied\n"))
3012 3012 return 1
3013 3013 q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
3014 3014
3015 3015
3016 3016 @command(
3017 3017 b"qprev",
3018 3018 seriesopts,
3019 3019 _(b'hg qprev [-s]'),
3020 3020 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3021 3021 )
3022 3022 def prev(ui, repo, **opts):
3023 3023 """print the name of the preceding applied patch
3024 3024
3025 3025 Returns 0 on success."""
3026 3026 q = repo.mq
3027 3027 l = len(q.applied)
3028 3028 if l == 1:
3029 3029 ui.write(_(b"only one patch applied\n"))
3030 3030 return 1
3031 3031 if not l:
3032 3032 ui.write(_(b"no patches applied\n"))
3033 3033 return 1
3034 3034 idx = q.series.index(q.applied[-2].name)
3035 3035 q.qseries(
3036 3036 repo, start=idx, length=1, status=b'A', summary=opts.get('summary')
3037 3037 )
3038 3038
3039 3039
3040 3040 def setupheaderopts(ui, opts):
3041 3041 if not opts.get(b'user') and opts.get(b'currentuser'):
3042 3042 opts[b'user'] = ui.username()
3043 3043 if not opts.get(b'date') and opts.get(b'currentdate'):
3044 3044 opts[b'date'] = b"%d %d" % dateutil.makedate()
3045 3045
3046 3046
3047 3047 @command(
3048 3048 b"qnew",
3049 3049 [
3050 3050 (b'e', b'edit', None, _(b'invoke editor on commit messages')),
3051 3051 (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')),
3052 3052 (b'g', b'git', None, _(b'use git extended diff format')),
3053 3053 (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')),
3054 3054 (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')),
3055 3055 (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')),
3056 3056 (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')),
3057 3057 ]
3058 3058 + cmdutil.walkopts
3059 3059 + cmdutil.commitopts,
3060 3060 _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
3061 3061 helpcategory=command.CATEGORY_COMMITTING,
3062 3062 helpbasic=True,
3063 3063 inferrepo=True,
3064 3064 )
3065 3065 def new(ui, repo, patch, *args, **opts):
3066 3066 """create a new patch
3067 3067
3068 3068 qnew creates a new patch on top of the currently-applied patch (if
3069 3069 any). The patch will be initialized with any outstanding changes
3070 3070 in the working directory. You may also use -I/--include,
3071 3071 -X/--exclude, and/or a list of files after the patch name to add
3072 3072 only changes to matching files to the new patch, leaving the rest
3073 3073 as uncommitted modifications.
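
For example, ``hg qnew crash.patch src/parser.py`` (hypothetical names)
creates a new patch containing only the outstanding changes to
src/parser.py, leaving other modifications uncommitted.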
3074 3074
3075 3075 -u/--user and -d/--date can be used to set the (given) user and
3076 3076 date, respectively. -U/--currentuser and -D/--currentdate set user
3077 3077 to current user and date to current date.
3078 3078
3079 3079 -e/--edit, -m/--message or -l/--logfile set the patch header as
3080 3080 well as the commit message. If none is specified, the header is
3081 3081 empty and the commit message is '[mq]: PATCH'.
3082 3082
3083 3083 Use the -g/--git option to keep the patch in the git extended diff
3084 3084 format. Read the diffs help topic for more information on why this
3085 3085 is important for preserving permission changes and copy/rename
3086 3086 information.
3087 3087
3088 3088 Returns 0 on successful creation of a new patch.
3089 3089 """
3090 3090 opts = pycompat.byteskwargs(opts)
3091 3091 msg = cmdutil.logmessage(ui, opts)
3092 3092 q = repo.mq
3093 3093 opts[b'msg'] = msg
3094 3094 setupheaderopts(ui, opts)
3095 3095 q.new(repo, patch, *args, **pycompat.strkwargs(opts))
3096 3096 q.savedirty()
3097 3097 return 0
3098 3098
3099 3099
3100 3100 @command(
3101 3101 b"qrefresh",
3102 3102 [
3103 3103 (b'e', b'edit', None, _(b'invoke editor on commit messages')),
3104 3104 (b'g', b'git', None, _(b'use git extended diff format')),
3105 3105 (
3106 3106 b's',
3107 3107 b'short',
3108 3108 None,
3109 3109 _(b'refresh only files already in the patch and specified files'),
3110 3110 ),
3111 3111 (
3112 3112 b'U',
3113 3113 b'currentuser',
3114 3114 None,
3115 3115 _(b'add/update author field in patch with current user'),
3116 3116 ),
3117 3117 (
3118 3118 b'u',
3119 3119 b'user',
3120 3120 b'',
3121 3121 _(b'add/update author field in patch with given user'),
3122 3122 _(b'USER'),
3123 3123 ),
3124 3124 (
3125 3125 b'D',
3126 3126 b'currentdate',
3127 3127 None,
3128 3128 _(b'add/update date field in patch with current date'),
3129 3129 ),
3130 3130 (
3131 3131 b'd',
3132 3132 b'date',
3133 3133 b'',
3134 3134 _(b'add/update date field in patch with given date'),
3135 3135 _(b'DATE'),
3136 3136 ),
3137 3137 ]
3138 3138 + cmdutil.walkopts
3139 3139 + cmdutil.commitopts,
3140 3140 _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
3141 3141 helpcategory=command.CATEGORY_COMMITTING,
3142 3142 helpbasic=True,
3143 3143 inferrepo=True,
3144 3144 )
3145 3145 def refresh(ui, repo, *pats, **opts):
3146 3146 """update the current patch
3147 3147
3148 3148 If any file patterns are provided, the refreshed patch will
3149 3149 contain only the modifications that match those patterns; the
3150 3150 remaining modifications will remain in the working directory.
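
For example, ``hg qrefresh src/parser.py`` (a hypothetical path) makes
the patch contain only the changes matching that file; any other
modifications are left in the working directory.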
3151 3151
3152 3152 If -s/--short is specified, files currently included in the patch
3153 3153 will be refreshed just like matched files and remain in the patch.
3154 3154
3155 3155 If -e/--edit is specified, Mercurial will start your configured editor for
3156 3156 you to enter a message. In case qrefresh fails, you will find a backup of
3157 3157 your message in ``.hg/last-message.txt``.
3158 3158
3159 3159 hg add/remove/copy/rename work as usual, though you might want to
3160 3160 use git-style patches (-g/--git or [diff] git=1) to track copies
3161 3161 and renames. See the diffs help topic for more information on the
3162 3162 git diff format.
3163 3163
3164 3164 Returns 0 on success.
3165 3165 """
3166 3166 opts = pycompat.byteskwargs(opts)
3167 3167 q = repo.mq
3168 3168 message = cmdutil.logmessage(ui, opts)
3169 3169 setupheaderopts(ui, opts)
3170 3170 with repo.wlock():
3171 3171 ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
3172 3172 q.savedirty()
3173 3173 return ret
3174 3174
3175 3175
3176 3176 @command(
3177 3177 b"qdiff",
3178 3178 cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
3179 3179 _(b'hg qdiff [OPTION]... [FILE]...'),
3180 3180 helpcategory=command.CATEGORY_FILE_CONTENTS,
3181 3181 helpbasic=True,
3182 3182 inferrepo=True,
3183 3183 )
3184 3184 def diff(ui, repo, *pats, **opts):
3185 3185 """diff of the current patch and subsequent modifications
3186 3186
3187 3187 Shows a diff which includes the current patch as well as any
3188 3188 changes which have been made in the working directory since the
3189 3189 last refresh (thus showing what the current patch would become
3190 3190 after a qrefresh).
3191 3191
3192 3192 Use :hg:`diff` if you only want to see the changes made since the
3193 3193 last qrefresh, or :hg:`export qtip` if you want to see changes
3194 3194 made by the current patch without including changes made since the
3195 3195 qrefresh.
3196 3196
3197 3197 Returns 0 on success.
3198 3198 """
3199 3199 ui.pager(b'qdiff')
3200 3200 repo.mq.diff(repo, pats, pycompat.byteskwargs(opts))
3201 3201 return 0
3202 3202
3203 3203
3204 3204 @command(
3205 3205 b'qfold',
3206 3206 [
3207 3207 (b'e', b'edit', None, _(b'invoke editor on commit messages')),
3208 3208 (b'k', b'keep', None, _(b'keep folded patch files')),
3209 3209 ]
3210 3210 + cmdutil.commitopts,
3211 3211 _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
3212 3212 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
3213 3213 )
3214 3214 def fold(ui, repo, *files, **opts):
3215 3215 """fold the named patches into the current patch
3216 3216
3217 3217 Patches must not yet be applied. Each patch will be successively
3218 3218 applied to the current patch in the order given. If all the
3219 3219 patches apply successfully, the current patch will be refreshed
3220 3220 with the new cumulative patch, and the folded patches will be
3221 3221 deleted. With -k/--keep, the folded patch files will not be
3222 3222 removed afterwards.
3223 3223
3224 3224 The header for each folded patch will be concatenated with the
3225 3225 current patch header, separated by a line of ``* * *``.
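
For example, with patch p1 applied and p2 and p3 unapplied (hypothetical
names), ``hg qfold p2 p3`` applies p2 and p3 on top of p1, refreshes p1
with the combined changes, and deletes the p2 and p3 patch files.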
3226 3226
3227 3227 Returns 0 on success."""
3228 3228 opts = pycompat.byteskwargs(opts)
3229 3229 q = repo.mq
3230 3230 if not files:
3231 3231 raise error.Abort(_(b'qfold requires at least one patch name'))
3232 3232 if not q.checktoppatch(repo)[0]:
3233 3233 raise error.Abort(_(b'no patches applied'))
3234 3234 q.checklocalchanges(repo)
3235 3235
3236 3236 message = cmdutil.logmessage(ui, opts)
3237 3237
3238 3238 parent = q.lookup(b'qtip')
3239 3239 patches = []
3240 3240 messages = []
3241 3241 for f in files:
3242 3242 p = q.lookup(f)
3243 3243 if p in patches or p == parent:
3244 3244 ui.warn(_(b'skipping already folded patch %s\n') % p)
continue # actually skip it, as the message says
3245 3245 if q.isapplied(p):
3246 3246 raise error.Abort(
3247 3247 _(b'qfold cannot fold already applied patch %s') % p
3248 3248 )
3249 3249 patches.append(p)
3250 3250
3251 3251 for p in patches:
3252 3252 if not message:
3253 3253 ph = patchheader(q.join(p), q.plainmode)
3254 3254 if ph.message:
3255 3255 messages.append(ph.message)
3256 3256 pf = q.join(p)
3257 3257 (patchsuccess, files, fuzz) = q.patch(repo, pf)
3258 3258 if not patchsuccess:
3259 3259 raise error.Abort(_(b'error folding patch %s') % p)
3260 3260
3261 3261 if not message:
3262 3262 ph = patchheader(q.join(parent), q.plainmode)
3263 3263 message = ph.message
3264 3264 for msg in messages:
3265 3265 if msg:
3266 3266 if message:
3267 3267 message.append(b'* * *')
3268 3268 message.extend(msg)
3269 3269 message = b'\n'.join(message)
3270 3270
3271 3271 diffopts = q.patchopts(q.diffopts(), *patches)
3272 3272 with repo.wlock():
3273 3273 q.refresh(
3274 3274 repo,
3275 3275 msg=message,
3276 3276 git=diffopts.git,
3277 3277 edit=opts.get(b'edit'),
3278 3278 editform=b'mq.qfold',
3279 3279 )
3280 3280 q.delete(repo, patches, opts)
3281 3281 q.savedirty()
3282 3282
3283 3283
3284 3284 @command(
3285 3285 b"qgoto",
3286 3286 [
3287 3287 (
3288 3288 b'',
3289 3289 b'keep-changes',
3290 3290 None,
3291 3291 _(b'tolerate non-conflicting local changes'),
3292 3292 ),
3293 3293 (b'f', b'force', None, _(b'overwrite any local changes')),
3294 3294 (b'', b'no-backup', None, _(b'do not save backup copies of files')),
3295 3295 ],
3296 3296 _(b'hg qgoto [OPTION]... PATCH'),
3297 3297 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3298 3298 )
3299 3299 def goto(ui, repo, patch, **opts):
3300 3300 """push or pop patches until named patch is at top of stack
3301 3301
3302 3302 Returns 0 on success."""
3303 3303 opts = pycompat.byteskwargs(opts)
3304 3304 opts = fixkeepchangesopts(ui, opts)
3305 3305 q = repo.mq
3306 3306 patch = q.lookup(patch)
3307 3307 nobackup = opts.get(b'no_backup')
3308 3308 keepchanges = opts.get(b'keep_changes')
3309 3309 if q.isapplied(patch):
3310 3310 ret = q.pop(
3311 3311 repo,
3312 3312 patch,
3313 3313 force=opts.get(b'force'),
3314 3314 nobackup=nobackup,
3315 3315 keepchanges=keepchanges,
3316 3316 )
3317 3317 else:
3318 3318 ret = q.push(
3319 3319 repo,
3320 3320 patch,
3321 3321 force=opts.get(b'force'),
3322 3322 nobackup=nobackup,
3323 3323 keepchanges=keepchanges,
3324 3324 )
3325 3325 q.savedirty()
3326 3326 return ret
3327 3327
3328 3328
3329 3329 @command(
3330 3330 b"qguard",
3331 3331 [
3332 3332 (b'l', b'list', None, _(b'list all patches and guards')),
3333 3333 (b'n', b'none', None, _(b'drop all guards')),
3334 3334 ],
3335 3335 _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
3336 3336 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3337 3337 )
3338 3338 def guard(ui, repo, *args, **opts):
3339 3339 """set or print guards for a patch
3340 3340
3341 3341 Guards control whether a patch can be pushed. A patch with no
3342 3342 guards is always pushed. A patch with a positive guard ("+foo") is
3343 3343 pushed only if the :hg:`qselect` command has activated it. A patch with
3344 3344 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
3345 3345 has activated it.
3346 3346
3347 3347 With no arguments, print the currently active guards.
3348 3348 With arguments, set guards for the named patch.
3349 3349
3350 3350 .. note::
3351 3351
3352 3352 Specifying negative guards now requires '--'.
3353 3353
3354 3354 To set guards on another patch::
3355 3355
3356 3356 hg qguard other.patch -- +2.6.17 -stable
3357 3357
3358 3358 Returns 0 on success.
3359 3359 """
3360 3360
3361 3361 def status(idx):
3362 3362 guards = q.seriesguards[idx] or [b'unguarded']
3363 3363 if q.series[idx] in applied:
3364 3364 state = b'applied'
3365 3365 elif q.pushable(idx)[0]:
3366 3366 state = b'unapplied'
3367 3367 else:
3368 3368 state = b'guarded'
3369 3369 label = b'qguard.patch qguard.%s qseries.%s' % (state, state)
3370 3370 ui.write(b'%s: ' % ui.label(q.series[idx], label))
3371 3371
3372 3372 for i, guard in enumerate(guards):
3373 3373 if guard.startswith(b'+'):
3374 3374 ui.write(guard, label=b'qguard.positive')
3375 3375 elif guard.startswith(b'-'):
3376 3376 ui.write(guard, label=b'qguard.negative')
3377 3377 else:
3378 3378 ui.write(guard, label=b'qguard.unguarded')
3379 3379 if i != len(guards) - 1:
3380 3380 ui.write(b' ')
3381 3381 ui.write(b'\n')
3382 3382
3383 3383 q = repo.mq
3384 3384 applied = {p.name for p in q.applied}
3385 3385 patch = None
3386 3386 args = list(args)
3387 3387 if opts.get('list'):
3388 3388 if args or opts.get('none'):
3389 3389 raise error.Abort(
3390 3390 _(b'cannot mix -l/--list with options or arguments')
3391 3391 )
3392 for i in pycompat.xrange(len(q.series)):
3392 for i in range(len(q.series)):
3393 3393 status(i)
3394 3394 return
3395 3395 if not args or args[0][0:1] in b'-+':
3396 3396 if not q.applied:
3397 3397 raise error.Abort(_(b'no patches applied'))
3398 3398 patch = q.applied[-1].name
3399 3399 if patch is None and args[0][0:1] not in b'-+':
3400 3400 patch = args.pop(0)
3401 3401 if patch is None:
3402 3402 raise error.Abort(_(b'no patch to work with'))
3403 3403 if args or opts.get('none'):
3404 3404 idx = q.findseries(patch)
3405 3405 if idx is None:
3406 3406 raise error.Abort(_(b'no patch named %s') % patch)
3407 3407 q.setguards(idx, args)
3408 3408 q.savedirty()
3409 3409 else:
3410 3410 status(q.series.index(q.lookup(patch)))
3411 3411
3412 3412
3413 3413 @command(
3414 3414 b"qheader",
3415 3415 [],
3416 3416 _(b'hg qheader [PATCH]'),
3417 3417 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3418 3418 )
3419 3419 def header(ui, repo, patch=None):
3420 3420 """print the header of the topmost or specified patch
3421 3421
3422 3422 Returns 0 on success."""
3423 3423 q = repo.mq
3424 3424
3425 3425 if patch:
3426 3426 patch = q.lookup(patch)
3427 3427 else:
3428 3428 if not q.applied:
3429 3429 ui.write(_(b'no patches applied\n'))
3430 3430 return 1
3431 3431 patch = q.lookup(b'qtip')
3432 3432 ph = patchheader(q.join(patch), q.plainmode)
3433 3433
3434 3434 ui.write(b'\n'.join(ph.message) + b'\n')
3435 3435
3436 3436
3437 3437 def lastsavename(path):
3438 3438 (directory, base) = os.path.split(path)
3439 3439 names = os.listdir(directory)
3440 3440 namere = re.compile(b"%s.([0-9]+)" % base)
3441 3441 maxindex = None
3442 3442 maxname = None
3443 3443 for f in names:
3444 3444 m = namere.match(f)
3445 3445 if m:
3446 3446 index = int(m.group(1))
3447 3447 if maxindex is None or index > maxindex:
3448 3448 maxindex = index
3449 3449 maxname = f
3450 3450 if maxname:
3451 3451 return (os.path.join(directory, maxname), maxindex)
3452 3452 return (None, None)
3453 3453
3454 3454
3455 3455 def savename(path):
3456 3456 (last, index) = lastsavename(path)
3457 3457 if last is None:
3458 3458 index = 0
3459 3459 newpath = path + b".%d" % (index + 1)
3460 3460 return newpath
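# Illustration (hypothetical files): with b'patches.1' and b'patches.3'
# on disk, lastsavename(b'patches') returns (b'<dir>/patches.3', 3) and
# savename(b'patches') returns b'patches.4'; with no numbered saves at
# all, savename returns b'patches.1'.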
3461 3461
3462 3462
3463 3463 @command(
3464 3464 b"qpush",
3465 3465 [
3466 3466 (
3467 3467 b'',
3468 3468 b'keep-changes',
3469 3469 None,
3470 3470 _(b'tolerate non-conflicting local changes'),
3471 3471 ),
3472 3472 (b'f', b'force', None, _(b'apply on top of local changes')),
3473 3473 (
3474 3474 b'e',
3475 3475 b'exact',
3476 3476 None,
3477 3477 _(b'apply the target patch to its recorded parent'),
3478 3478 ),
3479 3479 (b'l', b'list', None, _(b'list patch name in commit text')),
3480 3480 (b'a', b'all', None, _(b'apply all patches')),
3481 3481 (b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')),
3482 3482 (b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')),
3483 3483 (
3484 3484 b'',
3485 3485 b'move',
3486 3486 None,
3487 3487 _(b'reorder patch series and apply only the patch'),
3488 3488 ),
3489 3489 (b'', b'no-backup', None, _(b'do not save backup copies of files')),
3490 3490 ],
3491 3491 _(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
3492 3492 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3493 3493 helpbasic=True,
3494 3494 )
3495 3495 def push(ui, repo, patch=None, **opts):
3496 3496 """push the next patch onto the stack
3497 3497
3498 3498 By default, abort if the working directory contains uncommitted
3499 3499 changes. With --keep-changes, abort only if the uncommitted files
3500 3500 overlap with patched files. With -f/--force, backup and patch over
3501 3501 uncommitted changes.
3502 3502
3503 3503 Return 0 on success.
3504 3504 """
3505 3505 q = repo.mq
3506 3506 mergeq = None
3507 3507
3508 3508 opts = pycompat.byteskwargs(opts)
3509 3509 opts = fixkeepchangesopts(ui, opts)
3510 3510 if opts.get(b'merge'):
3511 3511 if opts.get(b'name'):
3512 3512 newpath = repo.vfs.join(opts.get(b'name'))
3513 3513 else:
3514 3514 newpath, i = lastsavename(q.path)
3515 3515 if not newpath:
3516 3516 ui.warn(_(b"no saved queues found, please use -n\n"))
3517 3517 return 1
3518 3518 mergeq = queue(ui, repo.baseui, repo.path, newpath)
3519 3519 ui.warn(_(b"merging with queue at: %s\n") % mergeq.path)
3520 3520 ret = q.push(
3521 3521 repo,
3522 3522 patch,
3523 3523 force=opts.get(b'force'),
3524 3524 list=opts.get(b'list'),
3525 3525 mergeq=mergeq,
3526 3526 all=opts.get(b'all'),
3527 3527 move=opts.get(b'move'),
3528 3528 exact=opts.get(b'exact'),
3529 3529 nobackup=opts.get(b'no_backup'),
3530 3530 keepchanges=opts.get(b'keep_changes'),
3531 3531 )
3532 3532 return ret
3533 3533
3534 3534
3535 3535 @command(
3536 3536 b"qpop",
3537 3537 [
3538 3538 (b'a', b'all', None, _(b'pop all patches')),
3539 3539 (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
3540 3540 (
3541 3541 b'',
3542 3542 b'keep-changes',
3543 3543 None,
3544 3544 _(b'tolerate non-conflicting local changes'),
3545 3545 ),
3546 3546 (b'f', b'force', None, _(b'forget any local changes to patched files')),
3547 3547 (b'', b'no-backup', None, _(b'do not save backup copies of files')),
3548 3548 ],
3549 3549 _(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
3550 3550 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3551 3551 helpbasic=True,
3552 3552 )
3553 3553 def pop(ui, repo, patch=None, **opts):
3554 3554 """pop the current patch off the stack
3555 3555
3556 3556 Without argument, pops off the top of the patch stack. If given a
3557 3557 patch name, keeps popping off patches until the named patch is at
3558 3558 the top of the stack.
3559 3559
3560 3560 By default, abort if the working directory contains uncommitted
3561 3561 changes. With --keep-changes, abort only if the uncommitted files
3562 3562 overlap with patched files. With -f/--force, backup and discard
3563 3563 changes made to such files.
3564 3564
3565 3565 Return 0 on success.
3566 3566 """
3567 3567 opts = pycompat.byteskwargs(opts)
3568 3568 opts = fixkeepchangesopts(ui, opts)
3569 3569 localupdate = True
3570 3570 if opts.get(b'name'):
3571 3571 q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
3572 3572 ui.warn(_(b'using patch queue: %s\n') % q.path)
3573 3573 localupdate = False
3574 3574 else:
3575 3575 q = repo.mq
3576 3576 ret = q.pop(
3577 3577 repo,
3578 3578 patch,
3579 3579 force=opts.get(b'force'),
3580 3580 update=localupdate,
3581 3581 all=opts.get(b'all'),
3582 3582 nobackup=opts.get(b'no_backup'),
3583 3583 keepchanges=opts.get(b'keep_changes'),
3584 3584 )
3585 3585 q.savedirty()
3586 3586 return ret
3587 3587
3588 3588
3589 3589 @command(
3590 3590 b"qrename|qmv",
3591 3591 [],
3592 3592 _(b'hg qrename PATCH1 [PATCH2]'),
3593 3593 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3594 3594 )
3595 3595 def rename(ui, repo, patch, name=None, **opts):
3596 3596 """rename a patch
3597 3597
3598 3598 With one argument, renames the current patch to PATCH1.
3599 3599 With two arguments, renames PATCH1 to PATCH2.
3600 3600
3601 3601 Returns 0 on success."""
3602 3602 q = repo.mq
3603 3603 if not name:
3604 3604 name = patch
3605 3605 patch = None
3606 3606
3607 3607 if patch:
3608 3608 patch = q.lookup(patch)
3609 3609 else:
3610 3610 if not q.applied:
3611 3611 ui.write(_(b'no patches applied\n'))
3612 3612 return
3613 3613 patch = q.lookup(b'qtip')
3614 3614 absdest = q.join(name)
3615 3615 if os.path.isdir(absdest):
3616 3616 name = normname(os.path.join(name, os.path.basename(patch)))
3617 3617 absdest = q.join(name)
3618 3618 q.checkpatchname(name)
3619 3619
3620 3620 ui.note(_(b'renaming %s to %s\n') % (patch, name))
3621 3621 i = q.findseries(patch)
3622 3622 guards = q.guard_re.findall(q.fullseries[i])
3623 3623 q.fullseries[i] = name + b''.join([b' #' + g for g in guards])
3624 3624 q.parseseries()
3625 3625 q.seriesdirty = True
3626 3626
3627 3627 info = q.isapplied(patch)
3628 3628 if info:
3629 3629 q.applied[info[0]] = statusentry(info[1], name)
3630 3630 q.applieddirty = True
3631 3631
3632 3632 destdir = os.path.dirname(absdest)
3633 3633 if not os.path.isdir(destdir):
3634 3634 os.makedirs(destdir)
3635 3635 util.rename(q.join(patch), absdest)
3636 3636 r = q.qrepo()
3637 3637 if r and patch in r.dirstate:
3638 3638 wctx = r[None]
3639 3639 with r.wlock():
3640 3640 if r.dirstate.get_entry(patch).added:
3641 3641 r.dirstate.set_untracked(patch)
3642 3642 r.dirstate.set_tracked(name)
3643 3643 else:
3644 3644 wctx.copy(patch, name)
3645 3645 wctx.forget([patch])
3646 3646
3647 3647 q.savedirty()
3648 3648
3649 3649
3650 3650 @command(
3651 3651 b"qrestore",
3652 3652 [
3653 3653 (b'd', b'delete', None, _(b'delete save entry')),
3654 3654 (b'u', b'update', None, _(b'update queue working directory')),
3655 3655 ],
3656 3656 _(b'hg qrestore [-d] [-u] REV'),
3657 3657 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3658 3658 )
3659 3659 def restore(ui, repo, rev, **opts):
3660 3660 """restore the queue state saved by a revision (DEPRECATED)
3661 3661
3662 3662 This command is deprecated, use :hg:`rebase` instead."""
3663 3663 rev = repo.lookup(rev)
3664 3664 q = repo.mq
3665 3665 q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update'))
3666 3666 q.savedirty()
3667 3667 return 0
3668 3668
3669 3669
3670 3670 @command(
3671 3671 b"qsave",
3672 3672 [
3673 3673 (b'c', b'copy', None, _(b'copy patch directory')),
3674 3674 (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
3675 3675 (b'e', b'empty', None, _(b'clear queue status file')),
3676 3676 (b'f', b'force', None, _(b'force copy')),
3677 3677 ]
3678 3678 + cmdutil.commitopts,
3679 3679 _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
3680 3680 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3681 3681 )
3682 3682 def save(ui, repo, **opts):
3683 3683 """save current queue state (DEPRECATED)
3684 3684
3685 3685 This command is deprecated, use :hg:`rebase` instead."""
3686 3686 q = repo.mq
3687 3687 opts = pycompat.byteskwargs(opts)
3688 3688 message = cmdutil.logmessage(ui, opts)
3689 3689 ret = q.save(repo, msg=message)
3690 3690 if ret:
3691 3691 return ret
3692 3692 q.savedirty() # save to .hg/patches before copying
3693 3693 if opts.get(b'copy'):
3694 3694 path = q.path
3695 3695 if opts.get(b'name'):
3696 3696 newpath = os.path.join(q.basepath, opts.get(b'name'))
3697 3697 if os.path.exists(newpath):
3698 3698 if not os.path.isdir(newpath):
3699 3699 raise error.Abort(
3700 3700 _(b'destination %s exists and is not a directory')
3701 3701 % newpath
3702 3702 )
3703 3703 if not opts.get(b'force'):
3704 3704 raise error.Abort(
3705 3705 _(b'destination %s exists, use -f to force') % newpath
3706 3706 )
3707 3707 else:
3708 3708 newpath = savename(path)
3709 3709 ui.warn(_(b"copy %s to %s\n") % (path, newpath))
3710 3710 util.copyfiles(path, newpath)
3711 3711 if opts.get(b'empty'):
3712 3712 del q.applied[:]
3713 3713 q.applieddirty = True
3714 3714 q.savedirty()
3715 3715 return 0
3716 3716
3717 3717
3718 3718 @command(
3719 3719 b"qselect",
3720 3720 [
3721 3721 (b'n', b'none', None, _(b'disable all guards')),
3722 3722 (b's', b'series', None, _(b'list all guards in series file')),
3723 3723 (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
3724 3724 (b'', b'reapply', None, _(b'pop, then reapply patches')),
3725 3725 ],
3726 3726 _(b'hg qselect [OPTION]... [GUARD]...'),
3727 3727 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3728 3728 )
3729 3729 def select(ui, repo, *args, **opts):
3730 3730 """set or print guarded patches to push
3731 3731
3732 3732 Use the :hg:`qguard` command to set or print guards on a patch, then use
3733 3733 qselect to tell mq which guards to use. A patch will be pushed if
3734 3734 it has no guards or any positive guards match the currently
3735 3735 selected guard, but will not be pushed if any negative guards
3736 3736 match the current guard. For example::
3737 3737
3738 3738 qguard foo.patch -- -stable (negative guard)
3739 3739 qguard bar.patch +stable (positive guard)
3740 3740 qselect stable
3741 3741
3742 3742 This activates the "stable" guard. mq will skip foo.patch (because
3743 3743 it has a negative match) but push bar.patch (because it has a
3744 3744 positive match).
3745 3745
3746 3746 With no arguments, prints the currently active guards.
3747 3747 With one argument, sets the active guard.
3748 3748
3749 3749 Use -n/--none to deactivate guards (no other arguments needed).
3750 3750 When no guards are active, patches with positive guards are
3751 3751 skipped and patches with negative guards are pushed.
3752 3752
3753 3753 qselect can change the guards on applied patches. It does not pop
3754 3754 guarded patches by default. Use --pop to pop back to the last
3755 3755 applied patch that is not guarded. Use --reapply (which implies
3756 3756 --pop) to push back to the current patch afterwards, but skip
3757 3757 guarded patches.
3758 3758
3759 3759 Use -s/--series to print a list of all guards in the series file
3760 3760 (no other arguments needed). Use -v for more information.
3761 3761
3762 3762 Returns 0 on success."""
3763 3763
3764 3764 q = repo.mq
3765 3765 opts = pycompat.byteskwargs(opts)
3766 3766 guards = q.active()
3767 3767 pushable = lambda i: q.pushable(q.applied[i].name)[0]
3768 3768 if args or opts.get(b'none'):
3769 3769 old_unapplied = q.unapplied(repo)
3770 old_guarded = [
3771 i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
3772 ]
3770 old_guarded = [i for i in range(len(q.applied)) if not pushable(i)]
3773 3771 q.setactive(args)
3774 3772 q.savedirty()
3775 3773 if not args:
3776 3774 ui.status(_(b'guards deactivated\n'))
3777 3775 if not opts.get(b'pop') and not opts.get(b'reapply'):
3778 3776 unapplied = q.unapplied(repo)
3779 guarded = [
3780 i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
3781 ]
3777 guarded = [i for i in range(len(q.applied)) if not pushable(i)]
3782 3778 if len(unapplied) != len(old_unapplied):
3783 3779 ui.status(
3784 3780 _(
3785 3781 b'number of unguarded, unapplied patches has '
3786 3782 b'changed from %d to %d\n'
3787 3783 )
3788 3784 % (len(old_unapplied), len(unapplied))
3789 3785 )
3790 3786 if len(guarded) != len(old_guarded):
3791 3787 ui.status(
3792 3788 _(
3793 3789 b'number of guarded, applied patches has changed '
3794 3790 b'from %d to %d\n'
3795 3791 )
3796 3792 % (len(old_guarded), len(guarded))
3797 3793 )
3798 3794 elif opts.get(b'series'):
3799 3795 guards = {}
3800 3796 noguards = 0
3801 3797 for gs in q.seriesguards:
3802 3798 if not gs:
3803 3799 noguards += 1
3804 3800 for g in gs:
3805 3801 guards.setdefault(g, 0)
3806 3802 guards[g] += 1
3807 3803 if ui.verbose:
3808 3804 guards[b'NONE'] = noguards
3809 3805 guards = list(guards.items())
3810 3806 guards.sort(key=lambda x: x[0][1:])
3811 3807 if guards:
3812 3808 ui.note(_(b'guards in series file:\n'))
3813 3809 for guard, count in guards:
3814 3810 ui.note(b'%2d ' % count)
3815 3811 ui.write(guard, b'\n')
3816 3812 else:
3817 3813 ui.note(_(b'no guards in series file\n'))
3818 3814 else:
3819 3815 if guards:
3820 3816 ui.note(_(b'active guards:\n'))
3821 3817 for g in guards:
3822 3818 ui.write(g, b'\n')
3823 3819 else:
3824 3820 ui.write(_(b'no active guards\n'))
3825 3821 reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
3826 3822 popped = False
3827 3823 if opts.get(b'pop') or opts.get(b'reapply'):
3828 for i in pycompat.xrange(len(q.applied)):
3824 for i in range(len(q.applied)):
3829 3825 if not pushable(i):
3830 3826 ui.status(_(b'popping guarded patches\n'))
3831 3827 popped = True
3832 3828 if i == 0:
3833 3829 q.pop(repo, all=True)
3834 3830 else:
3835 3831 q.pop(repo, q.applied[i - 1].name)
3836 3832 break
3837 3833 if popped:
3838 3834 try:
3839 3835 if reapply:
3840 3836 ui.status(_(b'reapplying unguarded patches\n'))
3841 3837 q.push(repo, reapply)
3842 3838 finally:
3843 3839 q.savedirty()
3844 3840
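Restating the guard semantics from the docstring as a small predicate (a toy model, not the real q.pushable): a negative guard matching any active guard blocks the patch, while a patch carrying positive guards needs at least one of them active.

    def pushable(patch_guards, active):
        # toy model of mq guard matching; guards look like '+stable' / '-stable'
        pos = {g[1:] for g in patch_guards if g.startswith('+')}
        neg = {g[1:] for g in patch_guards if g.startswith('-')}
        if neg & set(active):
            return False            # a negative guard matched
        if pos and not pos & set(active):
            return False            # positive guards exist but none matched
        return True

    assert not pushable(['-stable'], ['stable'])   # foo.patch is skipped
    assert pushable(['+stable'], ['stable'])       # bar.patch is pushed
    assert not pushable(['+stable'], [])           # positive guards need a match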
3845 3841
3846 3842 @command(
3847 3843 b"qfinish",
3848 3844 [(b'a', b'applied', None, _(b'finish all applied changesets'))],
3849 3845 _(b'hg qfinish [-a] [REV]...'),
3850 3846 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3851 3847 )
3852 3848 def finish(ui, repo, *revrange, **opts):
3853 3849 """move applied patches into repository history
3854 3850
3855 3851 Finishes the specified revisions (corresponding to applied
3856 3852 patches) by moving them out of mq control into regular repository
3857 3853 history.
3858 3854
3859 3855 Accepts a revision range or the -a/--applied option. If --applied
3860 3856 is specified, all applied mq revisions are removed from mq
3861 3857 control. Otherwise, the given revisions must be at the base of the
3862 3858 stack of applied patches.
3863 3859
3864 3860 This can be especially useful if your changes have been applied to
3865 3861 an upstream repository, or if you are about to push your changes
3866 3862 to upstream.
3867 3863
3868 3864 Returns 0 on success.
3869 3865 """
3870 3866 if not opts.get('applied') and not revrange:
3871 3867 raise error.Abort(_(b'no revisions specified'))
3872 3868 elif opts.get('applied'):
3873 3869 revrange = (b'qbase::qtip',) + revrange
3874 3870
3875 3871 q = repo.mq
3876 3872 if not q.applied:
3877 3873 ui.status(_(b'no patches applied\n'))
3878 3874 return 0
3879 3875
3880 3876 revs = logcmdutil.revrange(repo, revrange)
3881 3877 if repo[b'.'].rev() in revs and repo[None].files():
3882 3878 ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
3883 3879 # queue.finish may change phases but leaves the responsibility to lock the
3884 3880 # repo to the caller, to avoid deadlock with wlock. This command code is
3885 3881 # responsible for this locking.
3886 3882 with repo.lock():
3887 3883 q.finish(repo, revs)
3888 3884 q.savedirty()
3889 3885 return 0
3890 3886
3891 3887
3892 3888 @command(
3893 3889 b"qqueue",
3894 3890 [
3895 3891 (b'l', b'list', False, _(b'list all available queues')),
3896 3892 (b'', b'active', False, _(b'print name of active queue')),
3897 3893 (b'c', b'create', False, _(b'create new queue')),
3898 3894 (b'', b'rename', False, _(b'rename active queue')),
3899 3895 (b'', b'delete', False, _(b'delete reference to queue')),
3900 3896 (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
3901 3897 ],
3902 3898 _(b'[OPTION] [QUEUE]'),
3903 3899 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3904 3900 )
3905 3901 def qqueue(ui, repo, name=None, **opts):
3906 3902 """manage multiple patch queues
3907 3903
3908 3904 Supports switching between different patch queues, as well as creating
3909 3905 new patch queues and deleting existing ones.
3910 3906
3911 3907 Omitting a queue name or specifying -l/--list will show you the registered
3912 3908 queues - by default the "normal" patches queue is registered. The currently
3913 3909 active queue will be marked with "(active)". Specifying --active will print
3914 3910 only the name of the active queue.
3915 3911
3916 3912 To create a new queue, use -c/--create. The queue is automatically made
3917 3913 active, except in the case where there are applied patches from the
3918 3914 currently active queue in the repository. In that case the queue will
3919 3915 only be created, and switching will fail.
3920 3916
3921 3917 To delete an existing queue, use --delete. You cannot delete the currently
3922 3918 active queue.
3923 3919
3924 3920 Returns 0 on success.
3925 3921 """
3926 3922 q = repo.mq
3927 3923 _defaultqueue = b'patches'
3928 3924 _allqueues = b'patches.queues'
3929 3925 _activequeue = b'patches.queue'
3930 3926
3931 3927 def _getcurrent():
3932 3928 cur = os.path.basename(q.path)
3933 3929 if cur.startswith(b'patches-'):
3934 3930 cur = cur[8:]
3935 3931 return cur
3936 3932
3937 3933 def _noqueues():
3938 3934 try:
3939 3935 fh = repo.vfs(_allqueues, b'r')
3940 3936 fh.close()
3941 3937 except IOError:
3942 3938 return True
3943 3939
3944 3940 return False
3945 3941
3946 3942 def _getqueues():
3947 3943 current = _getcurrent()
3948 3944
3949 3945 try:
3950 3946 fh = repo.vfs(_allqueues, b'r')
3951 3947 queues = [queue.strip() for queue in fh if queue.strip()]
3952 3948 fh.close()
3953 3949 if current not in queues:
3954 3950 queues.append(current)
3955 3951 except IOError:
3956 3952 queues = [_defaultqueue]
3957 3953
3958 3954 return sorted(queues)
3959 3955
3960 3956 def _setactive(name):
3961 3957 if q.applied:
3962 3958 raise error.Abort(
3963 3959 _(
3964 3960 b'new queue created, but cannot make active '
3965 3961 b'as patches are applied'
3966 3962 )
3967 3963 )
3968 3964 _setactivenocheck(name)
3969 3965
3970 3966 def _setactivenocheck(name):
3971 3967 fh = repo.vfs(_activequeue, b'w')
3972 3968 if name != b'patches':
3973 3969 fh.write(name)
3974 3970 fh.close()
3975 3971
3976 3972 def _addqueue(name):
3977 3973 fh = repo.vfs(_allqueues, b'a')
3978 3974 fh.write(b'%s\n' % (name,))
3979 3975 fh.close()
3980 3976
3981 3977 def _queuedir(name):
3982 3978 if name == b'patches':
3983 3979 return repo.vfs.join(b'patches')
3984 3980 else:
3985 3981 return repo.vfs.join(b'patches-' + name)
3986 3982
3987 3983 def _validname(name):
3988 3984 for n in name:
3989 3985 if n in b':\\/.':
3990 3986 return False
3991 3987 return True
3992 3988
3993 3989 def _delete(name):
3994 3990 if name not in existing:
3995 3991 raise error.Abort(_(b'cannot delete queue that does not exist'))
3996 3992
3997 3993 current = _getcurrent()
3998 3994
3999 3995 if name == current:
4000 3996 raise error.Abort(_(b'cannot delete currently active queue'))
4001 3997
4002 3998 fh = repo.vfs(b'patches.queues.new', b'w')
4003 3999 for queue in existing:
4004 4000 if queue == name:
4005 4001 continue
4006 4002 fh.write(b'%s\n' % (queue,))
4007 4003 fh.close()
4008 4004 repo.vfs.rename(b'patches.queues.new', _allqueues)
4009 4005
4010 4006 opts = pycompat.byteskwargs(opts)
4011 4007 if not name or opts.get(b'list') or opts.get(b'active'):
4012 4008 current = _getcurrent()
4013 4009 if opts.get(b'active'):
4014 4010 ui.write(b'%s\n' % (current,))
4015 4011 return
4016 4012 for queue in _getqueues():
4017 4013 ui.write(b'%s' % (queue,))
4018 4014 if queue == current and not ui.quiet:
4019 4015 ui.write(_(b' (active)\n'))
4020 4016 else:
4021 4017 ui.write(b'\n')
4022 4018 return
4023 4019
4024 4020 if not _validname(name):
4025 4021 raise error.Abort(
4026 4022 _(b'invalid queue name, may not contain the characters ":\\/."')
4027 4023 )
4028 4024
4029 4025 with repo.wlock():
4030 4026 existing = _getqueues()
4031 4027
4032 4028 if opts.get(b'create'):
4033 4029 if name in existing:
4034 4030 raise error.Abort(_(b'queue "%s" already exists') % name)
4035 4031 if _noqueues():
4036 4032 _addqueue(_defaultqueue)
4037 4033 _addqueue(name)
4038 4034 _setactive(name)
4039 4035 elif opts.get(b'rename'):
4040 4036 current = _getcurrent()
4041 4037 if name == current:
4042 4038 raise error.Abort(
4043 4039 _(b'can\'t rename "%s" to its current name') % name
4044 4040 )
4045 4041 if name in existing:
4046 4042 raise error.Abort(_(b'queue "%s" already exists') % name)
4047 4043
4048 4044 olddir = _queuedir(current)
4049 4045 newdir = _queuedir(name)
4050 4046
4051 4047 if os.path.exists(newdir):
4052 4048 raise error.Abort(
4053 4049 _(b'non-queue directory "%s" already exists') % newdir
4054 4050 )
4055 4051
4056 4052 fh = repo.vfs(b'patches.queues.new', b'w')
4057 4053 for queue in existing:
4058 4054 if queue == current:
4059 4055 fh.write(b'%s\n' % (name,))
4060 4056 if os.path.exists(olddir):
4061 4057 util.rename(olddir, newdir)
4062 4058 else:
4063 4059 fh.write(b'%s\n' % (queue,))
4064 4060 fh.close()
4065 4061 repo.vfs.rename(b'patches.queues.new', _allqueues)
4066 4062 _setactivenocheck(name)
4067 4063 elif opts.get(b'delete'):
4068 4064 _delete(name)
4069 4065 elif opts.get(b'purge'):
4070 4066 if name in existing:
4071 4067 _delete(name)
4072 4068 qdir = _queuedir(name)
4073 4069 if os.path.exists(qdir):
4074 4070 shutil.rmtree(qdir)
4075 4071 else:
4076 4072 if name not in existing:
4077 4073 raise error.Abort(_(b'use --create to create a new queue'))
4078 4074 _setactive(name)
4079 4075
4080 4076
4081 4077 def mqphasedefaults(repo, roots):
4082 4078 """callback used to set mq changeset as secret when no phase data exists"""
4083 4079 if repo.mq.applied:
4084 4080 if repo.ui.configbool(b'mq', b'secret'):
4085 4081 mqphase = phases.secret
4086 4082 else:
4087 4083 mqphase = phases.draft
4088 4084 qbase = repo[repo.mq.applied[0].node]
4089 4085 roots[mqphase].add(qbase.node())
4090 4086 return roots
4091 4087
4092 4088
4093 4089 def reposetup(ui, repo):
4094 4090 class mqrepo(repo.__class__):
4095 4091 @localrepo.unfilteredpropertycache
4096 4092 def mq(self):
4097 4093 return queue(self.ui, self.baseui, self.path)
4098 4094
4099 4095 def invalidateall(self):
4100 4096 super(mqrepo, self).invalidateall()
4101 4097 if localrepo.hasunfilteredcache(self, 'mq'):
4102 4098 # recreate mq in case queue path was changed
4103 4099 delattr(self.unfiltered(), 'mq')
4104 4100
4105 4101 def abortifwdirpatched(self, errmsg, force=False):
4106 4102 if self.mq.applied and self.mq.checkapplied and not force:
4107 4103 parents = self.dirstate.parents()
4108 4104 patches = [s.node for s in self.mq.applied]
4109 4105 if any(p in patches for p in parents):
4110 4106 raise error.Abort(errmsg)
4111 4107
4112 4108 def commit(
4113 4109 self,
4114 4110 text=b"",
4115 4111 user=None,
4116 4112 date=None,
4117 4113 match=None,
4118 4114 force=False,
4119 4115 editor=False,
4120 4116 extra=None,
4121 4117 ):
4122 4118 if extra is None:
4123 4119 extra = {}
4124 4120 self.abortifwdirpatched(
4125 4121 _(b'cannot commit over an applied mq patch'), force
4126 4122 )
4127 4123
4128 4124 return super(mqrepo, self).commit(
4129 4125 text, user, date, match, force, editor, extra
4130 4126 )
4131 4127
4132 4128 def checkpush(self, pushop):
4133 4129 if self.mq.applied and self.mq.checkapplied and not pushop.force:
4134 4130 outapplied = [e.node for e in self.mq.applied]
4135 4131 if pushop.revs:
4136 4132 # Assume applied patches have no non-patch descendants and
4137 4133 # are not on the remote already. Filter out any changeset not
4138 4134 # being pushed.
4139 4135 heads = set(pushop.revs)
4140 4136 for node in reversed(outapplied):
4141 4137 if node in heads:
4142 4138 break
4143 4139 else:
4144 4140 outapplied.pop()
4145 4141 # looking for pushed and shared changeset
4146 4142 for node in outapplied:
4147 4143 if self[node].phase() < phases.secret:
4148 4144 raise error.Abort(_(b'source has mq patches applied'))
4149 4145 # no non-secret patches pushed
4150 4146 super(mqrepo, self).checkpush(pushop)
4151 4147
4152 4148 def _findtags(self):
4153 4149 '''augment tags from base class with patch tags'''
4154 4150 result = super(mqrepo, self)._findtags()
4155 4151
4156 4152 q = self.mq
4157 4153 if not q.applied:
4158 4154 return result
4159 4155
4160 4156 mqtags = [(patch.node, patch.name) for patch in q.applied]
4161 4157
4162 4158 try:
4163 4159 # for now ignore filtering business
4164 4160 self.unfiltered().changelog.rev(mqtags[-1][0])
4165 4161 except error.LookupError:
4166 4162 self.ui.warn(
4167 4163 _(b'mq status file refers to unknown node %s\n')
4168 4164 % short(mqtags[-1][0])
4169 4165 )
4170 4166 return result
4171 4167
4172 4168 # do not add fake tags for filtered revisions
4173 4169 included = self.changelog.hasnode
4174 4170 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
4175 4171 if not mqtags:
4176 4172 return result
4177 4173
4178 4174 mqtags.append((mqtags[-1][0], b'qtip'))
4179 4175 mqtags.append((mqtags[0][0], b'qbase'))
4180 4176 mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
4181 4177 tags = result[0]
4182 4178 for patch in mqtags:
4183 4179 if patch[1] in tags:
4184 4180 self.ui.warn(
4185 4181 _(b'tag %s overrides mq patch of the same name\n')
4186 4182 % patch[1]
4187 4183 )
4188 4184 else:
4189 4185 tags[patch[1]] = patch[0]
4190 4186
4191 4187 return result
4192 4188
4193 4189 if repo.local():
4194 4190 repo.__class__ = mqrepo
4195 4191
4196 4192 repo._phasedefaults.append(mqphasedefaults)
4197 4193
4198 4194
4199 4195 def mqimport(orig, ui, repo, *args, **kwargs):
4200 4196 if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
4201 4197 'no_commit', False
4202 4198 ):
4203 4199 repo.abortifwdirpatched(
4204 4200 _(b'cannot import over an applied patch'), kwargs.get('force')
4205 4201 )
4206 4202 return orig(ui, repo, *args, **kwargs)
4207 4203
4208 4204
4209 4205 def mqinit(orig, ui, *args, **kwargs):
4210 4206 mq = kwargs.pop('mq', None)
4211 4207
4212 4208 if not mq:
4213 4209 return orig(ui, *args, **kwargs)
4214 4210
4215 4211 if args:
4216 4212 repopath = args[0]
4217 4213 if not hg.islocal(repopath):
4218 4214 raise error.Abort(
4219 4215 _(b'only a local queue repository may be initialized')
4220 4216 )
4221 4217 else:
4222 4218 repopath = cmdutil.findrepo(encoding.getcwd())
4223 4219 if not repopath:
4224 4220 raise error.Abort(
4225 4221 _(b'there is no Mercurial repository here (.hg not found)')
4226 4222 )
4227 4223 repo = hg.repository(ui, repopath)
4228 4224 return qinit(ui, repo, True)
4229 4225
4230 4226
4231 4227 def mqcommand(orig, ui, repo, *args, **kwargs):
4232 4228 """Add --mq option to operate on patch repository instead of main"""
4233 4229
4234 4230 # some commands do not like getting unknown options
4235 4231 mq = kwargs.pop('mq', None)
4236 4232
4237 4233 if not mq:
4238 4234 return orig(ui, repo, *args, **kwargs)
4239 4235
4240 4236 q = repo.mq
4241 4237 r = q.qrepo()
4242 4238 if not r:
4243 4239 raise error.Abort(_(b'no queue repository'))
4244 4240 return orig(r.ui, r, *args, **kwargs)
4245 4241
4246 4242
4247 4243 def summaryhook(ui, repo):
4248 4244 q = repo.mq
4249 4245 m = []
4250 4246 a, u = len(q.applied), len(q.unapplied(repo))
4251 4247 if a:
4252 4248 m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
4253 4249 if u:
4254 4250 m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
4255 4251 if m:
4256 4252 # i18n: column positioning for "hg summary"
4257 4253 ui.write(_(b"mq: %s\n") % b', '.join(m))
4258 4254 else:
4259 4255 # i18n: column positioning for "hg summary"
4260 4256 ui.note(_(b"mq: (empty queue)\n"))
4261 4257
4262 4258
4263 4259 revsetpredicate = registrar.revsetpredicate()
4264 4260
4265 4261
4266 4262 @revsetpredicate(b'mq()')
4267 4263 def revsetmq(repo, subset, x):
4268 4264 """Changesets managed by MQ."""
4269 4265 revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
4270 4266 applied = {repo[r.node].rev() for r in repo.mq.applied}
4271 4267 return smartset.baseset([r for r in subset if r in applied])
4272 4268
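For reference, the predicate is used like any other revset; the invocations below are ordinary hg usage in the same style as the docstring examples above, not new functionality:

    hg log -r "mq()"                 # changesets currently under mq control
    hg log -r "mq() and draft()"     # combine with other predicates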
4273 4269
4274 4270 # tell hggettext to extract docstrings from these functions:
4275 4271 i18nfunctions = [revsetmq]
4276 4272
4277 4273
4278 4274 def extsetup(ui):
4279 4275 # Ensure mq wrappers are called first, regardless of extension load order by
4280 4276 # NOT wrapping in uisetup() and instead deferring to init stage two here.
4281 4277 mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
4282 4278
4283 4279 extensions.wrapcommand(commands.table, b'import', mqimport)
4284 4280 cmdutil.summaryhooks.add(b'mq', summaryhook)
4285 4281
4286 4282 entry = extensions.wrapcommand(commands.table, b'init', mqinit)
4287 4283 entry[1].extend(mqopt)
4288 4284
4289 4285 def dotable(cmdtable):
4290 4286 for cmd, entry in cmdtable.items():
4291 4287 cmd = cmdutil.parsealiases(cmd)[0]
4292 4288 func = entry[0]
4293 4289 if func.norepo:
4294 4290 continue
4295 4291 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
4296 4292 entry[1].extend(mqopt)
4297 4293
4298 4294 dotable(commands.table)
4299 4295
4300 4296 thismodule = sys.modules["hgext.mq"]
4301 4297 for extname, extmodule in extensions.extensions():
4302 4298 if extmodule != thismodule:
4303 4299 dotable(getattr(extmodule, 'cmdtable', {}))
4304 4300
4305 4301
4306 4302 colortable = {
4307 4303 b'qguard.negative': b'red',
4308 4304 b'qguard.positive': b'yellow',
4309 4305 b'qguard.unguarded': b'green',
4310 4306 b'qseries.applied': b'blue bold underline',
4311 4307 b'qseries.guarded': b'black bold',
4312 4308 b'qseries.missing': b'red bold',
4313 4309 b'qseries.unapplied': b'black bold',
4314 4310 }
@@ -1,559 +1,559 b''
1 1 import collections
2 2 import errno
3 3 import mmap
4 4 import os
5 5 import struct
6 6 import time
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.pycompat import (
10 10 getattr,
11 11 open,
12 12 )
13 13 from mercurial.node import hex
14 14 from mercurial import (
15 15 policy,
16 16 pycompat,
17 17 util,
18 18 vfs as vfsmod,
19 19 )
20 20 from mercurial.utils import hashutil
21 21 from . import shallowutil
22 22
23 23 osutil = policy.importmod('osutil')
24 24
25 25 # The pack version supported by this implementation. This will need to be
26 26 # rev'd whenever the byte format changes. Ex: changing the fanout prefix,
27 27 # changing any of the int sizes, changing the delta algorithm, etc.
28 28 PACKVERSIONSIZE = 1
29 29 INDEXVERSIONSIZE = 2
30 30
31 31 FANOUTSTART = INDEXVERSIONSIZE
32 32
33 33 # Constant that indicates a fanout table entry hasn't been filled in. (This does
34 34 # not get serialized)
35 35 EMPTYFANOUT = -1
36 36
37 37 # The fanout prefix is the number of bytes that can be addressed by the fanout
38 38 # table. Example: a fanout prefix of 1 means we use the first byte of a hash to
39 39 # look in the fanout table (which will be 2^8 entries long).
40 40 SMALLFANOUTPREFIX = 1
41 41 LARGEFANOUTPREFIX = 2
42 42
43 43 # The number of entries in the index at which point we switch to a large fanout.
44 44 # It is chosen to balance the linear scan through a sparse fanout, with the
45 45 # size of the bisect in the actual index.
46 46 # 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
47 47 # bisect) with (8 step fanout scan + 1 step bisect)
48 48 # 5 step bisect = log(2^16 / 8 / 255) # fanout
49 49 # 8 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries
50 50 SMALLFANOUTCUTOFF = 2 ** 16 // 8
51 51
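A toy illustration of the fanout idea described above (layout simplified, offsets made up): the first byte(s) of a node index directly into the fanout table, which bounds where the bisect starts.

    import struct

    fanout = [0] * 256            # small (1-byte) prefix -> 2^8 entries
    fanout[0xAB] = 42             # pretend entries for 0xab.. start at slot 42

    def fanout_slot(node):
        # mirrors the b'!B' fanoutstruct used for the small prefix
        (key,) = struct.unpack('!B', node[:1])
        return fanout[key]

    assert fanout_slot(b'\xab\xcd\xef') == 42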
52 52 # The amount of time to wait between checking for new packs. This prevents an
53 53 # exception when data is moved to a new pack after the process has already
54 54 # loaded the pack list.
55 55 REFRESHRATE = 0.1
56 56
57 57 if pycompat.isposix and not pycompat.ispy3:
58 58 # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening.
59 59 # The 'e' flag will be ignored on older versions of glibc.
60 60 # Python 3 can't handle the 'e' flag.
61 61 PACKOPENMODE = b'rbe'
62 62 else:
63 63 PACKOPENMODE = b'rb'
64 64
65 65
66 66 class _cachebackedpacks:
67 67 def __init__(self, packs, cachesize):
68 68 self._packs = set(packs)
69 69 self._lrucache = util.lrucachedict(cachesize)
70 70 self._lastpack = None
71 71
72 72 # Avoid cold start of the cache by populating the most recent packs
73 73 # in the cache.
74 74 for i in reversed(range(min(cachesize, len(packs)))):
75 75 self._movetofront(packs[i])
76 76
77 77 def _movetofront(self, pack):
78 78 # This effectively makes pack the first entry in the cache.
79 79 self._lrucache[pack] = True
80 80
81 81 def _registerlastpackusage(self):
82 82 if self._lastpack is not None:
83 83 self._movetofront(self._lastpack)
84 84 self._lastpack = None
85 85
86 86 def add(self, pack):
87 87 self._registerlastpackusage()
88 88
89 89 # This method will mostly be called when packs are not in the cache,
90 90 # so add the pack to the cache.
91 91 self._movetofront(pack)
92 92 self._packs.add(pack)
93 93
94 94 def __iter__(self):
95 95 self._registerlastpackusage()
96 96
97 97 # Cache iteration is based on LRU.
98 98 for pack in self._lrucache:
99 99 self._lastpack = pack
100 100 yield pack
101 101
102 102 cachedpacks = {pack for pack in self._lrucache}
103 103 # Yield for paths not in the cache.
104 104 for pack in self._packs - cachedpacks:
105 105 self._lastpack = pack
106 106 yield pack
107 107
108 108 # Data not found in any pack.
109 109 self._lastpack = None
110 110
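The iteration contract above, restated as a toy (none of this is the real class): cached packs come first in LRU order, then every remaining pack, so hot packs are probed before cold ones.

    from collections import OrderedDict

    cache = OrderedDict()             # stands in for util.lrucachedict
    allpacks = {'p1', 'p2', 'p3'}
    cache['p3'] = True                # p3 was used most recently

    order = list(cache) + sorted(allpacks - set(cache))
    assert order[0] == 'p3'           # the hot pack is checked first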
111 111
112 112 class basepackstore:
113 113 # Default cache size limit for the pack files.
114 114 DEFAULTCACHESIZE = 100
115 115
116 116 def __init__(self, ui, path):
117 117 self.ui = ui
118 118 self.path = path
119 119
120 120 # lastrefresh is 0 so we'll immediately check for new packs on the first
121 121 # failure.
122 122 self.lastrefresh = 0
123 123
124 124 packs = []
125 125 for filepath, __, __ in self._getavailablepackfilessorted():
126 126 try:
127 127 pack = self.getpack(filepath)
128 128 except Exception as ex:
129 129 # An exception may be thrown if the pack file is corrupted
130 130 # somehow. Log a warning but keep going in this case, just
131 131 # skipping this pack file.
132 132 #
133 133 # If this is an ENOENT error then don't even bother logging.
134 134 # Someone could have removed the file since we retrieved the
135 135 # list of paths.
136 136 if getattr(ex, 'errno', None) != errno.ENOENT:
137 137 ui.warn(_(b'unable to load pack %s: %s\n') % (filepath, ex))
138 138 continue
139 139 packs.append(pack)
140 140
141 141 self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE)
142 142
143 143 def _getavailablepackfiles(self):
144 144 """For each pack file (a index/data file combo), yields:
145 145 (full path without extension, mtime, size)
146 146
147 147 mtime will be the mtime of the index/data file (whichever is newer)
148 148 size is the combined size of index/data file
149 149 """
150 150 indexsuffixlen = len(self.INDEXSUFFIX)
151 151 packsuffixlen = len(self.PACKSUFFIX)
152 152
153 153 ids = set()
154 154 sizes = collections.defaultdict(lambda: 0)
155 155 mtimes = collections.defaultdict(lambda: [])
156 156 try:
157 157 for filename, type, stat in osutil.listdir(self.path, stat=True):
158 158 id = None
159 159 if filename[-indexsuffixlen:] == self.INDEXSUFFIX:
160 160 id = filename[:-indexsuffixlen]
161 161 elif filename[-packsuffixlen:] == self.PACKSUFFIX:
162 162 id = filename[:-packsuffixlen]
163 163
164 164 # Since we expect to have two files corresponding to each ID
165 165 # (the index file and the pack file), we can yield once we see
166 166 # it twice.
167 167 if id:
168 168 sizes[id] += stat.st_size # Sum both files' sizes together
169 169 mtimes[id].append(stat.st_mtime)
170 170 if id in ids:
171 171 yield (
172 172 os.path.join(self.path, id),
173 173 max(mtimes[id]),
174 174 sizes[id],
175 175 )
176 176 else:
177 177 ids.add(id)
178 178 except OSError as ex:
179 179 if ex.errno != errno.ENOENT:
180 180 raise
181 181
182 182 def _getavailablepackfilessorted(self):
183 183 """Like `_getavailablepackfiles`, but also sorts the files by mtime,
184 184 yielding newest files first.
185 185
186 186 This is desirable, since newer packfiles are more likely to contain
187 187 the data we want.
188 188 """
189 189 files = []
190 190 for path, mtime, size in self._getavailablepackfiles():
191 191 files.append((mtime, size, path))
192 192 files = sorted(files, reverse=True)
193 193 for mtime, size, path in files:
194 194 yield path, mtime, size
195 195
196 196 def gettotalsizeandcount(self):
197 197 """Returns the total disk size (in bytes) of all the pack files in
198 198 this store, and the count of pack files.
199 199
200 200 (This might be smaller than the total size of the ``self.path``
201 201 directory, since this only considers fully-written pack files, and not
202 202 temporary files or other detritus on the directory.)
203 203 """
204 204 totalsize = 0
205 205 count = 0
206 206 for __, __, size in self._getavailablepackfiles():
207 207 totalsize += size
208 208 count += 1
209 209 return totalsize, count
210 210
211 211 def getmetrics(self):
212 212 """Returns metrics on the state of this store."""
213 213 size, count = self.gettotalsizeandcount()
214 214 return {
215 215 b'numpacks': count,
216 216 b'totalpacksize': size,
217 217 }
218 218
219 219 def getpack(self, path):
220 220 raise NotImplementedError()
221 221
222 222 def getmissing(self, keys):
223 223 missing = keys
224 224 for pack in self.packs:
225 225 missing = pack.getmissing(missing)
226 226
227 227 # Ensures better performance of the cache by keeping the most
228 228 # recently accessed pack at the beginning in subsequent iterations.
229 229 if not missing:
230 230 return missing
231 231
232 232 if missing:
233 233 for pack in self.refresh():
234 234 missing = pack.getmissing(missing)
235 235
236 236 return missing
237 237
238 238 def markledger(self, ledger, options=None):
239 239 for pack in self.packs:
240 240 pack.markledger(ledger)
241 241
242 242 def markforrefresh(self):
243 243 """Tells the store that there may be new pack files, so the next time it
244 244 has a lookup miss it should check for new files."""
245 245 self.lastrefresh = 0
246 246
247 247 def refresh(self):
248 248 """Checks for any new packs on disk, adds them to the main pack list,
249 249 and returns a list of just the new packs."""
250 250 now = time.time()
251 251
252 252 # If we experience a lot of misses (like in the case of getmissing() on
253 253 # new objects), let's only actually check disk for new stuff every once
254 254 # in a while. Generally this code path should only ever matter when a
255 255 # repack is going on in the background, and it should be pretty rare
256 256 # for that to happen twice in quick succession.
257 257 newpacks = []
258 258 if now > self.lastrefresh + REFRESHRATE:
259 259 self.lastrefresh = now
260 260 previous = {p.path for p in self.packs}
261 261 for filepath, __, __ in self._getavailablepackfilessorted():
262 262 if filepath not in previous:
263 263 newpack = self.getpack(filepath)
264 264 newpacks.append(newpack)
265 265 self.packs.add(newpack)
266 266
267 267 return newpacks
268 268
269 269
270 270 class versionmixin:
271 271 # Mix-in for classes with multiple supported versions
272 272 VERSION = None
273 273 SUPPORTED_VERSIONS = [2]
274 274
275 275 def _checkversion(self, version):
276 276 if version in self.SUPPORTED_VERSIONS:
277 277 if self.VERSION is None:
278 278 # only affect this instance
279 279 self.VERSION = version
280 280 elif self.VERSION != version:
281 281 raise RuntimeError(b'inconsistent version: %d' % version)
282 282 else:
283 283 raise RuntimeError(b'unsupported version: %d' % version)
284 284
285 285
286 286 class basepack(versionmixin):
287 287 # The maximum amount we should read via mmap before remapping so the old
288 288 # pages can be released (100MB)
289 289 MAXPAGEDIN = 100 * 1024 ** 2
290 290
291 291 SUPPORTED_VERSIONS = [2]
292 292
293 293 def __init__(self, path):
294 294 self.path = path
295 295 self.packpath = path + self.PACKSUFFIX
296 296 self.indexpath = path + self.INDEXSUFFIX
297 297
298 298 self.indexsize = os.stat(self.indexpath).st_size
299 299 self.datasize = os.stat(self.packpath).st_size
300 300
301 301 self._index = None
302 302 self._data = None
303 303 self.freememory() # initialize the mmap
304 304
305 305 version = struct.unpack(b'!B', self._data[:PACKVERSIONSIZE])[0]
306 306 self._checkversion(version)
307 307
308 308 version, config = struct.unpack(b'!BB', self._index[:INDEXVERSIONSIZE])
309 309 self._checkversion(version)
310 310
311 311 if 0b10000000 & config:
312 312 self.params = indexparams(LARGEFANOUTPREFIX, version)
313 313 else:
314 314 self.params = indexparams(SMALLFANOUTPREFIX, version)
315 315
316 316 @util.propertycache
317 317 def _fanouttable(self):
318 318 params = self.params
319 319 rawfanout = self._index[FANOUTSTART : FANOUTSTART + params.fanoutsize]
320 320 fanouttable = []
321 for i in pycompat.xrange(0, params.fanoutcount):
321 for i in range(0, params.fanoutcount):
322 322 loc = i * 4
323 323 fanoutentry = struct.unpack(b'!I', rawfanout[loc : loc + 4])[0]
324 324 fanouttable.append(fanoutentry)
325 325 return fanouttable
326 326
327 327 @util.propertycache
328 328 def _indexend(self):
329 329 nodecount = struct.unpack_from(
330 330 b'!Q', self._index, self.params.indexstart - 8
331 331 )[0]
332 332 return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
333 333
334 334 def freememory(self):
335 335 """Unmap and remap the memory to free it up after known expensive
336 336 operations. Return True if self._data and self._index were reloaded.
337 337 """
338 338 if self._index:
339 339 if self._pagedin < self.MAXPAGEDIN:
340 340 return False
341 341
342 342 self._index.close()
343 343 self._data.close()
344 344
345 345 # TODO: use an opener/vfs to access these paths
346 346 with open(self.indexpath, PACKOPENMODE) as indexfp:
347 347 # memory-map the file, size 0 means whole file
348 348 self._index = mmap.mmap(
349 349 indexfp.fileno(), 0, access=mmap.ACCESS_READ
350 350 )
351 351 with open(self.packpath, PACKOPENMODE) as datafp:
352 352 self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)
353 353
354 354 self._pagedin = 0
355 355 return True
356 356
357 357 def getmissing(self, keys):
358 358 raise NotImplementedError()
359 359
360 360 def markledger(self, ledger, options=None):
361 361 raise NotImplementedError()
362 362
363 363 def cleanup(self, ledger):
364 364 raise NotImplementedError()
365 365
366 366 def __iter__(self):
367 367 raise NotImplementedError()
368 368
369 369 def iterentries(self):
370 370 raise NotImplementedError()
371 371
372 372
373 373 class mutablebasepack(versionmixin):
374 374 def __init__(self, ui, packdir, version=2):
375 375 self._checkversion(version)
376 376 # TODO(augie): make this configurable
377 377 self._compressor = b'GZ'
378 378 opener = vfsmod.vfs(packdir)
379 379 opener.createmode = 0o444
380 380 self.opener = opener
381 381
382 382 self.entries = {}
383 383
384 384 shallowutil.mkstickygroupdir(ui, packdir)
385 385 self.packfp, self.packpath = opener.mkstemp(
386 386 suffix=self.PACKSUFFIX + b'-tmp'
387 387 )
388 388 self.idxfp, self.idxpath = opener.mkstemp(
389 389 suffix=self.INDEXSUFFIX + b'-tmp'
390 390 )
391 391 self.packfp = os.fdopen(self.packfp, 'wb+')
392 392 self.idxfp = os.fdopen(self.idxfp, 'wb+')
393 393 self.sha = hashutil.sha1()
394 394 self._closed = False
395 395
396 396 # The opener provides no way of doing permission fixup on files created
397 397 # via mkstemp, so we must fix it ourselves. We can probably fix this
398 398 # upstream in vfs.mkstemp so we don't need to use the private method.
399 399 opener._fixfilemode(opener.join(self.packpath))
400 400 opener._fixfilemode(opener.join(self.idxpath))
401 401
402 402 # Write header
403 403 # TODO: make it extensible (ex: allow specifying compression algorithm,
404 404 # a flexible key/value header, delta algorithm, fanout size, etc)
405 405 versionbuf = struct.pack(b'!B', self.VERSION) # unsigned 1 byte int
406 406 self.writeraw(versionbuf)
407 407
408 408 def __enter__(self):
409 409 return self
410 410
411 411 def __exit__(self, exc_type, exc_value, traceback):
412 412 if exc_type is None:
413 413 self.close()
414 414 else:
415 415 self.abort()
416 416
417 417 def abort(self):
418 418 # Unclean exit
419 419 self._cleantemppacks()
420 420
421 421 def writeraw(self, data):
422 422 self.packfp.write(data)
423 423 self.sha.update(data)
424 424
425 425 def close(self, ledger=None):
426 426 if self._closed:
427 427 return
428 428
429 429 try:
430 430 sha = hex(self.sha.digest())
431 431 self.packfp.close()
432 432 self.writeindex()
433 433
434 434 if len(self.entries) == 0:
435 435 # Empty pack
436 436 self._cleantemppacks()
437 437 self._closed = True
438 438 return None
439 439
440 440 self.opener.rename(self.packpath, sha + self.PACKSUFFIX)
441 441 try:
442 442 self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX)
443 443 except Exception as ex:
444 444 try:
445 445 self.opener.unlink(sha + self.PACKSUFFIX)
446 446 except Exception:
447 447 pass
448 448 # Throw exception 'ex' explicitly since a normal 'raise' would
449 449 # potentially throw an exception from the unlink cleanup.
450 450 raise ex
451 451 except Exception:
452 452 # Clean up temp packs in all exception cases
453 453 self._cleantemppacks()
454 454 raise
455 455
456 456 self._closed = True
457 457 result = self.opener.join(sha)
458 458 if ledger:
459 459 ledger.addcreated(result)
460 460 return result
461 461
462 462 def _cleantemppacks(self):
463 463 try:
464 464 self.opener.unlink(self.packpath)
465 465 except Exception:
466 466 pass
467 467 try:
468 468 self.opener.unlink(self.idxpath)
469 469 except Exception:
470 470 pass
471 471
472 472 def writeindex(self):
473 473 largefanout = len(self.entries) > SMALLFANOUTCUTOFF
474 474 if largefanout:
475 475 params = indexparams(LARGEFANOUTPREFIX, self.VERSION)
476 476 else:
477 477 params = indexparams(SMALLFANOUTPREFIX, self.VERSION)
478 478
479 479 fanouttable = [EMPTYFANOUT] * params.fanoutcount
480 480
481 481 # Precompute the location of each entry
482 482 locations = {}
483 483 count = 0
484 484 for node in sorted(self.entries):
485 485 location = count * self.INDEXENTRYLENGTH
486 486 locations[node] = location
487 487 count += 1
488 488
489 489 # Must use [0] on the unpack result since it's always a tuple.
490 490 fanoutkey = struct.unpack(
491 491 params.fanoutstruct, node[: params.fanoutprefix]
492 492 )[0]
493 493 if fanouttable[fanoutkey] == EMPTYFANOUT:
494 494 fanouttable[fanoutkey] = location
495 495
496 496 rawfanouttable = b''
497 497 last = 0
498 498 for offset in fanouttable:
499 499 offset = offset if offset != EMPTYFANOUT else last
500 500 last = offset
501 501 rawfanouttable += struct.pack(b'!I', offset)
502 502
503 503 rawentrieslength = struct.pack(b'!Q', len(self.entries))
504 504
505 505 # The index offset is its location in the file, i.e. after the 2 byte
506 506 # header and the fanouttable.
507 507 rawindex = self.createindex(locations, 2 + len(rawfanouttable))
508 508
509 509 self._writeheader(params)
510 510 self.idxfp.write(rawfanouttable)
511 511 self.idxfp.write(rawentrieslength)
512 512 self.idxfp.write(rawindex)
513 513 self.idxfp.close()
514 514
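One detail of writeindex worth spelling out: EMPTYFANOUT slots are backfilled with the previous offset, so the serialized table stays monotonic. A tiny illustration with made-up offsets:

    import struct

    EMPTYFANOUT = -1
    fanouttable = [0, EMPTYFANOUT, 7, EMPTYFANOUT]

    raw, last = b'', 0
    for offset in fanouttable:
        offset = offset if offset != EMPTYFANOUT else last
        last = offset
        raw += struct.pack('!I', offset)

    assert struct.unpack('!4I', raw) == (0, 0, 7, 7)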
515 515 def createindex(self, nodelocations):
516 516 raise NotImplementedError()
517 517
518 518 def _writeheader(self, indexparams):
519 519 # Index header
520 520 # <version: 1 byte>
521 521 # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8
522 522 # <unused: 7 bit> # future use (compression, delta format, etc)
523 523 config = 0
524 524 if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
525 525 config = 0b10000000
526 526 self.idxfp.write(struct.pack(b'!BB', self.VERSION, config))
527 527
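Packing the two header bytes from the layout documented in the comments above (a worked check, not new behavior):

    import struct

    VERSION = 2
    config = 0b10000000                       # large-fanout bit set
    assert struct.pack('!BB', VERSION, config) == b'\x02\x80'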
528 528
529 529 class indexparams:
530 530 __slots__ = (
531 531 'fanoutprefix',
532 532 'fanoutstruct',
533 533 'fanoutcount',
534 534 'fanoutsize',
535 535 'indexstart',
536 536 )
537 537
538 538 def __init__(self, prefixsize, version):
539 539 self.fanoutprefix = prefixsize
540 540
541 541 # The struct pack format for fanout table location (i.e. the format that
542 542 # converts the node prefix into an integer location in the fanout
543 543 # table).
544 544 if prefixsize == SMALLFANOUTPREFIX:
545 545 self.fanoutstruct = b'!B'
546 546 elif prefixsize == LARGEFANOUTPREFIX:
547 547 self.fanoutstruct = b'!H'
548 548 else:
549 549 raise ValueError(b"invalid fanout prefix size: %s" % prefixsize)
550 550
551 551 # The number of fanout table entries
552 552 self.fanoutcount = 2 ** (prefixsize * 8)
553 553
554 554 # The total bytes used by the fanout table
555 555 self.fanoutsize = self.fanoutcount * 4
556 556
557 557 self.indexstart = FANOUTSTART + self.fanoutsize
558 558 # Skip the index length
559 559 self.indexstart += 8
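Working the arithmetic for the large prefix, straight from the definitions above: prefixsize 2 addresses 2^16 fanout entries of 4 bytes each, and the index entries start after the 2-byte header, the fanout table, and the 8-byte entry count.

    prefixsize = 2                         # LARGEFANOUTPREFIX
    fanoutcount = 2 ** (prefixsize * 8)    # 65536 entries
    fanoutsize = fanoutcount * 4           # 262144 bytes
    indexstart = 2 + fanoutsize + 8        # FANOUTSTART + fanout + length field
    assert (fanoutcount, fanoutsize, indexstart) == (65536, 262144, 262154)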
@@ -1,459 +1,459 b''
1 1 import errno
2 2 import os
3 3 import shutil
4 4 import stat
5 5 import time
6 6
7 7 from mercurial.i18n import _
8 8 from mercurial.node import bin, hex
9 9 from mercurial.pycompat import open
10 10 from mercurial import (
11 11 error,
12 12 pycompat,
13 13 util,
14 14 )
15 15 from mercurial.utils import hashutil
16 16 from . import (
17 17 constants,
18 18 shallowutil,
19 19 )
20 20
21 21
22 22 class basestore:
23 23 def __init__(self, repo, path, reponame, shared=False):
24 24 """Creates a remotefilelog store object for the given repo name.
25 25
26 26 `path` - The file path where this store keeps its data
27 27 `reponame` - The name of the repo. This is used to partition data from
28 28 many repos.
29 29 `shared` - True if this store is a shared cache of data from the central
30 30 server, for many repos on this machine. False means this store is for
31 31 the local data for one repo.
32 32 """
33 33 self.repo = repo
34 34 self.ui = repo.ui
35 35 self._path = path
36 36 self._reponame = reponame
37 37 self._shared = shared
38 38 self._uid = os.getuid() if not pycompat.iswindows else None
39 39
40 40 self._validatecachelog = self.ui.config(
41 41 b"remotefilelog", b"validatecachelog"
42 42 )
43 43 self._validatecache = self.ui.config(
44 44 b"remotefilelog", b"validatecache", b'on'
45 45 )
46 46 if self._validatecache not in (b'on', b'strict', b'off'):
47 47 self._validatecache = b'on'
48 48 if self._validatecache == b'off':
49 49 self._validatecache = False
50 50
51 51 if shared:
52 52 shallowutil.mkstickygroupdir(self.ui, path)
53 53
54 54 def getmissing(self, keys):
55 55 missing = []
56 56 for name, node in keys:
57 57 filepath = self._getfilepath(name, node)
58 58 exists = os.path.exists(filepath)
59 59 if (
60 60 exists
61 61 and self._validatecache == b'strict'
62 62 and not self._validatekey(filepath, b'contains')
63 63 ):
64 64 exists = False
65 65 if not exists:
66 66 missing.append((name, node))
67 67
68 68 return missing
69 69
70 70 # BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE
71 71
72 72 def markledger(self, ledger, options=None):
73 73 if options and options.get(constants.OPTION_PACKSONLY):
74 74 return
75 75 if self._shared:
76 76 for filename, nodes in self._getfiles():
77 77 for node in nodes:
78 78 ledger.markdataentry(self, filename, node)
79 79 ledger.markhistoryentry(self, filename, node)
80 80
81 81 def cleanup(self, ledger):
82 82 ui = self.ui
83 83 entries = ledger.sources.get(self, [])
84 84 count = 0
85 85 progress = ui.makeprogress(
86 86 _(b"cleaning up"), unit=b"files", total=len(entries)
87 87 )
88 88 for entry in entries:
89 89 if entry.gced or (entry.datarepacked and entry.historyrepacked):
90 90 progress.update(count)
91 91 path = self._getfilepath(entry.filename, entry.node)
92 92 util.tryunlink(path)
93 93 count += 1
94 94 progress.complete()
95 95
96 96 # Clean up the repo cache directory.
97 97 self._cleanupdirectory(self._getrepocachepath())
98 98
99 99 # BELOW THIS ARE NON-STANDARD APIS
100 100
101 101 def _cleanupdirectory(self, rootdir):
102 102 """Removes the empty directories and unnecessary files within the root
103 103 directory recursively. Note that this method does not remove the root
104 104 directory itself."""
105 105
106 106 oldfiles = set()
107 107 otherfiles = set()
108 108 # osutil.listdir returns stat information which saves some rmdir/listdir
109 109 # syscalls.
110 110 for name, mode in util.osutil.listdir(rootdir):
111 111 if stat.S_ISDIR(mode):
112 112 dirpath = os.path.join(rootdir, name)
113 113 self._cleanupdirectory(dirpath)
114 114
115 115 # Now that the directory specified by dirpath is potentially
116 116 # empty, try and remove it.
117 117 try:
118 118 os.rmdir(dirpath)
119 119 except OSError:
120 120 pass
121 121
122 122 elif stat.S_ISREG(mode):
123 123 if name.endswith(b'_old'):
124 124 oldfiles.add(name[:-4])
125 125 else:
126 126 otherfiles.add(name)
127 127
128 128 # Remove the files which end with suffix '_old' and have no
129 129 # corresponding file without the suffix '_old'. See addremotefilelognode
130 130 # method for the generation/purpose of files with '_old' suffix.
131 131 for filename in oldfiles - otherfiles:
132 132 filepath = os.path.join(rootdir, filename + b'_old')
133 133 util.tryunlink(filepath)
134 134
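The '_old' pruning rule above in miniature (hypothetical names): a backup is stale once the file it shadowed is gone.

    oldfiles = {b'a', b'b'}     # names seen with the b'_old' suffix stripped
    otherfiles = {b'a', b'c'}   # names seen without the suffix
    assert oldfiles - otherfiles == {b'b'}   # only b'b_old' gets unlinked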
135 135 def _getfiles(self):
136 136 """Return a list of (filename, [node,...]) for all the revisions that
137 137 exist in the store.
138 138
139 139 This is useful for obtaining a list of all the contents of the store
140 140 when performing a repack to another store, since the store API requires
141 141 name+node keys and not namehash+node keys.
142 142 """
143 143 existing = {}
144 144 for filenamehash, node in self._listkeys():
145 145 existing.setdefault(filenamehash, []).append(node)
146 146
147 147 filenamemap = self._resolvefilenames(existing.keys())
148 148
149 149 for filename, sha in filenamemap.items():
150 150 yield (filename, existing[sha])
151 151
152 152 def _resolvefilenames(self, hashes):
153 153 """Given a list of filename hashes that are present in the
154 154 remotefilelog store, return a mapping from filename->hash.
155 155
156 156 This is useful when converting remotefilelog blobs into other storage
157 157 formats.
158 158 """
159 159 if not hashes:
160 160 return {}
161 161
162 162 filenames = {}
163 163 missingfilename = set(hashes)
164 164
165 165 # Start with a full manifest, since it'll cover the majority of files
166 166 for filename in self.repo[b'tip'].manifest():
167 167 sha = hashutil.sha1(filename).digest()
168 168 if sha in missingfilename:
169 169 filenames[filename] = sha
170 170 missingfilename.discard(sha)
171 171
172 172 # Scan the changelog until we've found every file name
173 173 cl = self.repo.unfiltered().changelog
174 for rev in pycompat.xrange(len(cl) - 1, -1, -1):
174 for rev in range(len(cl) - 1, -1, -1):
175 175 if not missingfilename:
176 176 break
177 177 files = cl.readfiles(cl.node(rev))
178 178 for filename in files:
179 179 sha = hashutil.sha1(filename).digest()
180 180 if sha in missingfilename:
181 181 filenames[filename] = sha
182 182 missingfilename.discard(sha)
183 183
184 184 return filenames
185 185
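A compressed model of the resolution loop (hashlib stands in for hashutil here, and the names are hypothetical): candidate filenames are hashed and matched against the wanted hash set.

    import hashlib

    wanted = {hashlib.sha1(b'foo.txt').digest()}
    resolved = {}
    for filename in [b'bar.txt', b'foo.txt']:    # e.g. names from a manifest
        sha = hashlib.sha1(filename).digest()
        if sha in wanted:
            resolved[filename] = sha

    assert list(resolved) == [b'foo.txt']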
186 186 def _getrepocachepath(self):
187 187 return (
188 188 os.path.join(self._path, self._reponame)
189 189 if self._shared
190 190 else self._path
191 191 )
192 192
193 193 def _listkeys(self):
194 194 """List all the remotefilelog keys that exist in the store.
195 195
196 196 Returns an iterator of (filename hash, filecontent hash) tuples.
197 197 """
198 198
199 199 for root, dirs, files in os.walk(self._getrepocachepath()):
200 200 for filename in files:
201 201 if len(filename) != 40:
202 202 continue
203 203 node = filename
204 204 if self._shared:
205 205 # .../1a/85ffda..be21
206 206 filenamehash = root[-41:-39] + root[-38:]
207 207 else:
208 208 filenamehash = root[-40:]
209 209 yield (bin(filenamehash), bin(node))
210 210
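The slicing in the shared branch assumes the layout `.../<2 hex>/<38 hex>/<40 hex node>`; a check with a hypothetical hash shows how the 40-character filename hash is reassembled:

    root = '/cache/repo/1a/' + '85ffda' + '0' * 32   # 2 + 38 hex chars
    filenamehash = root[-41:-39] + root[-38:]
    assert filenamehash.startswith('1a85ffda') and len(filenamehash) == 40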
211 211 def _getfilepath(self, name, node):
212 212 node = hex(node)
213 213 if self._shared:
214 214 key = shallowutil.getcachekey(self._reponame, name, node)
215 215 else:
216 216 key = shallowutil.getlocalkey(name, node)
217 217
218 218 return os.path.join(self._path, key)
219 219
220 220 def _getdata(self, name, node):
221 221 filepath = self._getfilepath(name, node)
222 222 try:
223 223 data = shallowutil.readfile(filepath)
224 224 if self._validatecache and not self._validatedata(data, filepath):
225 225 if self._validatecachelog:
226 226 with open(self._validatecachelog, b'ab+') as f:
227 227 f.write(b"corrupt %s during read\n" % filepath)
228 228 os.rename(filepath, filepath + b".corrupt")
229 229 raise KeyError(b"corrupt local cache file %s" % filepath)
230 230 except IOError:
231 231 raise KeyError(
232 232 b"no file found at %s for %s:%s" % (filepath, name, hex(node))
233 233 )
234 234
235 235 return data
236 236
237 237 def addremotefilelognode(self, name, node, data):
238 238 filepath = self._getfilepath(name, node)
239 239
240 240 oldumask = os.umask(0o002)
241 241 try:
242 242 # if this node already exists, save the old version for
243 243 # recovery/debugging purposes.
244 244 if os.path.exists(filepath):
245 245 newfilename = filepath + b'_old'
246 246 # newfilename can be read-only and shutil.copy will fail.
247 247 # Delete newfilename to avoid it
248 248 if os.path.exists(newfilename):
249 249 shallowutil.unlinkfile(newfilename)
250 250 shutil.copy(filepath, newfilename)
251 251
252 252 shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath))
253 253 shallowutil.writefile(filepath, data, readonly=True)
254 254
255 255 if self._validatecache:
256 256 if not self._validatekey(filepath, b'write'):
257 257 raise error.Abort(
258 258 _(b"local cache write was corrupted %s") % filepath
259 259 )
260 260 finally:
261 261 os.umask(oldumask)
262 262
263 263 def markrepo(self, path):
264 264 """Call this to add the given repo path to the store's list of
265 265 repositories that are using it. This is useful later when doing garbage
266 266 collection, since it allows us to inspect the repos to see what nodes
267 267 they want to be kept alive in the store.
268 268 """
269 269 repospath = os.path.join(self._path, b"repos")
270 270 with open(repospath, b'ab') as reposfile:
271 271 reposfile.write(os.path.dirname(path) + b"\n")
272 272
273 273 repospathstat = os.stat(repospath)
274 274 if repospathstat.st_uid == self._uid:
275 275 os.chmod(repospath, 0o0664)
276 276
277 277 def _validatekey(self, path, action):
278 278 with open(path, b'rb') as f:
279 279 data = f.read()
280 280
281 281 if self._validatedata(data, path):
282 282 return True
283 283
284 284 if self._validatecachelog:
285 285 with open(self._validatecachelog, b'ab+') as f:
286 286 f.write(b"corrupt %s during %s\n" % (path, action))
287 287
288 288 os.rename(path, path + b".corrupt")
289 289 return False
290 290
291 291 def _validatedata(self, data, path):
292 292 try:
293 293 if len(data) > 0:
294 294 # see remotefilelogserver.createfileblob for the format
295 295 offset, size, flags = shallowutil.parsesizeflags(data)
296 296 if len(data) <= size:
297 297 # it is truncated
298 298 return False
299 299
300 300 # extract the node from the metadata
301 301 offset += size
302 302 datanode = data[offset : offset + 20]
303 303
304 304 # and compare against the path
305 305 if os.path.basename(path) == hex(datanode):
306 306 # Content matches the intended path
307 307 return True
308 308 return False
309 309 except (ValueError, shallowutil.BadRemotefilelogHeader):
310 310 pass
311 311
312 312 return False
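
Each cache blob is thus self-verifying: parse the size header, reject anything shorter than the declared content (truncated), then compare the 20-byte node stored after the content against the file's basename, which is the hex content node. A hedged sketch of the same flow, with parsesizeflags as a stand-in argument for shallowutil.parsesizeflags:

    import binascii
    import os

    def looks_valid(data, path, parsesizeflags):
        """parsesizeflags: stand-in returning (offset, size, flags) from the header."""
        if not data:
            return False
        offset, size, flags = parsesizeflags(data)
        if len(data) <= size:
            return False                                  # truncated blob
        datanode = data[offset + size : offset + size + 20]
        # the cache file is named after the hex content node
        return os.path.basename(path) == binascii.hexlify(datanode)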
313 313
314 314 def gc(self, keepkeys):
315 315 ui = self.ui
316 316 cachepath = self._path
317 317
318 318 # prune cache
319 319 queue = pycompat.queue.PriorityQueue()
320 320 originalsize = 0
321 321 size = 0
322 322 count = 0
323 323 removed = 0
324 324
325 325 # keep files newer than a day even if they aren't needed
326 326 limit = time.time() - (60 * 60 * 24)
327 327
328 328 progress = ui.makeprogress(
329 329 _(b"removing unnecessary files"), unit=b"files"
330 330 )
331 331 progress.update(0)
332 332 for root, dirs, files in os.walk(cachepath):
333 333 for file in files:
334 334 if file == b'repos':
335 335 continue
336 336
337 337 # Don't delete pack files
338 338 if b'/packs/' in root:
339 339 continue
340 340
341 341 progress.update(count)
342 342 path = os.path.join(root, file)
343 343 key = os.path.relpath(path, cachepath)
344 344 count += 1
345 345 try:
346 346 pathstat = os.stat(path)
347 347 except OSError as e:
348 348 # errno.ENOENT = no such file or directory
349 349 if e.errno != errno.ENOENT:
350 350 raise
351 351 msg = _(
352 352 b"warning: file %s was removed by another process\n"
353 353 )
354 354 ui.warn(msg % path)
355 355 continue
356 356
357 357 originalsize += pathstat.st_size
358 358
359 359 if key in keepkeys or pathstat.st_atime > limit:
360 360 queue.put((pathstat.st_atime, path, pathstat))
361 361 size += pathstat.st_size
362 362 else:
363 363 try:
364 364 shallowutil.unlinkfile(path)
365 365 except OSError as e:
366 366 # errno.ENOENT = no such file or directory
367 367 if e.errno != errno.ENOENT:
368 368 raise
369 369 msg = _(
370 370 b"warning: file %s was removed by another "
371 371 b"process\n"
372 372 )
373 373 ui.warn(msg % path)
374 374 continue
375 375 removed += 1
376 376 progress.complete()
377 377
378 378 # remove oldest files until under limit
379 379 limit = ui.configbytes(b"remotefilelog", b"cachelimit")
380 380 if size > limit:
381 381 excess = size - limit
382 382 progress = ui.makeprogress(
383 383 _(b"enforcing cache limit"), unit=b"bytes", total=excess
384 384 )
385 385 removedexcess = 0
386 386 while queue and size > limit and size > 0:
387 387 progress.update(removedexcess)
388 388 atime, oldpath, oldpathstat = queue.get()
389 389 try:
390 390 shallowutil.unlinkfile(oldpath)
391 391 except OSError as e:
392 392 # errno.ENOENT = no such file or directory
393 393 if e.errno != errno.ENOENT:
394 394 raise
395 395 msg = _(
396 396 b"warning: file %s was removed by another process\n"
397 397 )
398 398 ui.warn(msg % oldpath)
399 399 size -= oldpathstat.st_size
400 400 removed += 1
401 401 removedexcess += oldpathstat.st_size
402 402 progress.complete()
403 403
404 404 ui.status(
405 405 _(b"finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
406 406 % (
407 407 removed,
408 408 count,
409 409 float(originalsize) / 1024.0 / 1024.0 / 1024.0,
410 410 float(size) / 1024.0 / 1024.0 / 1024.0,
411 411 )
412 412 )
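
Taken as a whole, gc() is an LRU-style sweep: files that are kept or recently used are pushed into a priority queue keyed by atime, and once the total exceeds remotefilelog.cachelimit the oldest entries are popped and unlinked. The eviction policy reduced to a standalone sketch (heapq plays the role of the PriorityQueue above):

    import heapq

    def evict_oldest(entries, limit):
        """entries: list of (atime, path, size); returns the paths to delete."""
        heap = list(entries)
        heapq.heapify(heap)                   # smallest atime pops first
        total = sum(size for _, _, size in entries)
        doomed = []
        while heap and total > limit:
            atime, path, size = heapq.heappop(heap)
            doomed.append(path)
            total -= size
        return doomed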
413 413
414 414
415 415 class baseunionstore:
416 416 def __init__(self, *args, **kwargs):
417 417 # If one of the functions that iterates all of the stores is about to
418 418 # throw a KeyError, try this many times with a full refresh between
419 419 # attempts. A repack operation may have moved data from one store to
420 420 # another while we were running.
421 421 self.numattempts = kwargs.get('numretries', 0) + 1
422 422 # If not None, this function is called on every retry and when the
423 423 # attempts are exhausted.
424 424 self.retrylog = kwargs.get('retrylog', None)
425 425
426 426 def markforrefresh(self):
427 427 for store in self.stores:
428 428 if util.safehasattr(store, b'markforrefresh'):
429 429 store.markforrefresh()
430 430
431 431 @staticmethod
432 432 def retriable(fn):
433 433 def noop(*args):
434 434 pass
435 435
436 436 def wrapped(self, *args, **kwargs):
437 437 retrylog = self.retrylog or noop
438 438 funcname = fn.__name__
439 439 i = 0
440 440 while i < self.numattempts:
441 441 if i > 0:
442 442 retrylog(
443 443 b're-attempting (n=%d) %s\n'
444 444 % (i, pycompat.sysbytes(funcname))
445 445 )
446 446 self.markforrefresh()
447 447 i += 1
448 448 try:
449 449 return fn(self, *args, **kwargs)
450 450 except KeyError:
451 451 if i == self.numattempts:
452 452 # retries exhausted
453 453 retrylog(
454 454 b'retries exhausted in %s, raising KeyError\n'
455 455 % pycompat.sysbytes(funcname)
456 456 )
457 457 raise
458 458
459 459 return wrapped
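
The decorator amounts to this retry loop; a self-contained reduction, with refresh standing in for markforrefresh():

    def retrying(fn, refresh, attempts=3):
        """Call fn(); on KeyError, refresh and retry, up to `attempts` calls."""
        for i in range(attempts):
            if i:
                refresh()                     # a repack may have moved the data
            try:
                return fn()
            except KeyError:
                if i == attempts - 1:
                    raise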
@@ -1,397 +1,396 b''
1 1 import threading
2 2
3 3 from mercurial.node import (
4 4 hex,
5 5 sha1nodeconstants,
6 6 )
7 7 from mercurial.pycompat import getattr
8 8 from mercurial import (
9 9 mdiff,
10 pycompat,
11 10 revlog,
12 11 )
13 12 from . import (
14 13 basestore,
15 14 constants,
16 15 shallowutil,
17 16 )
18 17
19 18
20 19 class ChainIndicies:
21 20 """A static class for easy reference to the delta chain indices."""
22 21
23 22 # The filename of this revision delta
24 23 NAME = 0
25 24 # The mercurial file node for this revision delta
26 25 NODE = 1
27 26 # The filename of the delta base's revision. This is useful when deltaing
28 27 # between different files (e.g. for a move or copy, we can delta against
29 28 # the original file content).
30 29 BASENAME = 2
31 30 # The mercurial file node for the delta base revision. This is the nullid if
32 31 # this delta is a full text.
33 32 BASENODE = 3
34 33 # The actual delta or full text data.
35 34 DATA = 4
36 35
37 36
38 37 class unioncontentstore(basestore.baseunionstore):
39 38 def __init__(self, *args, **kwargs):
40 39 super(unioncontentstore, self).__init__(*args, **kwargs)
41 40
42 41 self.stores = args
43 42 self.writestore = kwargs.get('writestore')
44 43
45 44 # If allowincomplete==True then the union store can return partial
46 45 # delta chains, otherwise it will throw a KeyError if a full
47 46 # deltachain can't be found.
48 47 self.allowincomplete = kwargs.get('allowincomplete', False)
49 48
50 49 def get(self, name, node):
51 50 """Fetches the full text revision contents of the given name+node pair.
52 51 If the full text doesn't exist, throws a KeyError.
53 52
54 53 Under the hood, this uses getdeltachain() across all the stores to build
55 54 up a full chain to produce the full text.
56 55 """
57 56 chain = self.getdeltachain(name, node)
58 57
59 58 if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
60 59 # If we didn't receive a full chain, throw
61 60 raise KeyError((name, hex(node)))
62 61
63 62 # The last entry in the chain is a full text, so we start our delta
64 63 # applies with that.
65 64 fulltext = chain.pop()[ChainIndicies.DATA]
66 65
67 66 text = fulltext
68 67 while chain:
69 68 delta = chain.pop()[ChainIndicies.DATA]
70 69 text = mdiff.patches(text, [delta])
71 70
72 71 return text
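
Reconstruction order matters here: getdeltachain() returns the requested revision first and the terminating fulltext last, so the deltas are applied from the base back toward the requested node. A minimal sketch, with patch standing in for mdiff.patches:

    def apply_chain(chain, patch):
        """chain: list of 5-tuples as documented in getdeltachain()."""
        text = chain.pop()[ChainIndicies.DATA]      # the terminating fulltext
        while chain:
            text = patch(text, [chain.pop()[ChainIndicies.DATA]])
        return text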
73 72
74 73 @basestore.baseunionstore.retriable
75 74 def getdelta(self, name, node):
76 75 """Return the single delta entry for the given name/node pair."""
77 76 for store in self.stores:
78 77 try:
79 78 return store.getdelta(name, node)
80 79 except KeyError:
81 80 pass
82 81
83 82 raise KeyError((name, hex(node)))
84 83
85 84 def getdeltachain(self, name, node):
86 85 """Returns the deltachain for the given name/node pair.
87 86
88 87 Returns an ordered list of:
89 88
90 89 [(name, node, deltabasename, deltabasenode, deltacontent),...]
91 90
92 91 where the chain is terminated by a full text entry with a nullid
93 92 deltabasenode.
94 93 """
95 94 chain = self._getpartialchain(name, node)
96 95 while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
97 96 x, x, deltabasename, deltabasenode, x = chain[-1]
98 97 try:
99 98 morechain = self._getpartialchain(deltabasename, deltabasenode)
100 99 chain.extend(morechain)
101 100 except KeyError:
102 101 # If we allow incomplete chains, don't throw.
103 102 if not self.allowincomplete:
104 103 raise
105 104 break
106 105
107 106 return chain
108 107
109 108 @basestore.baseunionstore.retriable
110 109 def getmeta(self, name, node):
111 110 """Returns the metadata dict for given node."""
112 111 for store in self.stores:
113 112 try:
114 113 return store.getmeta(name, node)
115 114 except KeyError:
116 115 pass
117 116 raise KeyError((name, hex(node)))
118 117
119 118 def getmetrics(self):
120 119 metrics = [s.getmetrics() for s in self.stores]
121 120 return shallowutil.sumdicts(*metrics)
122 121
123 122 @basestore.baseunionstore.retriable
124 123 def _getpartialchain(self, name, node):
125 124 """Returns a partial delta chain for the given name/node pair.
126 125
127 126 A partial chain is a chain that may not be terminated in a full-text.
128 127 """
129 128 for store in self.stores:
130 129 try:
131 130 return store.getdeltachain(name, node)
132 131 except KeyError:
133 132 pass
134 133
135 134 raise KeyError((name, hex(node)))
136 135
137 136 def add(self, name, node, data):
138 137 raise RuntimeError(
139 138 b"cannot add content only to remotefilelog contentstore"
140 139 )
141 140
142 141 def getmissing(self, keys):
143 142 missing = keys
144 143 for store in self.stores:
145 144 if missing:
146 145 missing = store.getmissing(missing)
147 146 return missing
148 147
149 148 def addremotefilelognode(self, name, node, data):
150 149 if self.writestore:
151 150 self.writestore.addremotefilelognode(name, node, data)
152 151 else:
153 152 raise RuntimeError(b"no writable store configured")
154 153
155 154 def markledger(self, ledger, options=None):
156 155 for store in self.stores:
157 156 store.markledger(ledger, options)
158 157
159 158
160 159 class remotefilelogcontentstore(basestore.basestore):
161 160 def __init__(self, *args, **kwargs):
162 161 super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
163 162 self._threaddata = threading.local()
164 163
165 164 def get(self, name, node):
166 165 # return raw revision text
167 166 data = self._getdata(name, node)
168 167
169 168 offset, size, flags = shallowutil.parsesizeflags(data)
170 169 content = data[offset : offset + size]
171 170
172 171 ancestormap = shallowutil.ancestormap(data)
173 172 p1, p2, linknode, copyfrom = ancestormap[node]
174 173 copyrev = None
175 174 if copyfrom:
176 175 copyrev = hex(p1)
177 176
178 177 self._updatemetacache(node, size, flags)
179 178
180 179 # lfs tracks renames in its own metadata, remove hg copy metadata,
181 180 # because copy metadata will be re-added by lfs flag processor.
182 181 if flags & revlog.REVIDX_EXTSTORED:
183 182 copyrev = copyfrom = None
184 183 revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
185 184 return revision
186 185
187 186 def getdelta(self, name, node):
188 187 # Since remotefilelog content stores only contain full texts, just
189 188 # return that.
190 189 revision = self.get(name, node)
191 190 return (
192 191 revision,
193 192 name,
194 193 sha1nodeconstants.nullid,
195 194 self.getmeta(name, node),
196 195 )
197 196
198 197 def getdeltachain(self, name, node):
199 198 # Since remotefilelog content stores just contain full texts, we return
200 199 # a fake delta chain that just consists of a single full text revision.
201 200 # The nullid in the deltabasenode slot indicates that the revision is a
202 201 # fulltext.
203 202 revision = self.get(name, node)
204 203 return [(name, node, None, sha1nodeconstants.nullid, revision)]
205 204
206 205 def getmeta(self, name, node):
207 206 self._sanitizemetacache()
208 207 if node != self._threaddata.metacache[0]:
209 208 data = self._getdata(name, node)
210 209 offset, size, flags = shallowutil.parsesizeflags(data)
211 210 self._updatemetacache(node, size, flags)
212 211 return self._threaddata.metacache[1]
213 212
214 213 def add(self, name, node, data):
215 214 raise RuntimeError(
216 215 b"cannot add content only to remotefilelog contentstore"
217 216 )
218 217
219 218 def _sanitizemetacache(self):
220 219 metacache = getattr(self._threaddata, 'metacache', None)
221 220 if metacache is None:
222 221 self._threaddata.metacache = (None, None) # (node, meta)
223 222
224 223 def _updatemetacache(self, node, size, flags):
225 224 self._sanitizemetacache()
226 225 if node == self._threaddata.metacache[0]:
227 226 return
228 227 meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
229 228 self._threaddata.metacache = (node, meta)
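
The metacache is deliberately tiny: one (node, meta) pair per thread, just enough so that a get() immediately followed by getmeta() for the same node skips a second blob parse. The pattern in isolation, as a hedged sketch:

    import threading

    class lastnodecache:
        """Remember the meta of the most recently seen node, per thread."""

        def __init__(self, compute):
            self._compute = compute           # node -> meta, assumed expensive
            self._local = threading.local()

        def meta(self, node):
            cached = getattr(self._local, 'entry', (None, None))
            if cached[0] == node:
                return cached[1]
            meta = self._compute(node)
            self._local.entry = (node, meta)
            return meta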
230 229
231 230
232 231 class remotecontentstore:
233 232 def __init__(self, ui, fileservice, shared):
234 233 self._fileservice = fileservice
235 234 # type(shared) is usually remotefilelogcontentstore
236 235 self._shared = shared
237 236
238 237 def get(self, name, node):
239 238 self._fileservice.prefetch(
240 239 [(name, hex(node))], force=True, fetchdata=True
241 240 )
242 241 return self._shared.get(name, node)
243 242
244 243 def getdelta(self, name, node):
245 244 revision = self.get(name, node)
246 245 return (
247 246 revision,
248 247 name,
249 248 sha1nodeconstants.nullid,
250 249 self._shared.getmeta(name, node),
251 250 )
252 251
253 252 def getdeltachain(self, name, node):
254 253 # Since our remote content stores just contain full texts, we return a
255 254 # fake delta chain that just consists of a single full text revision.
256 255 # The nullid in the deltabasenode slot indicates that the revision is a
257 256 # fulltext.
258 257 revision = self.get(name, node)
259 258 return [(name, node, None, sha1nodeconstants.nullid, revision)]
260 259
261 260 def getmeta(self, name, node):
262 261 self._fileservice.prefetch(
263 262 [(name, hex(node))], force=True, fetchdata=True
264 263 )
265 264 return self._shared.getmeta(name, node)
266 265
267 266 def add(self, name, node, data):
268 267 raise RuntimeError(b"cannot add to a remote store")
269 268
270 269 def getmissing(self, keys):
271 270 return keys
272 271
273 272 def markledger(self, ledger, options=None):
274 273 pass
275 274
276 275
277 276 class manifestrevlogstore:
278 277 def __init__(self, repo):
279 278 self._store = repo.store
280 279 self._svfs = repo.svfs
281 280 self._revlogs = dict()
282 281 self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i')
283 282 self._repackstartlinkrev = 0
284 283
285 284 def get(self, name, node):
286 285 return self._revlog(name).rawdata(node)
287 286
288 287 def getdelta(self, name, node):
289 288 revision = self.get(name, node)
290 289 return revision, name, self._cl.nullid, self.getmeta(name, node)
291 290
292 291 def getdeltachain(self, name, node):
293 292 revision = self.get(name, node)
294 293 return [(name, node, None, self._cl.nullid, revision)]
295 294
296 295 def getmeta(self, name, node):
297 296 rl = self._revlog(name)
298 297 rev = rl.rev(node)
299 298 return {
300 299 constants.METAKEYFLAG: rl.flags(rev),
301 300 constants.METAKEYSIZE: rl.rawsize(rev),
302 301 }
303 302
304 303 def getancestors(self, name, node, known=None):
305 304 if known is None:
306 305 known = set()
307 306 if node in known:
308 307 return []
309 308
310 309 rl = self._revlog(name)
311 310 ancestors = {}
312 311 missing = {node}
313 312 for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
314 313 ancnode = rl.node(ancrev)
315 314 missing.discard(ancnode)
316 315
317 316 p1, p2 = rl.parents(ancnode)
318 317 if p1 != self._cl.nullid and p1 not in known:
319 318 missing.add(p1)
320 319 if p2 != self._cl.nullid and p2 not in known:
321 320 missing.add(p2)
322 321
323 322 linknode = self._cl.node(rl.linkrev(ancrev))
324 323 ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'')
325 324 if not missing:
326 325 break
327 326 return ancestors
328 327
329 328 def getnodeinfo(self, name, node):
330 329 cl = self._cl
331 330 rl = self._revlog(name)
332 331 parents = rl.parents(node)
333 332 linkrev = rl.linkrev(rl.rev(node))
334 333 return (parents[0], parents[1], cl.node(linkrev), None)
335 334
336 335 def add(self, *args):
337 336 raise RuntimeError(b"cannot add to a revlog store")
338 337
339 338 def _revlog(self, name):
340 339 rl = self._revlogs.get(name)
341 340 if rl is None:
342 341 revlogname = b'00manifesttree'
343 342 if name != b'':
344 343 revlogname = b'meta/%s/00manifest' % name
345 344 rl = revlog.revlog(self._svfs, radix=revlogname)
346 345 self._revlogs[name] = rl
347 346 return rl
348 347
349 348 def getmissing(self, keys):
350 349 missing = []
351 350 for name, node in keys:
352 351 mfrevlog = self._revlog(name)
353 352 if node not in mfrevlog.nodemap:
354 353 missing.append((name, node))
355 354
356 355 return missing
357 356
358 357 def setrepacklinkrevrange(self, startrev, endrev):
359 358 self._repackstartlinkrev = startrev
360 359 self._repackendlinkrev = endrev
361 360
362 361 def markledger(self, ledger, options=None):
363 362 if options and options.get(constants.OPTION_PACKSONLY):
364 363 return
365 364 treename = b''
366 365 rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
367 366 startlinkrev = self._repackstartlinkrev
368 367 endlinkrev = self._repackendlinkrev
369 for rev in pycompat.xrange(len(rl) - 1, -1, -1):
368 for rev in range(len(rl) - 1, -1, -1):
370 369 linkrev = rl.linkrev(rev)
371 370 if linkrev < startlinkrev:
372 371 break
373 372 if linkrev > endlinkrev:
374 373 continue
375 374 node = rl.node(rev)
376 375 ledger.markdataentry(self, treename, node)
377 376 ledger.markhistoryentry(self, treename, node)
378 377
379 378 for t, path, size in self._store.datafiles():
380 379 if path[:5] != b'meta/' or path[-2:] != b'.i':
381 380 continue
382 381
383 382 treename = path[5 : -len(b'/00manifest')]
384 383
385 384 rl = revlog.revlog(self._svfs, indexfile=path[:-2])
386 for rev in pycompat.xrange(len(rl) - 1, -1, -1):
385 for rev in range(len(rl) - 1, -1, -1):
387 386 linkrev = rl.linkrev(rev)
388 387 if linkrev < startlinkrev:
389 388 break
390 389 if linkrev > endlinkrev:
391 390 continue
392 391 node = rl.node(rev)
393 392 ledger.markdataentry(self, treename, node)
394 393 ledger.markhistoryentry(self, treename, node)
395 394
396 395 def cleanup(self, ledger):
397 396 pass
@@ -1,474 +1,473 b''
1 1 import struct
2 2 import zlib
3 3
4 4 from mercurial.node import (
5 5 hex,
6 6 sha1nodeconstants,
7 7 )
8 8 from mercurial.i18n import _
9 9 from mercurial import (
10 pycompat,
11 10 util,
12 11 )
13 12 from . import (
14 13 basepack,
15 14 constants,
16 15 shallowutil,
17 16 )
18 17
19 18 NODELENGTH = 20
20 19
21 20 # The indicator value in the index for a fulltext entry.
22 21 FULLTEXTINDEXMARK = -1
23 22 NOBASEINDEXMARK = -2
24 23
25 24 INDEXSUFFIX = b'.dataidx'
26 25 PACKSUFFIX = b'.datapack'
27 26
28 27
29 28 class datapackstore(basepack.basepackstore):
30 29 INDEXSUFFIX = INDEXSUFFIX
31 30 PACKSUFFIX = PACKSUFFIX
32 31
33 32 def __init__(self, ui, path):
34 33 super(datapackstore, self).__init__(ui, path)
35 34
36 35 def getpack(self, path):
37 36 return datapack(path)
38 37
39 38 def get(self, name, node):
40 39 raise RuntimeError(b"must use getdeltachain with datapackstore")
41 40
42 41 def getmeta(self, name, node):
43 42 for pack in self.packs:
44 43 try:
45 44 return pack.getmeta(name, node)
46 45 except KeyError:
47 46 pass
48 47
49 48 for pack in self.refresh():
50 49 try:
51 50 return pack.getmeta(name, node)
52 51 except KeyError:
53 52 pass
54 53
55 54 raise KeyError((name, hex(node)))
56 55
57 56 def getdelta(self, name, node):
58 57 for pack in self.packs:
59 58 try:
60 59 return pack.getdelta(name, node)
61 60 except KeyError:
62 61 pass
63 62
64 63 for pack in self.refresh():
65 64 try:
66 65 return pack.getdelta(name, node)
67 66 except KeyError:
68 67 pass
69 68
70 69 raise KeyError((name, hex(node)))
71 70
72 71 def getdeltachain(self, name, node):
73 72 for pack in self.packs:
74 73 try:
75 74 return pack.getdeltachain(name, node)
76 75 except KeyError:
77 76 pass
78 77
79 78 for pack in self.refresh():
80 79 try:
81 80 return pack.getdeltachain(name, node)
82 81 except KeyError:
83 82 pass
84 83
85 84 raise KeyError((name, hex(node)))
86 85
87 86 def add(self, name, node, data):
88 87 raise RuntimeError(b"cannot add to datapackstore")
89 88
90 89
91 90 class datapack(basepack.basepack):
92 91 INDEXSUFFIX = INDEXSUFFIX
93 92 PACKSUFFIX = PACKSUFFIX
94 93
95 94 # Format is <node><delta offset><pack data offset><pack data size>
96 95 # See the mutabledatapack doccomment for more details.
97 96 INDEXFORMAT = b'!20siQQ'
98 97 INDEXENTRYLENGTH = 40
99 98
100 99 SUPPORTED_VERSIONS = [2]
101 100
102 101 def getmissing(self, keys):
103 102 missing = []
104 103 for name, node in keys:
105 104 value = self._find(node)
106 105 if not value:
107 106 missing.append((name, node))
108 107
109 108 return missing
110 109
111 110 def get(self, name, node):
112 111 raise RuntimeError(
113 112 b"must use getdeltachain with datapack (%s:%s)" % (name, hex(node))
114 113 )
115 114
116 115 def getmeta(self, name, node):
117 116 value = self._find(node)
118 117 if value is None:
119 118 raise KeyError((name, hex(node)))
120 119
121 120 node, deltabaseoffset, offset, size = value
122 121 rawentry = self._data[offset : offset + size]
123 122
124 123 # see docstring of mutabledatapack for the format
125 124 offset = 0
126 125 offset += struct.unpack_from(b'!H', rawentry, offset)[0] + 2 # filename
127 126 offset += 40 # node, deltabase node
128 127 offset += struct.unpack_from(b'!Q', rawentry, offset)[0] + 8 # delta
129 128
130 129 metalen = struct.unpack_from(b'!I', rawentry, offset)[0]
131 130 offset += 4
132 131
133 132 meta = shallowutil.parsepackmeta(rawentry[offset : offset + metalen])
134 133
135 134 return meta
136 135
137 136 def getdelta(self, name, node):
138 137 value = self._find(node)
139 138 if value is None:
140 139 raise KeyError((name, hex(node)))
141 140
142 141 node, deltabaseoffset, offset, size = value
143 142 entry = self._readentry(offset, size, getmeta=True)
144 143 filename, node, deltabasenode, delta, meta = entry
145 144
146 145 # If we've read a lot of data from the mmap, free some memory.
147 146 self.freememory()
148 147
149 148 return delta, filename, deltabasenode, meta
150 149
151 150 def getdeltachain(self, name, node):
152 151 value = self._find(node)
153 152 if value is None:
154 153 raise KeyError((name, hex(node)))
155 154
156 155 params = self.params
157 156
158 157 # Precompute chains
159 158 chain = [value]
160 159 deltabaseoffset = value[1]
161 160 entrylen = self.INDEXENTRYLENGTH
162 161 while (
163 162 deltabaseoffset != FULLTEXTINDEXMARK
164 163 and deltabaseoffset != NOBASEINDEXMARK
165 164 ):
166 165 loc = params.indexstart + deltabaseoffset
167 166 value = struct.unpack(
168 167 self.INDEXFORMAT, self._index[loc : loc + entrylen]
169 168 )
170 169 deltabaseoffset = value[1]
171 170 chain.append(value)
172 171
173 172 # Read chain data
174 173 deltachain = []
175 174 for node, deltabaseoffset, offset, size in chain:
176 175 filename, node, deltabasenode, delta = self._readentry(offset, size)
177 176 deltachain.append((filename, node, filename, deltabasenode, delta))
178 177
179 178 # If we've read a lot of data from the mmap, free some memory.
180 179 self.freememory()
181 180
182 181 return deltachain
183 182
184 183 def _readentry(self, offset, size, getmeta=False):
185 184 rawentry = self._data[offset : offset + size]
186 185 self._pagedin += len(rawentry)
187 186
188 187 # <2 byte len> + <filename>
189 188 lengthsize = 2
190 189 filenamelen = struct.unpack(b'!H', rawentry[:2])[0]
191 190 filename = rawentry[lengthsize : lengthsize + filenamelen]
192 191
193 192 # <20 byte node> + <20 byte deltabase>
194 193 nodestart = lengthsize + filenamelen
195 194 deltabasestart = nodestart + NODELENGTH
196 195 node = rawentry[nodestart:deltabasestart]
197 196 deltabasenode = rawentry[deltabasestart : deltabasestart + NODELENGTH]
198 197
199 198 # <8 byte len> + <delta>
200 199 deltastart = deltabasestart + NODELENGTH
201 200 rawdeltalen = rawentry[deltastart : deltastart + 8]
202 201 deltalen = struct.unpack(b'!Q', rawdeltalen)[0]
203 202
204 203 delta = rawentry[deltastart + 8 : deltastart + 8 + deltalen]
205 204 delta = self._decompress(delta)
206 205
207 206 if getmeta:
208 207 metastart = deltastart + 8 + deltalen
209 208 metalen = struct.unpack_from(b'!I', rawentry, metastart)[0]
210 209
211 210 rawmeta = rawentry[metastart + 4 : metastart + 4 + metalen]
212 211 meta = shallowutil.parsepackmeta(rawmeta)
213 212 return filename, node, deltabasenode, delta, meta
214 213 else:
215 214 return filename, node, deltabasenode, delta
216 215
217 216 def _decompress(self, data):
218 217 return zlib.decompress(data)
219 218
220 219 def add(self, name, node, data):
221 220 raise RuntimeError(b"cannot add to datapack (%s:%s)" % (name, node))
222 221
223 222 def _find(self, node):
224 223 params = self.params
225 224 fanoutkey = struct.unpack(
226 225 params.fanoutstruct, node[: params.fanoutprefix]
227 226 )[0]
228 227 fanout = self._fanouttable
229 228
230 229 start = fanout[fanoutkey] + params.indexstart
231 230 indexend = self._indexend
232 231
233 232 # Scan forward to find the first non-same entry, which is the upper
234 233 # bound.
235 for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
234 for i in range(fanoutkey + 1, params.fanoutcount):
236 235 end = fanout[i] + params.indexstart
237 236 if end != start:
238 237 break
239 238 else:
240 239 end = indexend
241 240
242 241 # Bisect between start and end to find node
243 242 index = self._index
244 243 startnode = index[start : start + NODELENGTH]
245 244 endnode = index[end : end + NODELENGTH]
246 245 entrylen = self.INDEXENTRYLENGTH
247 246 if startnode == node:
248 247 entry = index[start : start + entrylen]
249 248 elif endnode == node:
250 249 entry = index[end : end + entrylen]
251 250 else:
252 251 while start < end - entrylen:
253 252 mid = start + (end - start) // 2
254 253 mid = mid - ((mid - params.indexstart) % entrylen)
255 254 midnode = index[mid : mid + NODELENGTH]
256 255 if midnode == node:
257 256 entry = index[mid : mid + entrylen]
258 257 break
259 258 if node > midnode:
260 259 start = mid
261 260 elif node < midnode:
262 261 end = mid
263 262 else:
264 263 return None
265 264
266 265 return struct.unpack(self.INDEXFORMAT, entry)
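
The lookup combines a fanout table with bisection: the node's leading bytes select a fanout slot whose value bounds the bisect range, so only a small slice of the sorted index is searched. A toy version over a plain sorted list, shrunk to a 1-byte fanout:

    import bisect

    def find(sorted_nodes, fanout, node):
        """fanout[b]: index of the first node whose leading byte is >= b."""
        key = node[0]
        start = fanout[key]
        end = fanout[key + 1] if key + 1 < len(fanout) else len(sorted_nodes)
        i = bisect.bisect_left(sorted_nodes, node, start, end)
        if i < end and sorted_nodes[i] == node:
            return i
        return None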
267 266
268 267 def markledger(self, ledger, options=None):
269 268 for filename, node in self:
270 269 ledger.markdataentry(self, filename, node)
271 270
272 271 def cleanup(self, ledger):
273 272 entries = ledger.sources.get(self, [])
274 273 allkeys = set(self)
275 274 repackedkeys = {
276 275 (e.filename, e.node) for e in entries if e.datarepacked or e.gced
277 276 }
278 277
279 278 if len(allkeys - repackedkeys) == 0:
280 279 if self.path not in ledger.created:
281 280 util.unlinkpath(self.indexpath, ignoremissing=True)
282 281 util.unlinkpath(self.packpath, ignoremissing=True)
283 282
284 283 def __iter__(self):
285 284 for f, n, deltabase, deltalen in self.iterentries():
286 285 yield f, n
287 286
288 287 def iterentries(self):
289 288 # Start at 1 to skip the header
290 289 offset = 1
291 290 data = self._data
292 291 while offset < self.datasize:
293 292 oldoffset = offset
294 293
295 294 # <2 byte len> + <filename>
296 295 filenamelen = struct.unpack(b'!H', data[offset : offset + 2])[0]
297 296 offset += 2
298 297 filename = data[offset : offset + filenamelen]
299 298 offset += filenamelen
300 299
301 300 # <20 byte node>
302 301 node = data[offset : offset + constants.NODESIZE]
303 302 offset += constants.NODESIZE
304 303 # <20 byte deltabase>
305 304 deltabase = data[offset : offset + constants.NODESIZE]
306 305 offset += constants.NODESIZE
307 306
308 307 # <8 byte len> + <delta>
309 308 rawdeltalen = data[offset : offset + 8]
310 309 deltalen = struct.unpack(b'!Q', rawdeltalen)[0]
311 310 offset += 8
312 311
313 312 # TODO(augie): we should store a header that is the
314 313 # uncompressed size.
315 314 uncompressedlen = len(
316 315 self._decompress(data[offset : offset + deltalen])
317 316 )
318 317 offset += deltalen
319 318
320 319 # <4 byte len> + <metadata-list>
321 320 metalen = struct.unpack_from(b'!I', data, offset)[0]
322 321 offset += 4 + metalen
323 322
324 323 yield (filename, node, deltabase, uncompressedlen)
325 324
326 325 # If we've read a lot of data from the mmap, free some memory.
327 326 self._pagedin += offset - oldoffset
328 327 if self.freememory():
329 328 data = self._data
330 329
331 330
332 331 class mutabledatapack(basepack.mutablebasepack):
333 332 """A class for constructing and serializing a datapack file and index.
334 333
335 334 A datapack is a pair of files that contain the revision contents for various
336 335 file revisions in Mercurial. It contains only revision contents (like file
337 336 contents), not any history information.
338 337
339 338 It consists of two files, with the following format. All bytes are in
340 339 network byte order (big endian).
341 340
342 341 .datapack
343 342 The pack itself is a series of revision deltas with some basic header
344 343 information on each. A revision delta may be a fulltext, represented by
345 344 a deltabasenode equal to the nullid.
346 345
347 346 datapack = <version: 1 byte>
348 347 [<revision>,...]
349 348 revision = <filename len: 2 byte unsigned int>
350 349 <filename>
351 350 <node: 20 byte>
352 351 <deltabasenode: 20 byte>
353 352 <delta len: 8 byte unsigned int>
354 353 <delta>
355 354 <metadata-list len: 4 byte unsigned int> [1]
356 355 <metadata-list> [1]
357 356 metadata-list = [<metadata-item>, ...]
358 357 metadata-item = <metadata-key: 1 byte>
359 358 <metadata-value len: 2 byte unsigned>
360 359 <metadata-value>
361 360
362 361 metadata-key could be METAKEYFLAG or METAKEYSIZE or other single byte
363 362 value in the future.
364 363
365 364 .dataidx
366 365 The index file consists of two parts, the fanout and the index.
367 366
368 367 The index is a list of index entries, sorted by node (one per revision
369 368 in the pack). Each entry has:
370 369
371 370 - node (The 20 byte node of the entry; i.e. the commit hash, file node
372 371 hash, etc)
373 372 - deltabase index offset (The location in the index of the deltabase for
374 373 this entry. The deltabase is the next delta in
375 374 the chain, with the chain eventually
376 375 terminating in a full-text, represented by a
377 376 deltabase offset of -1. This lets us compute
378 377 delta chains from the index, then do
379 378 sequential reads from the pack if the revision
380 379 are nearby on disk.)
381 380 - pack entry offset (The location of this entry in the datapack)
382 381 - pack content size (The on-disk length of this entry's pack data)
383 382
384 383 The fanout is a quick lookup table to reduce the number of steps for
385 384 bisecting the index. It is a series of 4 byte pointers to positions
386 385 within the index. It has 2^16 entries, which corresponds to hash
387 386 prefixes [0000, 0001,..., FFFE, FFFF]. Example: the pointer in slot
388 387 4F0A points to the index position of the first revision whose node
389 388 starts with 4F0A. This saves log(2^16)=16 bisect steps.
390 389
391 390 dataidx = <fanouttable>
392 391 <index>
393 392 fanouttable = [<index offset: 4 byte unsigned int>,...] (2^16 entries)
394 393 index = [<index entry>,...]
395 394 indexentry = <node: 20 byte>
396 395 <deltabase location: 4 byte signed int>
397 396 <pack entry offset: 8 byte unsigned int>
398 397 <pack entry size: 8 byte unsigned int>
399 398
400 399 [1]: new in version 1.
401 400 """
402 401
403 402 INDEXSUFFIX = INDEXSUFFIX
404 403 PACKSUFFIX = PACKSUFFIX
405 404
406 405 # v[01] index format: <node><delta offset><pack data offset><pack data size>
407 406 INDEXFORMAT = datapack.INDEXFORMAT
408 407 INDEXENTRYLENGTH = datapack.INDEXENTRYLENGTH
409 408
410 409 # v1 has metadata support
411 410 SUPPORTED_VERSIONS = [2]
412 411
413 412 def _compress(self, data):
414 413 return zlib.compress(data)
415 414
416 415 def add(self, name, node, deltabasenode, delta, metadata=None):
417 416 # metadata is a dict, ex. {METAKEYFLAG: flag}
417 416 if len(name) >= 2 ** 16:
419 418 raise RuntimeError(_(b"name too long %s") % name)
420 419 if len(node) != 20:
421 420 raise RuntimeError(_(b"node should be 20 bytes %s") % node)
422 421
423 422 if node in self.entries:
424 423 # The revision has already been added
425 424 return
426 425
427 426 # TODO: allow configurable compression
428 427 delta = self._compress(delta)
429 428
430 429 rawdata = b''.join(
431 430 (
432 431 struct.pack(b'!H', len(name)), # unsigned 2 byte int
433 432 name,
434 433 node,
435 434 deltabasenode,
436 435 struct.pack(b'!Q', len(delta)), # unsigned 8 byte int
437 436 delta,
438 437 )
439 438 )
440 439
441 440 # v1 support metadata
442 441 rawmeta = shallowutil.buildpackmeta(metadata)
443 442 rawdata += struct.pack(b'!I', len(rawmeta)) # unsigned 4 byte
444 443 rawdata += rawmeta
445 444
446 445 offset = self.packfp.tell()
447 446
448 447 size = len(rawdata)
449 448
450 449 self.entries[node] = (deltabasenode, offset, size)
451 450
452 451 self.writeraw(rawdata)
453 452
454 453 def createindex(self, nodelocations, indexoffset):
455 454 entries = sorted(
456 455 (n, db, o, s) for n, (db, o, s) in self.entries.items()
457 456 )
458 457
459 458 rawindex = b''
460 459 fmt = self.INDEXFORMAT
461 460 for node, deltabase, offset, size in entries:
462 461 if deltabase == sha1nodeconstants.nullid:
463 462 deltabaselocation = FULLTEXTINDEXMARK
464 463 else:
465 464 # Instead of storing the deltabase node in the index, let's
466 465 # store a pointer directly to the index entry for the deltabase.
467 466 deltabaselocation = nodelocations.get(
468 467 deltabase, NOBASEINDEXMARK
469 468 )
470 469
471 470 entry = struct.pack(fmt, node, deltabaselocation, offset, size)
472 471 rawindex += entry
473 472
474 473 return rawindex
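
For reference, a single revision record in the .datapack layout documented in the doccomment above can be decoded with struct alone. A hedged standalone sketch (big-endian throughout; the delta bytes are still zlib-compressed on disk):

    import struct

    def read_revision(buf, offset=1):                 # offset 1 skips the version byte
        (namelen,) = struct.unpack_from(b'!H', buf, offset)
        offset += 2
        name = buf[offset : offset + namelen]
        offset += namelen
        node = buf[offset : offset + 20]
        offset += 20
        deltabase = buf[offset : offset + 20]
        offset += 20
        (deltalen,) = struct.unpack_from(b'!Q', buf, offset)
        offset += 8
        delta = buf[offset : offset + deltalen]       # zlib-compressed
        offset += deltalen
        (metalen,) = struct.unpack_from(b'!I', buf, offset)
        offset += 4
        rawmeta = buf[offset : offset + metalen]
        offset += metalen
        return name, node, deltabase, delta, rawmeta, offset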