##// END OF EJS Templates
cmdutil: change check_incompatible_arguments() *arg to single iterable...
Martin von Zweigbergk -
r44655:d4c15012 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,2266 +1,2266 b''
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 https://mercurial-scm.org/wiki/RebaseExtension
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 import errno
20 20 import os
21 21
22 22 from mercurial.i18n import _
23 23 from mercurial.node import (
24 24 nullrev,
25 25 short,
26 26 )
27 27 from mercurial.pycompat import open
28 28 from mercurial import (
29 29 bookmarks,
30 30 cmdutil,
31 31 commands,
32 32 copies,
33 33 destutil,
34 34 dirstateguard,
35 35 error,
36 36 extensions,
37 37 hg,
38 38 merge as mergemod,
39 39 mergeutil,
40 40 obsolete,
41 41 obsutil,
42 42 patch,
43 43 phases,
44 44 pycompat,
45 45 registrar,
46 46 repair,
47 47 revset,
48 48 revsetlang,
49 49 rewriteutil,
50 50 scmutil,
51 51 smartset,
52 52 state as statemod,
53 53 util,
54 54 )
55 55
# The following constants are used throughout the rebase module. The ordering of
# their values must be maintained.

# Indicates that a revision needs to be rebased
revtodo = -1
# string form of revtodo, as serialized in the on-disk rebasestate file
revtodostr = b'-1'

# legacy revstates no longer needed in current code
# -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
legacystates = {b'-2', b'-3', b'-4', b'-5'}

# command table, populated by the @command decorator below
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
75 75
76 76 def _nothingtorebase():
77 77 return 1
78 78
79 79
80 80 def _savegraft(ctx, extra):
81 81 s = ctx.extra().get(b'source', None)
82 82 if s is not None:
83 83 extra[b'source'] = s
84 84 s = ctx.extra().get(b'intermediate-source', None)
85 85 if s is not None:
86 86 extra[b'intermediate-source'] = s
87 87
88 88
89 89 def _savebranch(ctx, extra):
90 90 extra[b'branch'] = ctx.branch()
91 91
92 92
def _destrebase(repo, sourceset, destspace=None):
    """Thin wrapper forwarding to destutil.destmerge with rebase defaults.

    Please wrap destutil.destmerge instead of calling this helper."""
    return destutil.destmerge(
        repo,
        sourceset=sourceset,
        destspace=destspace,
        action=b'rebase',
        onheadcheck=False,
    )
104 104
105 105
# registrar for the revset predicates defined below
revsetpredicate = registrar.revsetpredicate()
107 107
108 108
@revsetpredicate(b'_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    if x is None:
        sourceset = None
    else:
        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
    dest = _destrebase(repo, sourceset)
    return subset & smartset.baseset([dest])
121 121
122 122
@revsetpredicate(b'_destautoorphanrebase')
def _revsetdestautoorphanrebase(repo, subset, x):
    # ``_destautoorphanrebase()``

    # automatic rebase destination for a single orphan revision.
    unfiltered = repo.unfiltered()
    obsoleterevs = unfiltered.revs(b'obsolete()')

    src = revset.getset(repo, subset, x).first()

    # No source, or the source is itself obsolete: do not return a destination
    if not src or src in obsoleterevs:
        return smartset.baseset()
    candidates = destutil.orphanpossibledestination(repo, src)
    if len(candidates) > 1:
        raise error.Abort(
            _(b"ambiguous automatic rebase: %r could end up on any of %r")
            % (src, candidates)
        )
    # Zero or one candidate destination remains; return it directly.
    return smartset.baseset(candidates)
144 144
145 145
def _ctxdesc(ctx):
    """Return a short b'rev:hash "summary" (names)' description of ``ctx``."""
    summary = ctx.description().split(b'\n', 1)[0]
    desc = b'%d:%s "%s"' % (ctx.rev(), ctx, summary)
    repo = ctx.repo()
    # collect every name attached to the node, except branch names
    labels = [
        name
        for nsname, ns in pycompat.iteritems(repo.names)
        if nsname != b'branches'
        for name in ns.names(repo, ctx.node())
    ]
    if labels:
        desc += b' (%s)' % b' '.join(labels)
    return desc
162 162
163 163
class rebaseruntime(object):
    """This class is a container for rebase runtime state"""

    def __init__(self, repo, ui, inmemory=False, opts=None):
        """Initialize runtime state from the parsed command options.

        repo, ui: repository and ui to operate on
        inmemory: whether to rebase using in-memory contexts
        opts: (byte-string keyed) rebase command options; may be None
        """
        if opts is None:
            opts = {}

        # prepared: whether we have rebasestate prepared or not. Currently it
        # decides whether "self.repo" is unfiltered or not.
        # The rebasestate has explicit hash to hash instructions not depending
        # on visibility. If rebasestate exists (in-memory or on-disk), use
        # unfiltered repo to avoid visibility issues.
        # Before knowing rebasestate (i.e. when starting a new rebase (not
        # --continue or --abort)), the original repo should be used so
        # visibility-dependent revsets are correct.
        self.prepared = False
        self._repo = repo

        self.ui = ui
        self.opts = opts
        self.originalwd = None
        self.external = nullrev
        # Mapping between the old revision id and either what is the new rebased
        # revision or what needs to be done with the old revision. The state
        # dict will be what contains most of the rebase progress state.
        self.state = {}
        self.activebookmark = None
        self.destmap = {}
        self.skipped = set()

        self.collapsef = opts.get(b'collapse', False)
        self.collapsemsg = cmdutil.logmessage(ui, opts)
        self.date = opts.get(b'date', None)

        e = opts.get(b'extrafn')  # internal, used by e.g. hgsubversion
        self.extrafns = [_savegraft]
        if e:
            self.extrafns = [e]

        self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
        self.keepf = opts.get(b'keep', False)
        self.keepbranchesf = opts.get(b'keepbranches', False)
        self.obsoletenotrebased = {}
        self.obsoletewithoutsuccessorindestination = set()
        self.inmemory = inmemory
        self.stateobj = statemod.cmdstate(repo, b'rebasestate')

    @property
    def repo(self):
        """The repo to operate on: unfiltered once rebasestate is prepared.

        The stored state refers to revisions by hash, which may be hidden
        in the filtered repo (see the note in __init__).
        """
        if self.prepared:
            return self._repo.unfiltered()
        else:
            return self._repo

    def storestatus(self, tr=None):
        """Store the current status to allow recovery"""
        if tr:
            # within a transaction, defer the write until the transaction
            # commits
            tr.addfilegenerator(
                b'rebasestate',
                (b'rebasestate',),
                self._writestatus,
                location=b'plain',
            )
        else:
            with self.repo.vfs(b"rebasestate", b"w") as f:
                self._writestatus(f)

    def _writestatus(self, f):
        """Serialize the rebase state to file object ``f``.

        Format: one header line each for originalwd, (legacy, now empty)
        dest, external, collapse, keep, keepbranches and activebookmark,
        followed by one oldrev:newrev:destnode line per state entry.
        """
        repo = self.repo
        assert repo.filtername is None
        f.write(repo[self.originalwd].hex() + b'\n')
        # was "dest". we now write dest per src root below.
        f.write(b'\n')
        f.write(repo[self.external].hex() + b'\n')
        f.write(b'%d\n' % int(self.collapsef))
        f.write(b'%d\n' % int(self.keepf))
        f.write(b'%d\n' % int(self.keepbranchesf))
        f.write(b'%s\n' % (self.activebookmark or b''))
        destmap = self.destmap
        for d, v in pycompat.iteritems(self.state):
            oldrev = repo[d].hex()
            if v >= 0:
                newrev = repo[v].hex()
            else:
                # negative values are sentinels (e.g. revtodo), stored as text
                newrev = b"%d" % v
            destnode = repo[destmap[d]].hex()
            f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
        repo.ui.debug(b'rebase status stored\n')

    def restorestatus(self):
        """Restore a previously stored status"""
        if not self.stateobj.exists():
            cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))

        data = self._read()
        self.repo.ui.debug(b'rebase status resumed\n')

        self.originalwd = data[b'originalwd']
        self.destmap = data[b'destmap']
        self.state = data[b'state']
        self.skipped = data[b'skipped']
        self.collapsef = data[b'collapse']
        self.keepf = data[b'keep']
        self.keepbranchesf = data[b'keepbranches']
        self.external = data[b'external']
        self.activebookmark = data[b'activebookmark']

    def _read(self):
        """Parse .hg/rebasestate and return a dict of the decoded fields.

        Also recomputes the set of skipped revisions and marks the runtime
        as prepared (switching self.repo to the unfiltered repo).
        """
        self.prepared = True
        repo = self.repo
        assert repo.filtername is None
        data = {
            b'keepbranches': None,
            b'collapse': None,
            b'activebookmark': None,
            b'external': nullrev,
            b'keep': None,
            b'originalwd': None,
        }
        legacydest = None
        state = {}
        destmap = {}

        # NOTE(review): the "if True:" wrapper appears to only preserve the
        # historical indentation of the parsing loop
        if True:
            f = repo.vfs(b"rebasestate")
            for i, l in enumerate(f.read().splitlines()):
                if i == 0:
                    data[b'originalwd'] = repo[l].rev()
                elif i == 1:
                    # this line should be empty in newer version. but legacy
                    # clients may still use it
                    if l:
                        legacydest = repo[l].rev()
                elif i == 2:
                    data[b'external'] = repo[l].rev()
                elif i == 3:
                    data[b'collapse'] = bool(int(l))
                elif i == 4:
                    data[b'keep'] = bool(int(l))
                elif i == 5:
                    data[b'keepbranches'] = bool(int(l))
                elif i == 6 and not (len(l) == 81 and b':' in l):
                    # line 6 is a recent addition, so for backwards
                    # compatibility check that the line doesn't look like the
                    # oldrev:newrev lines
                    data[b'activebookmark'] = l
                else:
                    args = l.split(b':')
                    oldrev = repo[args[0]].rev()
                    newrev = args[1]
                    if newrev in legacystates:
                        continue
                    if len(args) > 2:
                        destrev = repo[args[2]].rev()
                    else:
                        destrev = legacydest
                    destmap[oldrev] = destrev
                    if newrev == revtodostr:
                        state[oldrev] = revtodo
                    # Legacy compat special case
                    else:
                        state[oldrev] = repo[newrev].rev()

        if data[b'keepbranches'] is None:
            raise error.Abort(_(b'.hg/rebasestate is incomplete'))

        data[b'destmap'] = destmap
        data[b'state'] = state
        skipped = set()
        # recompute the set of skipped revs
        if not data[b'collapse']:
            seen = set(destmap.values())
            for old, new in sorted(state.items()):
                if new != revtodo and new in seen:
                    skipped.add(old)
                seen.add(new)
        data[b'skipped'] = skipped
        repo.ui.debug(
            b'computed skipped revs: %s\n'
            % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
        )

        return data

    def _handleskippingobsolete(self, obsoleterevs, destmap):
        """Compute structures necessary for skipping obsolete revisions

        obsoleterevs: iterable of all obsolete revisions in rebaseset
        destmap: {srcrev: destrev} destination revisions
        """
        self.obsoletenotrebased = {}
        if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
            return
        obsoleteset = set(obsoleterevs)
        (
            self.obsoletenotrebased,
            self.obsoletewithoutsuccessorindestination,
            obsoleteextinctsuccessors,
        ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
        skippedset = set(self.obsoletenotrebased)
        skippedset.update(self.obsoletewithoutsuccessorindestination)
        skippedset.update(obsoleteextinctsuccessors)
        _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)

    def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
        """Restore saved state for --abort/--continue.

        Returns the exit status when aborting (including after a broken
        state was merely cleared); returns None when the state was
        restored for --continue. Aborts with a hint when the saved state
        cannot be restored and we are not aborting.
        """
        try:
            self.restorestatus()
            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
        except error.RepoLookupError:
            if isabort:
                clearstatus(self.repo)
                clearcollapsemsg(self.repo)
                self.repo.ui.warn(
                    _(
                        b'rebase aborted (no revision is removed,'
                        b' only broken state is cleared)\n'
                    )
                )
                return 0
            else:
                msg = _(b'cannot continue inconsistent rebase')
                hint = _(b'use "hg rebase --abort" to clear broken state')
                raise error.Abort(msg, hint=hint)

        if isabort:
            backup = backup and self.backupf
            return self._abort(backup=backup, suppwarns=suppwarns)

    def _preparenewrebase(self, destmap):
        """Validate ``destmap`` and build the initial rebase state.

        Returns a nonzero status when there is nothing to rebase; None
        otherwise (with self.state/destmap/originalwd populated and
        self.prepared set).
        """
        if not destmap:
            return _nothingtorebase()

        rebaseset = destmap.keys()
        if not self.keepf:
            try:
                rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
            except error.Abort as e:
                if e.hint is None:
                    e.hint = _(b'use --keep to keep original changesets')
                raise e

        result = buildstate(self.repo, destmap, self.collapsef)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_(b'nothing to rebase\n'))
            return _nothingtorebase()

        (self.originalwd, self.destmap, self.state) = result
        if self.collapsef:
            dests = set(self.destmap.values())
            if len(dests) != 1:
                raise error.Abort(
                    _(b'--collapse does not work with multiple destinations')
                )
            destrev = next(iter(dests))
            destancestors = self.repo.changelog.ancestors(
                [destrev], inclusive=True
            )
            self.external = externalparent(self.repo, self.state, destancestors)

        for destrev in sorted(set(destmap.values())):
            dest = self.repo[destrev]
            if dest.closesbranch() and not self.keepbranchesf:
                self.ui.status(_(b'reopening closed branch head %s\n') % dest)

        self.prepared = True

    def _assignworkingcopy(self):
        """Set self.wctx to an overlay (in-memory) or on-disk working context."""
        if self.inmemory:
            from mercurial.context import overlayworkingctx

            self.wctx = overlayworkingctx(self.repo)
            self.repo.ui.debug(b"rebasing in-memory\n")
        else:
            self.wctx = self.repo[None]
            self.repo.ui.debug(b"rebasing on disk\n")
        self.repo.ui.log(
            b"rebase",
            b"using in-memory rebase: %r\n",
            self.inmemory,
            rebase_imm_used=self.inmemory,
        )

    def _performrebase(self, tr):
        """Rebase every revision in self.state that is still marked todo.

        tr: optional single transaction to store state into; when None,
        state is written to disk after each node instead.
        """
        self._assignworkingcopy()
        repo, ui = self.repo, self.ui
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(
                        _(b'cannot collapse multiple named branches')
                    )

        # Calculate self.obsoletenotrebased
        obsrevs = _filterobsoleterevs(self.repo, self.state)
        self._handleskippingobsolete(obsrevs, self.destmap)

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()
        if tr:
            # When using single transaction, store state when transaction
            # commits.
            self.storestatus(tr)

        cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
        p = repo.ui.makeprogress(
            _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
        )

        def progress(ctx):
            # advance the progress bar, labelled with the current changeset
            p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))

        allowdivergence = self.ui.configbool(
            b'experimental', b'evolution.allowdivergence'
        )
        for subset in sortsource(self.destmap):
            sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
            if not allowdivergence:
                # exclude descendants of revs that would cause divergence
                sortedrevs -= self.repo.revs(
                    b'descendants(%ld) and not %ld',
                    self.obsoletewithoutsuccessorindestination,
                    self.obsoletewithoutsuccessorindestination,
                )
            for rev in sortedrevs:
                self._rebasenode(tr, rev, allowdivergence, progress)
        p.complete()
        ui.note(_(b'rebase merging completed\n'))

    def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
        '''Commit the wd changes with parents p1 and p2.

        Reuse commit info from rev but also store useful information in extra.
        Return node of committed revision.'''
        repo = self.repo
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        date = self.date
        if date is None:
            date = ctx.date()
        extra = {b'rebase_source': ctx.hex()}
        for c in self.extrafns:
            c(ctx, extra)
        keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
        destphase = max(ctx.phase(), phases.draft)
        overrides = {(b'phases', b'new-commit'): destphase}
        if keepbranch:
            # a branch-only change would otherwise be an empty commit
            overrides[(b'ui', b'allowemptycommit')] = True
        with repo.ui.configoverride(overrides, b'rebase'):
            if self.inmemory:
                newnode = commitmemorynode(
                    repo,
                    p1,
                    p2,
                    wctx=self.wctx,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )
                mergemod.mergestate.clean(repo)
            else:
                newnode = commitnode(
                    repo,
                    p1,
                    p2,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )

                if newnode is None:
                    # If it ended up being a no-op commit, then the normal
                    # merge state clean-up path doesn't happen, so do it
                    # here. Fix issue5494
                    mergemod.mergestate.clean(repo)
        return newnode

    def _rebasenode(self, tr, rev, allowdivergence, progressfn):
        """Rebase the single revision ``rev`` and update self.state.

        Skips revisions that were already rebased, would cause divergence,
        or have a successor already in the destination.
        """
        repo, ui, opts = self.repo, self.ui, self.opts
        dest = self.destmap[rev]
        ctx = repo[rev]
        desc = _ctxdesc(ctx)
        if self.state[rev] == rev:
            ui.status(_(b'already rebased %s\n') % desc)
        elif (
            not allowdivergence
            and rev in self.obsoletewithoutsuccessorindestination
        ):
            msg = (
                _(
                    b'note: not rebasing %s and its descendants as '
                    b'this would cause divergence\n'
                )
                % desc
            )
            repo.ui.status(msg)
            self.skipped.add(rev)
        elif rev in self.obsoletenotrebased:
            succ = self.obsoletenotrebased[rev]
            if succ is None:
                msg = _(b'note: not rebasing %s, it has no successor\n') % desc
            else:
                succdesc = _ctxdesc(repo[succ])
                msg = _(
                    b'note: not rebasing %s, already in destination as %s\n'
                ) % (desc, succdesc)
            repo.ui.status(msg)
            # Make clearrebased aware state[rev] is not a true successor
            self.skipped.add(rev)
            # Record rev as moved to its desired destination in self.state.
            # This helps bookmark and working parent movement.
            dest = max(
                adjustdest(repo, rev, self.destmap, self.state, self.skipped)
            )
            self.state[rev] = dest
        elif self.state[rev] == revtodo:
            ui.status(_(b'rebasing %s\n') % desc)
            progressfn(ctx)
            p1, p2, base = defineparents(
                repo,
                rev,
                self.destmap,
                self.state,
                self.skipped,
                self.obsoletenotrebased,
            )
            if not self.inmemory and len(repo[None].parents()) == 2:
                repo.ui.debug(b'resuming interrupted rebase\n')
            else:
                overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
                with ui.configoverride(overrides, b'rebase'):
                    stats = rebasenode(
                        repo,
                        rev,
                        p1,
                        base,
                        self.collapsef,
                        dest,
                        wctx=self.wctx,
                    )
                    if stats.unresolvedcount > 0:
                        if self.inmemory:
                            raise error.InMemoryMergeConflictsError()
                        else:
                            raise error.InterventionRequired(
                                _(
                                    b'unresolved conflicts (see hg '
                                    b'resolve, then hg rebase --continue)'
                                )
                            )
            if not self.collapsef:
                merging = p2 != nullrev
                editform = cmdutil.mergeeditform(merging, b'rebase')
                editor = cmdutil.getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
                newnode = self._concludenode(rev, p1, p2, editor)
            else:
                # Skip commit if we are collapsing
                if self.inmemory:
                    self.wctx.setbase(repo[p1])
                else:
                    repo.setparents(repo[p1].node())
                newnode = None
            # Update the state
            if newnode is not None:
                self.state[rev] = repo[newnode].rev()
                ui.debug(b'rebased as %s\n' % short(newnode))
            else:
                if not self.collapsef:
                    ui.warn(
                        _(
                            b'note: not rebasing %s, its destination already '
                            b'has all its changes\n'
                        )
                        % desc
                    )
                    self.skipped.add(rev)
                self.state[rev] = p1
                ui.debug(b'next revision set to %d\n' % p1)
        else:
            ui.status(
                _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
            )
        if not tr:
            # When not using single transaction, store state after each
            # commit is completely done. On InterventionRequired, we thus
            # won't store the status. Instead, we'll hit the "len(parents) == 2"
            # case and realize that the commit was in progress.
            self.storestatus()

    def _finishrebase(self):
        """Conclude the rebase: collapse if requested, restore the working
        directory and bookmarks, strip rebased revisions and clear state."""
        repo, ui, opts = self.repo, self.ui, self.opts
        fm = ui.formatter(b'rebase', opts)
        fm.startitem()
        if self.collapsef:
            p1, p2, _base = defineparents(
                repo,
                min(self.state),
                self.destmap,
                self.state,
                self.skipped,
                self.obsoletenotrebased,
            )
            editopt = opts.get(b'edit')
            editform = b'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                commitmsg = b'Collapsed revision'
                for rebased in sorted(self.state):
                    if rebased not in self.skipped:
                        commitmsg += b'\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            revtoreuse = max(self.state)

            newnode = self._concludenode(
                revtoreuse, p1, self.external, editor, commitmsg=commitmsg
            )

            if newnode is not None:
                # map every rebased revision onto the single collapsed node
                newrev = repo[newnode].rev()
                for oldrev in self.state:
                    self.state[oldrev] = newrev

        if b'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_(b"update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, overwrite=False)

        collapsedas = None
        if self.collapsef and not self.keepf:
            collapsedas = newnode
        clearrebased(
            ui,
            repo,
            self.destmap,
            self.state,
            self.skipped,
            collapsedas,
            self.keepf,
            fm=fm,
            backup=self.backupf,
        )

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_(b"rebase completed\n"))
        util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
        fm.end()

        if (
            self.activebookmark
            and self.activebookmark in repo._bookmarks
            and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
        ):
            bookmarks.activate(repo, self.activebookmark)

    def _abort(self, backup=True, suppwarns=False):
        '''Restore the repository to its original state.'''

        repo = self.repo
        try:
            # If the first commits in the rebased set get skipped during the
            # rebase, their values within the state mapping will be the dest
            # rev id. The rebased list must not contain the dest rev
            # (issue4896)
            rebased = [
                s
                for r, s in self.state.items()
                if s >= 0 and s != r and s != self.destmap[r]
            ]
            immutable = [d for d in rebased if not repo[d].mutable()]
            cleanup = True
            if immutable:
                repo.ui.warn(
                    _(b"warning: can't clean up public changesets %s\n")
                    % b', '.join(bytes(repo[r]) for r in immutable),
                    hint=_(b"see 'hg help phases' for details"),
                )
                cleanup = False

            descendants = set()
            if rebased:
                descendants = set(repo.changelog.descendants(rebased))
            if descendants - set(rebased):
                repo.ui.warn(
                    _(
                        b"warning: new changesets detected on "
                        b"destination branch, can't strip\n"
                    )
                )
                cleanup = False

            if cleanup:
                if rebased:
                    strippoints = [
                        c.node() for c in repo.set(b'roots(%ld)', rebased)
                    ]

                updateifonnodes = set(rebased)
                updateifonnodes.update(self.destmap.values())
                updateifonnodes.add(self.originalwd)
                shouldupdate = repo[b'.'].rev() in updateifonnodes

                # Update away from the rebase if necessary
                if shouldupdate:
                    mergemod.update(
                        repo, self.originalwd, branchmerge=False, force=True
                    )

                # Strip from the first rebased revision
                if rebased:
                    repair.strip(repo.ui, repo, strippoints, backup=backup)

            if self.activebookmark and self.activebookmark in repo._bookmarks:
                bookmarks.activate(repo, self.activebookmark)

        finally:
            clearstatus(repo)
            clearcollapsemsg(repo)
            if not suppwarns:
                repo.ui.warn(_(b'rebase aborted\n'))
        return 0
819 819
820 820
821 821 @command(
822 822 b'rebase',
823 823 [
824 824 (
825 825 b's',
826 826 b'source',
827 827 b'',
828 828 _(b'rebase the specified changeset and descendants'),
829 829 _(b'REV'),
830 830 ),
831 831 (
832 832 b'b',
833 833 b'base',
834 834 b'',
835 835 _(b'rebase everything from branching point of specified changeset'),
836 836 _(b'REV'),
837 837 ),
838 838 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
839 839 (
840 840 b'd',
841 841 b'dest',
842 842 b'',
843 843 _(b'rebase onto the specified changeset'),
844 844 _(b'REV'),
845 845 ),
846 846 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
847 847 (
848 848 b'm',
849 849 b'message',
850 850 b'',
851 851 _(b'use text as collapse commit message'),
852 852 _(b'TEXT'),
853 853 ),
854 854 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
855 855 (
856 856 b'l',
857 857 b'logfile',
858 858 b'',
859 859 _(b'read collapse commit message from file'),
860 860 _(b'FILE'),
861 861 ),
862 862 (b'k', b'keep', False, _(b'keep original changesets')),
863 863 (b'', b'keepbranches', False, _(b'keep original branch names')),
864 864 (b'D', b'detach', False, _(b'(DEPRECATED)')),
865 865 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
866 866 (b't', b'tool', b'', _(b'specify merge tool')),
867 867 (b'', b'stop', False, _(b'stop interrupted rebase')),
868 868 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
869 869 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
870 870 (
871 871 b'',
872 872 b'auto-orphans',
873 873 b'',
874 874 _(
875 875 b'automatically rebase orphan revisions '
876 876 b'in the specified revset (EXPERIMENTAL)'
877 877 ),
878 878 ),
879 879 ]
880 880 + cmdutil.dryrunopts
881 881 + cmdutil.formatteropts
882 882 + cmdutil.confirmopts,
883 883 _(b'[-s REV | -b REV] [-d REV] [OPTION]'),
884 884 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
885 885 )
886 886 def rebase(ui, repo, **opts):
887 887 """move changeset (and descendants) to a different branch
888 888
889 889 Rebase uses repeated merging to graft changesets from one part of
890 890 history (the source) onto another (the destination). This can be
891 891 useful for linearizing *local* changes relative to a master
892 892 development tree.
893 893
894 894 Published commits cannot be rebased (see :hg:`help phases`).
895 895 To copy commits, see :hg:`help graft`.
896 896
897 897 If you don't specify a destination changeset (``-d/--dest``), rebase
898 898 will use the same logic as :hg:`merge` to pick a destination. if
899 899 the current branch contains exactly one other head, the other head
900 900 is merged with by default. Otherwise, an explicit revision with
901 901 which to merge with must be provided. (destination changeset is not
902 902 modified by rebasing, but new changesets are added as its
903 903 descendants.)
904 904
905 905 Here are the ways to select changesets:
906 906
907 907 1. Explicitly select them using ``--rev``.
908 908
909 909 2. Use ``--source`` to select a root changeset and include all of its
910 910 descendants.
911 911
912 912 3. Use ``--base`` to select a changeset; rebase will find ancestors
913 913 and their descendants which are not also ancestors of the destination.
914 914
915 915 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
916 916 rebase will use ``--base .`` as above.
917 917
918 918 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
919 919 can be used in ``--dest``. Destination would be calculated per source
920 920 revision with ``SRC`` substituted by that single source revision and
921 921 ``ALLSRC`` substituted by all source revisions.
922 922
923 923 Rebase will destroy original changesets unless you use ``--keep``.
924 924 It will also move your bookmarks (even if you do).
925 925
926 926 Some changesets may be dropped if they do not contribute changes
927 927 (e.g. merges from the destination branch).
928 928
929 929 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
930 930 a named branch with two heads. You will need to explicitly specify source
931 931 and/or destination.
932 932
933 933 If you need to use a tool to automate merge/conflict decisions, you
934 934 can specify one with ``--tool``, see :hg:`help merge-tools`.
935 935 As a caveat: the tool will not be used to mediate when a file was
936 936 deleted, there is no hook presently available for this.
937 937
938 938 If a rebase is interrupted to manually resolve a conflict, it can be
939 939 continued with --continue/-c, aborted with --abort/-a, or stopped with
940 940 --stop.
941 941
942 942 .. container:: verbose
943 943
944 944 Examples:
945 945
946 946 - move "local changes" (current commit back to branching point)
947 947 to the current branch tip after a pull::
948 948
949 949 hg rebase
950 950
951 951 - move a single changeset to the stable branch::
952 952
953 953 hg rebase -r 5f493448 -d stable
954 954
955 955 - splice a commit and all its descendants onto another part of history::
956 956
957 957 hg rebase --source c0c3 --dest 4cf9
958 958
959 959 - rebase everything on a branch marked by a bookmark onto the
960 960 default branch::
961 961
962 962 hg rebase --base myfeature --dest default
963 963
964 964 - collapse a sequence of changes into a single commit::
965 965
966 966 hg rebase --collapse -r 1520:1525 -d .
967 967
968 968 - move a named branch while preserving its name::
969 969
970 970 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
971 971
972 972 - stabilize orphaned changesets so history looks linear::
973 973
974 974 hg rebase -r 'orphan()-obsolete()'\
975 975 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
976 976 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
977 977
978 978 Configuration Options:
979 979
980 980 You can make rebase require a destination if you set the following config
981 981 option::
982 982
983 983 [commands]
984 984 rebase.requiredest = True
985 985
986 986 By default, rebase will close the transaction after each commit. For
987 987 performance purposes, you can configure rebase to use a single transaction
988 988 across the entire rebase. WARNING: This setting introduces a significant
989 989 risk of losing the work you've done in a rebase if the rebase aborts
990 990 unexpectedly::
991 991
992 992 [rebase]
993 993 singletransaction = True
994 994
995 995 By default, rebase writes to the working copy, but you can configure it to
996 996 run in-memory for better performance. When the rebase is not moving the
997 997 parent(s) of the working copy (AKA the "currently checked out changesets"),
998 998 this may also allow it to run even if the working copy is dirty::
999 999
1000 1000 [rebase]
1001 1001 experimental.inmemory = True
1002 1002
1003 1003 Return Values:
1004 1004
1005 1005 Returns 0 on success, 1 if nothing to rebase or there are
1006 1006 unresolved conflicts.
1007 1007
1008 1008 """
1009 1009 opts = pycompat.byteskwargs(opts)
1010 1010 inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
1011 1011 action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
1012 1012 if action:
1013 1013 cmdutil.check_incompatible_arguments(
1014 opts, action, b'confirm', b'dry_run'
1014 opts, action, [b'confirm', b'dry_run']
1015 1015 )
1016 1016 cmdutil.check_incompatible_arguments(
1017 opts, action, b'rev', b'source', b'base', b'dest'
1017 opts, action, [b'rev', b'source', b'base', b'dest']
1018 1018 )
1019 1019 cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
1020 1020 cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
1021 1021
1022 1022 if action or repo.currenttransaction() is not None:
1023 1023 # in-memory rebase is not compatible with resuming rebases.
1024 1024 # (Or if it is run within a transaction, since the restart logic can
1025 1025 # fail the entire transaction.)
1026 1026 inmemory = False
1027 1027
1028 1028 if opts.get(b'auto_orphans'):
1029 1029 disallowed_opts = set(opts) - {b'auto_orphans'}
1030 1030 cmdutil.check_incompatible_arguments(
1031 opts, b'auto_orphans', *disallowed_opts
1031 opts, b'auto_orphans', disallowed_opts
1032 1032 )
1033 1033
1034 1034 userrevs = list(repo.revs(opts.get(b'auto_orphans')))
1035 1035 opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
1036 1036 opts[b'dest'] = b'_destautoorphanrebase(SRC)'
1037 1037
1038 1038 if opts.get(b'dry_run') or opts.get(b'confirm'):
1039 1039 return _dryrunrebase(ui, repo, action, opts)
1040 1040 elif action == b'stop':
1041 1041 rbsrt = rebaseruntime(repo, ui)
1042 1042 with repo.wlock(), repo.lock():
1043 1043 rbsrt.restorestatus()
1044 1044 if rbsrt.collapsef:
1045 1045 raise error.Abort(_(b"cannot stop in --collapse session"))
1046 1046 allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
1047 1047 if not (rbsrt.keepf or allowunstable):
1048 1048 raise error.Abort(
1049 1049 _(
1050 1050 b"cannot remove original changesets with"
1051 1051 b" unrebased descendants"
1052 1052 ),
1053 1053 hint=_(
1054 1054 b'either enable obsmarkers to allow unstable '
1055 1055 b'revisions or use --keep to keep original '
1056 1056 b'changesets'
1057 1057 ),
1058 1058 )
1059 1059 # update to the current working revision
1060 1060 # to clear interrupted merge
1061 1061 hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
1062 1062 rbsrt._finishrebase()
1063 1063 return 0
1064 1064 elif inmemory:
1065 1065 try:
1066 1066 # in-memory merge doesn't support conflicts, so if we hit any, abort
1067 1067 # and re-run as an on-disk merge.
1068 1068 overrides = {(b'rebase', b'singletransaction'): True}
1069 1069 with ui.configoverride(overrides, b'rebase'):
1070 1070 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
1071 1071 except error.InMemoryMergeConflictsError:
1072 1072 ui.warn(
1073 1073 _(
1074 1074 b'hit merge conflicts; re-running rebase without in-memory'
1075 1075 b' merge\n'
1076 1076 )
1077 1077 )
1078 1078 # TODO: Make in-memory merge not use the on-disk merge state, so
1079 1079 # we don't have to clean it here
1080 1080 mergemod.mergestate.clean(repo)
1081 1081 clearstatus(repo)
1082 1082 clearcollapsemsg(repo)
1083 1083 return _dorebase(ui, repo, action, opts, inmemory=False)
1084 1084 else:
1085 1085 return _dorebase(ui, repo, action, opts)
1086 1086
1087 1087
def _dryrunrebase(ui, repo, action, opts):
    """Perform an in-memory trial rebase for --dry-run / --confirm.

    Runs the full rebase in memory (never touching the working copy),
    leaving the rebase state unfinished so it can either be finalized
    (--confirm answered "yes") or rolled back (--dry-run, or --confirm
    answered "no").

    Returns 0 on success, 1 if the trial rebase hit a merge conflict.
    """
    rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
    confirm = opts.get(b'confirm')
    if confirm:
        ui.status(_(b'starting in-memory rebase\n'))
    else:
        ui.status(
            _(b'starting dry-run rebase; repository will not be changed\n')
        )
    with repo.wlock(), repo.lock():
        # needsabort tracks whether the ``finally`` block must still roll
        # back the unfinished rebase; success/abort paths clear it.
        needsabort = True
        try:
            # Force a single transaction so the whole trial can be rolled
            # back atomically.
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                _origrebase(
                    ui,
                    repo,
                    action,
                    opts,
                    rbsrt,
                    inmemory=True,
                    leaveunfinished=True,
                )
        except error.InMemoryMergeConflictsError:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            # error.Abort aborts the transaction itself; don't double-abort.
            needsabort = False
            raise
        else:
            if confirm:
                ui.status(_(b'rebase completed successfully\n'))
                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase()
                else:
                    rbsrt._prepareabortorcontinue(
                        isabort=True, backup=False, suppwarns=True
                    )
                needsabort = False
            else:
                ui.status(
                    _(
                        b'dry-run rebase completed successfully; run without'
                        b' -n/--dry-run to perform this rebase\n'
                    )
                )
            return 0
        finally:
            if needsabort:
                # no need to store backup in case of dryrun
                rbsrt._prepareabortorcontinue(
                    isabort=True, backup=False, suppwarns=True
                )
1142 1142
1143 1143
def _dorebase(ui, repo, action, opts, inmemory=False):
    """Run a real (non-dry-run) rebase with a freshly built runtime state."""
    runtime = rebaseruntime(repo, ui, inmemory, opts)
    return _origrebase(ui, repo, action, opts, runtime, inmemory=inmemory)
1147 1147
1148 1148
def _origrebase(
    ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
):
    """Validate options and drive a rebase using the runtime state ``rbsrt``.

    ``action`` is None for a new rebase, or b'continue'/b'abort' when
    resuming an interrupted one (b'stop' is handled by the caller, hence
    the assert). ``leaveunfinished`` keeps the rebase state around after
    performing it, which --dry-run/--confirm rely on.

    Returns an int status code on early exit, otherwise None.
    """
    assert action != b'stop'
    with repo.wlock(), repo.lock():
        # --interactive is not implemented by rebase; direct the user to
        # histedit (with a hint on enabling it if it isn't loaded).
        if opts.get(b'interactive'):
            try:
                if extensions.find(b'histedit'):
                    enablehistedit = b''
            except KeyError:
                enablehistedit = b" --config extensions.histedit="
            help = b"hg%s help -e histedit" % enablehistedit
            msg = (
                _(
                    b"interactive history editing is supported by the "
                    b"'histedit' extension (see \"%s\")"
                )
                % help
            )
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(_(b'message can only be specified with collapse'))

        if action:
            # Resuming (continue/abort): restore saved state instead of
            # computing a new destination map.
            if rbsrt.collapsef:
                raise error.Abort(
                    _(b'cannot use collapse with continue or abort')
                )
            if action == b'abort' and opts.get(b'tool', False):
                ui.warn(_(b'tool option will be ignored\n'))
            if action == b'continue':
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(
                isabort=(action == b'abort')
            )
            if retcode is not None:
                return retcode
        else:
            # search default destination in this space
            # used in the 'hg pull --rebase' case, see issue 5214.
            destspace = opts.get(b'_destspace')
            destmap = _definedestmap(
                ui,
                repo,
                inmemory,
                opts.get(b'dest', None),
                opts.get(b'source', None),
                opts.get(b'base', None),
                opts.get(b'rev', []),
                destspace=destspace,
            )
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool(b'rebase', b'singletransaction')
        if singletr:
            tr = repo.transaction(b'rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not leaveunfinished:
                    rbsrt._finishrebase()
1227 1227
1228 1228
def _definedestmap(
    ui,
    repo,
    inmemory,
    destf=None,
    srcf=None,
    basef=None,
    revf=None,
    destspace=None,
):
    """use revisions argument to define destmap {srcrev: destrev}

    ``destf``/``srcf``/``basef``/``revf`` are the raw --dest/--source/--base/
    --rev command-line values. Returns a dict mapping each source revision to
    its destination revision, or None when there is nothing to rebase (a
    status message is printed in that case).
    """
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details

    cmdutil.checkunfinished(repo)
    if not inmemory:
        cmdutil.bailifchanged(repo)

    if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
        raise error.Abort(
            _(b'you must specify a destination'),
            hint=_(b'use: hg rebase -d REV'),
        )

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, [srcf])
        if not src:
            ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
            return None
        rebaseset = repo.revs(b'(%ld)::', src)
        assert rebaseset
    else:
        # --base (or no selection at all, which defaults to --base .)
        base = scmutil.revrange(repo, [basef or b'.'])
        if not base:
            ui.status(
                _(b'empty "base" revision set - ' b"can't compute rebase set\n")
            )
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = [] # selected children of branching points
        bpbase = {} # {branchingpoint: [origbase]}
        for b in base: # group bases by branching points
            bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in pycompat.iteritems(bpbase): # calculate roots
            roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs(b'%ld::', roots)

        if not rebaseset:
            # Empty rebase set: report *why* there is nothing to rebase.
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - %s is both "base"'
                            b' and destination\n'
                        )
                        % dest
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working directory '
                            b'parent is also destination\n'
                        )
                    )
            elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - "base" %s is '
                            b'already an ancestor of destination '
                            b'%s\n'
                        )
                        % (b'+'.join(bytes(repo[r]) for r in base), dest)
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working '
                            b'directory parent is already an '
                            b'ancestor of destination %s\n'
                        )
                        % dest
                    )
            else: # can it happen?
                ui.status(
                    _(b'nothing to rebase from %s to %s\n')
                    % (b'+'.join(bytes(repo[r]) for r in base), dest)
                )
            return None

    rebasingwcp = repo[b'.'].rev() in rebaseset
    ui.log(
        b"rebase",
        b"rebasing working copy parent: %r\n",
        rebasingwcp,
        rebase_rebasing_wcp=rebasingwcp,
    )
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec(b'%ld', rebaseset)
    alias = {b'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(
                        _(b'rebase destination for %s is not unique') % repo[r]
                    )

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset} # {srcrev: destrev}

    if not destmap:
        ui.status(_(b'nothing to rebase - empty destination\n'))
        return None

    return destmap
1395 1395
1396 1396
def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state is collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    source = min(state)
    # Collect parents of the collapsed set that lie outside both the set
    # itself and the destination's ancestors.
    external = {
        p.rev()
        for rev in state
        if rev != source
        for p in repo[rev].parents()
        if p.rev() not in state and p.rev() not in destancestors
    }
    if not external:
        return nullrev
    if len(external) == 1:
        return external.pop()
    raise error.Abort(
        _(
            b'unable to collapse on top of %d, there is more '
            b'than one external parent: %s'
        )
        % (max(destancestors), b', '.join(b"%d" % p for p in sorted(external)))
    )
1421 1421
1422 1422
def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision (None if there was nothing to commit).'''
    # Mirror the empty-commit check performed by ``repo.commit``.
    allowempty = repo.ui.configbool(b'ui', b'allowemptycommit')
    if wctx.isempty() and not allowempty:
        return None

    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = extra.get(b'branch')

    wctx.setparents(repo[p1].node(), repo[p2].node())
    mctx = wctx.tomemctx(
        commitmsg,
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
    node = repo.commitctx(mctx)
    wctx.clean()  # Might be reused
    return node
1448 1448
1449 1449
def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    # In singletransaction mode the outer scope handles dirstate protection,
    # so only wrap with a real dirstate guard otherwise.
    if repo.ui.configbool(b'rebase', b'singletransaction'):
        guard = util.nullcontextmanager()
    else:
        guard = dirstateguard.dirstateguard(repo, b'rebase')
    with guard:
        repo.setparents(repo[p1].node(), repo[p2].node())

        # Commit might fail if unresolved files exist
        newnode = repo.commit(
            text=commitmsg, user=user, date=date, extra=extra, editor=editor
        )

        repo.dirstate.setbranch(repo[newnode].branch())
        return newnode
1466 1466
1467 1467
def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
    """Rebase a single revision rev on top of p1 using base as merge ancestor

    ``base`` may be None, in which case the merge machinery picks the
    ancestor itself. ``wctx`` is either the on-disk working context or an
    in-memory one; ``collapse`` selects which revision copy metadata is
    duplicated from. Returns the merge stats from the final update.
    """
    # Merge phase
    # Update to destination and merge it with local
    p1ctx = repo[p1]
    if wctx.isinmemory():
        wctx.setbase(p1ctx)
    else:
        if repo[b'.'].rev() != p1:
            repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
            mergemod.update(repo, p1, branchmerge=False, force=True)
        else:
            repo.ui.debug(b" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    ctx = repo[rev]
    repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx))
    if base is not None:
        repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))

    # See explanation in merge.graft()
    mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
    stats = mergemod.update(
        repo,
        rev,
        branchmerge=True,
        force=True,
        ancestor=base,
        mergeancestor=mergeancestor,
        labels=[b'dest', b'source'],
        wc=wctx,
    )
    if collapse:
        copies.graftcopies(wctx, ctx, repo[dest])
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent.
        copies.graftcopies(wctx, ctx, ctx.p1())
    return stats
1510 1510
1511 1511
def adjustdest(repo, rev, destmap, state, skipped):
    r"""adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when doing rebasing B+E to F, C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

        H       C1 G1
       /|       | /
      F G       |/
    K | |  ->   K
    | C D       |
    | |/        |
    | B         | ...
    |/          |/
    A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

          C'   After rebasing C, when considering B's destination, use C'
          |    instead of the original C.
      B   D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [
        s
        for s, d in state.items()
        if d > 0 and destmap[s] == dest and s not in skipped
    ]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            # the latest rebased ancestor of prev (if any) becomes the
            # adjusted destination for this parent
            candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    b'rev %d should be rebased already at this time' % dest
                )
        result.append(adjusted)
    return result
1588 1588
1589 1589
1590 1590 def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
1591 1591 """
1592 1592 Abort if rebase will create divergence or rebase is noop because of markers
1593 1593
1594 1594 `rebaseobsrevs`: set of obsolete revision in source
1595 1595 `rebaseobsskipped`: set of revisions from source skipped because they have
1596 1596 successors in destination or no non-obsolete successor.
1597 1597 """
1598 1598 # Obsolete node with successors not in dest leads to divergence
1599 1599 divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
1600 1600 divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
1601 1601
1602 1602 if divergencebasecandidates and not divergenceok:
1603 1603 divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
1604 1604 msg = _(b"this rebase will cause divergences from: %s")
1605 1605 h = _(
1606 1606 b"to force the rebase please set "
1607 1607 b"experimental.evolution.allowdivergence=True"
1608 1608 )
1609 1609 raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1610 1610
1611 1611
def successorrevs(unfi, rev):
    """yield revision numbers for successors of rev"""
    assert unfi.filtername is None
    node_to_rev = unfi.changelog.index.get_rev
    startnodes = [unfi[rev].node()]
    for succnode in obsutil.allsuccessors(unfi.obsstore, startnodes):
        succrev = node_to_rev(succnode)
        # successors may be unknown locally; skip those
        if succrev is not None:
            yield succrev
1620 1620
1621 1621
def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    Returns a 3-tuple ``(newp1, newp2, base)`` where ``base`` may be None
    when the plain changelog ancestor suffices as merge base.

    The destination specified by "dest" cannot always be used directly because
    previously rebase result could affect destination. For example,

          D E    rebase -r C+D+E -d B
          |/     C will be rebased to C'
        B C      D's new destination will be C' instead of B
        |/       E's new destination will be C' instead of B
        A

    The new parents of a merge is slightly more complicated. See the comment
    block below.
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    isancestor = cl.isancestorrev

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev)  # old parents
    newps = [nullrev, nullrev]  # new parents
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps)  # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limit because "--dest" only
        # accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        #   1. use dest, if dest is a descendent of (p or one of p's successors)
        #   2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        #   1. decide which parents will not be moved towards dest
        #   2. if the above decision is "no", should a parent still be moved
        #      because it was rebased?
        #
        # For example:
        #
        #     C    # "rebase -r C -d D" is an error since none of the parents
        #    /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #   A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries to be not rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p  # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # "bases" only record "special" merge bases that cannot be
            # calculated from changelog DAG (i.e. isancestor(p, np) is False).
            # For example:
            #
            #   B'   # rebase -s B -d D, when B was rebased to B'. dest for C
            #   | C  # is B', but merge base for C is B, instead of
            #   D |  # changelog.ancestor(C, B') == A. If changelog DAG and
            #   | B  # "state" edges are merged (so there will be an edge from
            #   |/   # B to B'), the merge base is still ancestor(C, B') in
            #   A    # the merged graph.
            #
            # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
            # which uses "virtual null merge" to explain this situation.
            if isancestor(p, np):
                bases[i] = nullrev

            # If one parent becomes an ancestor of the other, drop the ancestor
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x):  # CASE-1
                    np = nullrev
                elif isancestor(x, np):  # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

        # "rebasenode" updates to new p1, and the old p1 will be used as merge
        # base. If only p2 changes, merging using unchanged p1 as merge base is
        # suboptimal. Therefore swap parents to make the merge sane.
        if newps[1] != nullrev and oldps[0] == newps[0]:
            assert len(newps) == 2 and len(oldps) == 2
            newps.reverse()
            bases.reverse()

        # No parent change might be an error because we fail to make rev a
        # descendent of requested dest. This can happen, for example:
        #
        #     C    # rebase -r C -d D
        #    /|    # None of A and B will be changed to D and rebase fails.
        #   A B D
        if set(newps) == set(oldps) and dest not in newps:
            raise error.Abort(
                _(
                    b'cannot rebase %d:%s without '
                    b'moving at least one of its parents'
                )
                % (rev, repo[rev])
            )

    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.Abort(_(b'source is ancestor of destination'))

    # "rebasenode" updates to new p1, use the corresponding merge base.
    if bases[0] != nullrev:
        base = bases[0]
    else:
        base = None

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   #  chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in set(bases) if b != nullrev) > 1:
        unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            other_bases = set(bases) - {base}
            siderevs = list(
                repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
            )

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [
                    r for r, d in state.items() if d > 0 and r not in obsskipped
                ]
                merges = [
                    r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
                ]
                unwanted[i] = list(
                    repo.revs(
                        b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
                    )
                )

        # Choose a merge base that has a minimal number of unwanted revs.
        l, i = min(
            (len(revs), i)
            for i, revs in enumerate(unwanted)
            if revs is not None
        )

        # The merge will include unwanted revisions. Abort now. Revisit this if
        # we have a more advanced merge algorithm that handles multiple bases.
        if l > 0:
            unwanteddesc = _(b' or ').join(
                (
                    b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
                    for revs in unwanted
                    if revs is not None
                )
            )
            raise error.Abort(
                _(b'rebasing %d:%s will include unwanted changes from %s')
                % (rev, repo[rev], unwanteddesc)
            )

        base = bases[i]

        # newps[0] should match merge base if possible. Currently, if newps[i]
        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
        # the other's ancestor. In that case, it's fine to not swap newps here.
        # (see CASE-1 and CASE-2 above)
        if i != 0 and newps[i] != nullrev:
            newps[0], newps[i] = newps[i], newps[0]

    repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base
1824 1824
1825 1825
1826 1826 def isagitpatch(repo, patchname):
1827 1827 """Return true if the given patch is in git format"""
1828 1828 mqpatch = os.path.join(repo.mq.path, patchname)
1829 1829 for line in patch.linereader(open(mqpatch, b'rb')):
1830 1830 if line.startswith(b'diff --git'):
1831 1831 return True
1832 1832 return False
1833 1833
1834 1834
1835 1835 def updatemq(repo, state, skipped, **opts):
1836 1836 """Update rebased mq patches - finalize and then import them"""
1837 1837 mqrebase = {}
1838 1838 mq = repo.mq
1839 1839 original_series = mq.fullseries[:]
1840 1840 skippedpatches = set()
1841 1841
1842 1842 for p in mq.applied:
1843 1843 rev = repo[p.node].rev()
1844 1844 if rev in state:
1845 1845 repo.ui.debug(
1846 1846 b'revision %d is an mq patch (%s), finalize it.\n'
1847 1847 % (rev, p.name)
1848 1848 )
1849 1849 mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
1850 1850 else:
1851 1851 # Applied but not rebased, not sure this should happen
1852 1852 skippedpatches.add(p.name)
1853 1853
1854 1854 if mqrebase:
1855 1855 mq.finish(repo, mqrebase.keys())
1856 1856
1857 1857 # We must start import from the newest revision
1858 1858 for rev in sorted(mqrebase, reverse=True):
1859 1859 if rev not in skipped:
1860 1860 name, isgit = mqrebase[rev]
1861 1861 repo.ui.note(
1862 1862 _(b'updating mq patch %s to %d:%s\n')
1863 1863 % (name, state[rev], repo[state[rev]])
1864 1864 )
1865 1865 mq.qimport(
1866 1866 repo,
1867 1867 (),
1868 1868 patchname=name,
1869 1869 git=isgit,
1870 1870 rev=[b"%d" % state[rev]],
1871 1871 )
1872 1872 else:
1873 1873 # Rebased and skipped
1874 1874 skippedpatches.add(mqrebase[rev][0])
1875 1875
1876 1876 # Patches were either applied and rebased and imported in
1877 1877 # order, applied and removed or unapplied. Discard the removed
1878 1878 # ones while preserving the original series order and guards.
1879 1879 newseries = [
1880 1880 s
1881 1881 for s in original_series
1882 1882 if mq.guard_re.split(s, 1)[0] not in skippedpatches
1883 1883 ]
1884 1884 mq.fullseries[:] = newseries
1885 1885 mq.seriesdirty = True
1886 1886 mq.savedirty()
1887 1887
1888 1888
1889 1889 def storecollapsemsg(repo, collapsemsg):
1890 1890 """Store the collapse message to allow recovery"""
1891 1891 collapsemsg = collapsemsg or b''
1892 1892 f = repo.vfs(b"last-message.txt", b"w")
1893 1893 f.write(b"%s\n" % collapsemsg)
1894 1894 f.close()
1895 1895
1896 1896
1897 1897 def clearcollapsemsg(repo):
1898 1898 """Remove collapse message file"""
1899 1899 repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
1900 1900
1901 1901
1902 1902 def restorecollapsemsg(repo, isabort):
1903 1903 """Restore previously stored collapse message"""
1904 1904 try:
1905 1905 f = repo.vfs(b"last-message.txt")
1906 1906 collapsemsg = f.readline().strip()
1907 1907 f.close()
1908 1908 except IOError as err:
1909 1909 if err.errno != errno.ENOENT:
1910 1910 raise
1911 1911 if isabort:
1912 1912 # Oh well, just abort like normal
1913 1913 collapsemsg = b''
1914 1914 else:
1915 1915 raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
1916 1916 return collapsemsg
1917 1917
1918 1918
1919 1919 def clearstatus(repo):
1920 1920 """Remove the status files"""
1921 1921 # Make sure the active transaction won't write the state file
1922 1922 tr = repo.currenttransaction()
1923 1923 if tr:
1924 1924 tr.removefilegenerator(b'rebasestate')
1925 1925 repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
1926 1926
1927 1927
1928 1928 def sortsource(destmap):
1929 1929 """yield source revisions in an order that we only rebase things once
1930 1930
1931 1931 If source and destination overlaps, we should filter out revisions
1932 1932 depending on other revisions which hasn't been rebased yet.
1933 1933
1934 1934 Yield a sorted list of revisions each time.
1935 1935
1936 1936 For example, when rebasing A to B, B to C. This function yields [B], then
1937 1937 [A], indicating B needs to be rebased first.
1938 1938
1939 1939 Raise if there is a cycle so the rebase is impossible.
1940 1940 """
1941 1941 srcset = set(destmap)
1942 1942 while srcset:
1943 1943 srclist = sorted(srcset)
1944 1944 result = []
1945 1945 for r in srclist:
1946 1946 if destmap[r] not in srcset:
1947 1947 result.append(r)
1948 1948 if not result:
1949 1949 raise error.Abort(_(b'source and destination form a cycle'))
1950 1950 srcset -= set(result)
1951 1951 yield result
1952 1952
1953 1953
1954 1954 def buildstate(repo, destmap, collapse):
1955 1955 '''Define which revisions are going to be rebased and where
1956 1956
1957 1957 repo: repo
1958 1958 destmap: {srcrev: destrev}
1959 1959 '''
1960 1960 rebaseset = destmap.keys()
1961 1961 originalwd = repo[b'.'].rev()
1962 1962
1963 1963 # This check isn't strictly necessary, since mq detects commits over an
1964 1964 # applied patch. But it prevents messing up the working directory when
1965 1965 # a partially completed rebase is blocked by mq.
1966 1966 if b'qtip' in repo.tags():
1967 1967 mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
1968 1968 if set(destmap.values()) & mqapplied:
1969 1969 raise error.Abort(_(b'cannot rebase onto an applied mq patch'))
1970 1970
1971 1971 # Get "cycle" error early by exhausting the generator.
1972 1972 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1973 1973 if not sortedsrc:
1974 1974 raise error.Abort(_(b'no matching revisions'))
1975 1975
1976 1976 # Only check the first batch of revisions to rebase not depending on other
1977 1977 # rebaseset. This means "source is ancestor of destination" for the second
1978 1978 # (and following) batches of revisions are not checked here. We rely on
1979 1979 # "defineparents" to do that check.
1980 1980 roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
1981 1981 if not roots:
1982 1982 raise error.Abort(_(b'no matching revisions'))
1983 1983
1984 1984 def revof(r):
1985 1985 return r.rev()
1986 1986
1987 1987 roots = sorted(roots, key=revof)
1988 1988 state = dict.fromkeys(rebaseset, revtodo)
1989 1989 emptyrebase = len(sortedsrc) == 1
1990 1990 for root in roots:
1991 1991 dest = repo[destmap[root.rev()]]
1992 1992 commonbase = root.ancestor(dest)
1993 1993 if commonbase == root:
1994 1994 raise error.Abort(_(b'source is ancestor of destination'))
1995 1995 if commonbase == dest:
1996 1996 wctx = repo[None]
1997 1997 if dest == wctx.p1():
1998 1998 # when rebasing to '.', it will use the current wd branch name
1999 1999 samebranch = root.branch() == wctx.branch()
2000 2000 else:
2001 2001 samebranch = root.branch() == dest.branch()
2002 2002 if not collapse and samebranch and dest in root.parents():
2003 2003 # mark the revision as done by setting its new revision
2004 2004 # equal to its old (current) revisions
2005 2005 state[root.rev()] = root.rev()
2006 2006 repo.ui.debug(b'source is a child of destination\n')
2007 2007 continue
2008 2008
2009 2009 emptyrebase = False
2010 2010 repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
2011 2011 if emptyrebase:
2012 2012 return None
2013 2013 for rev in sorted(state):
2014 2014 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
2015 2015 # if all parents of this revision are done, then so is this revision
2016 2016 if parents and all((state.get(p) == p for p in parents)):
2017 2017 state[rev] = rev
2018 2018 return originalwd, destmap, state
2019 2019
2020 2020
2021 2021 def clearrebased(
2022 2022 ui,
2023 2023 repo,
2024 2024 destmap,
2025 2025 state,
2026 2026 skipped,
2027 2027 collapsedas=None,
2028 2028 keepf=False,
2029 2029 fm=None,
2030 2030 backup=True,
2031 2031 ):
2032 2032 """dispose of rebased revision at the end of the rebase
2033 2033
2034 2034 If `collapsedas` is not None, the rebase was a collapse whose result if the
2035 2035 `collapsedas` node.
2036 2036
2037 2037 If `keepf` is not True, the rebase has --keep set and no nodes should be
2038 2038 removed (but bookmarks still need to be moved).
2039 2039
2040 2040 If `backup` is False, no backup will be stored when stripping rebased
2041 2041 revisions.
2042 2042 """
2043 2043 tonode = repo.changelog.node
2044 2044 replacements = {}
2045 2045 moves = {}
2046 2046 stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
2047 2047
2048 2048 collapsednodes = []
2049 2049 for rev, newrev in sorted(state.items()):
2050 2050 if newrev >= 0 and newrev != rev:
2051 2051 oldnode = tonode(rev)
2052 2052 newnode = collapsedas or tonode(newrev)
2053 2053 moves[oldnode] = newnode
2054 2054 succs = None
2055 2055 if rev in skipped:
2056 2056 if stripcleanup or not repo[rev].obsolete():
2057 2057 succs = ()
2058 2058 elif collapsedas:
2059 2059 collapsednodes.append(oldnode)
2060 2060 else:
2061 2061 succs = (newnode,)
2062 2062 if succs is not None:
2063 2063 replacements[(oldnode,)] = succs
2064 2064 if collapsednodes:
2065 2065 replacements[tuple(collapsednodes)] = (collapsedas,)
2066 2066 if fm:
2067 2067 hf = fm.hexfunc
2068 2068 fl = fm.formatlist
2069 2069 fd = fm.formatdict
2070 2070 changes = {}
2071 2071 for oldns, newn in pycompat.iteritems(replacements):
2072 2072 for oldn in oldns:
2073 2073 changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
2074 2074 nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
2075 2075 fm.data(nodechanges=nodechanges)
2076 2076 if keepf:
2077 2077 replacements = {}
2078 2078 scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
2079 2079
2080 2080
2081 2081 def pullrebase(orig, ui, repo, *args, **opts):
2082 2082 """Call rebase after pull if the latter has been invoked with --rebase"""
2083 2083 if opts.get('rebase'):
2084 2084 if ui.configbool(b'commands', b'rebase.requiredest'):
2085 2085 msg = _(b'rebase destination required by configuration')
2086 2086 hint = _(b'use hg pull followed by hg rebase -d DEST')
2087 2087 raise error.Abort(msg, hint=hint)
2088 2088
2089 2089 with repo.wlock(), repo.lock():
2090 2090 if opts.get('update'):
2091 2091 del opts['update']
2092 2092 ui.debug(
2093 2093 b'--update and --rebase are not compatible, ignoring '
2094 2094 b'the update flag\n'
2095 2095 )
2096 2096
2097 2097 cmdutil.checkunfinished(repo, skipmerge=True)
2098 2098 cmdutil.bailifchanged(
2099 2099 repo,
2100 2100 hint=_(
2101 2101 b'cannot pull with rebase: '
2102 2102 b'please commit or shelve your changes first'
2103 2103 ),
2104 2104 )
2105 2105
2106 2106 revsprepull = len(repo)
2107 2107 origpostincoming = commands.postincoming
2108 2108
2109 2109 def _dummy(*args, **kwargs):
2110 2110 pass
2111 2111
2112 2112 commands.postincoming = _dummy
2113 2113 try:
2114 2114 ret = orig(ui, repo, *args, **opts)
2115 2115 finally:
2116 2116 commands.postincoming = origpostincoming
2117 2117 revspostpull = len(repo)
2118 2118 if revspostpull > revsprepull:
2119 2119 # --rev option from pull conflict with rebase own --rev
2120 2120 # dropping it
2121 2121 if 'rev' in opts:
2122 2122 del opts['rev']
2123 2123 # positional argument from pull conflicts with rebase's own
2124 2124 # --source.
2125 2125 if 'source' in opts:
2126 2126 del opts['source']
2127 2127 # revsprepull is the len of the repo, not revnum of tip.
2128 2128 destspace = list(repo.changelog.revs(start=revsprepull))
2129 2129 opts['_destspace'] = destspace
2130 2130 try:
2131 2131 rebase(ui, repo, **opts)
2132 2132 except error.NoMergeDestAbort:
2133 2133 # we can maybe update instead
2134 2134 rev, _a, _b = destutil.destupdate(repo)
2135 2135 if rev == repo[b'.'].rev():
2136 2136 ui.status(_(b'nothing to rebase\n'))
2137 2137 else:
2138 2138 ui.status(_(b'nothing to rebase - updating instead\n'))
2139 2139 # not passing argument to get the bare update behavior
2140 2140 # with warning and trumpets
2141 2141 commands.update(ui, repo)
2142 2142 else:
2143 2143 if opts.get('tool'):
2144 2144 raise error.Abort(_(b'--tool can only be used with --rebase'))
2145 2145 ret = orig(ui, repo, *args, **opts)
2146 2146
2147 2147 return ret
2148 2148
2149 2149
2150 2150 def _filterobsoleterevs(repo, revs):
2151 2151 """returns a set of the obsolete revisions in revs"""
2152 2152 return set(r for r in revs if repo[r].obsolete())
2153 2153
2154 2154
2155 2155 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
2156 2156 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
2157 2157
2158 2158 `obsoletenotrebased` is a mapping mapping obsolete => successor for all
2159 2159 obsolete nodes to be rebased given in `rebaseobsrevs`.
2160 2160
2161 2161 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
2162 2162 without a successor in destination.
2163 2163
2164 2164 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
2165 2165 obsolete successors.
2166 2166 """
2167 2167 obsoletenotrebased = {}
2168 2168 obsoletewithoutsuccessorindestination = set()
2169 2169 obsoleteextinctsuccessors = set()
2170 2170
2171 2171 assert repo.filtername is None
2172 2172 cl = repo.changelog
2173 2173 get_rev = cl.index.get_rev
2174 2174 extinctrevs = set(repo.revs(b'extinct()'))
2175 2175 for srcrev in rebaseobsrevs:
2176 2176 srcnode = cl.node(srcrev)
2177 2177 # XXX: more advanced APIs are required to handle split correctly
2178 2178 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
2179 2179 # obsutil.allsuccessors includes node itself
2180 2180 successors.remove(srcnode)
2181 2181 succrevs = {get_rev(s) for s in successors}
2182 2182 succrevs.discard(None)
2183 2183 if succrevs.issubset(extinctrevs):
2184 2184 # all successors are extinct
2185 2185 obsoleteextinctsuccessors.add(srcrev)
2186 2186 if not successors:
2187 2187 # no successor
2188 2188 obsoletenotrebased[srcrev] = None
2189 2189 else:
2190 2190 dstrev = destmap[srcrev]
2191 2191 for succrev in succrevs:
2192 2192 if cl.isancestorrev(succrev, dstrev):
2193 2193 obsoletenotrebased[srcrev] = succrev
2194 2194 break
2195 2195 else:
2196 2196 # If 'srcrev' has a successor in rebase set but none in
2197 2197 # destination (which would be catched above), we shall skip it
2198 2198 # and its descendants to avoid divergence.
2199 2199 if srcrev in extinctrevs or any(s in destmap for s in succrevs):
2200 2200 obsoletewithoutsuccessorindestination.add(srcrev)
2201 2201
2202 2202 return (
2203 2203 obsoletenotrebased,
2204 2204 obsoletewithoutsuccessorindestination,
2205 2205 obsoleteextinctsuccessors,
2206 2206 )
2207 2207
2208 2208
2209 2209 def abortrebase(ui, repo):
2210 2210 with repo.wlock(), repo.lock():
2211 2211 rbsrt = rebaseruntime(repo, ui)
2212 2212 rbsrt._prepareabortorcontinue(isabort=True)
2213 2213
2214 2214
2215 2215 def continuerebase(ui, repo):
2216 2216 with repo.wlock(), repo.lock():
2217 2217 rbsrt = rebaseruntime(repo, ui)
2218 2218 ms = mergemod.mergestate.read(repo)
2219 2219 mergeutil.checkunresolved(ms)
2220 2220 retcode = rbsrt._prepareabortorcontinue(isabort=False)
2221 2221 if retcode is not None:
2222 2222 return retcode
2223 2223 rbsrt._performrebase(None)
2224 2224 rbsrt._finishrebase()
2225 2225
2226 2226
2227 2227 def summaryhook(ui, repo):
2228 2228 if not repo.vfs.exists(b'rebasestate'):
2229 2229 return
2230 2230 try:
2231 2231 rbsrt = rebaseruntime(repo, ui, {})
2232 2232 rbsrt.restorestatus()
2233 2233 state = rbsrt.state
2234 2234 except error.RepoLookupError:
2235 2235 # i18n: column positioning for "hg summary"
2236 2236 msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
2237 2237 ui.write(msg)
2238 2238 return
2239 2239 numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
2240 2240 # i18n: column positioning for "hg summary"
2241 2241 ui.write(
2242 2242 _(b'rebase: %s, %s (rebase --continue)\n')
2243 2243 % (
2244 2244 ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
2245 2245 ui.label(_(b'%d remaining'), b'rebase.remaining')
2246 2246 % (len(state) - numrebased),
2247 2247 )
2248 2248 )
2249 2249
2250 2250
2251 2251 def uisetup(ui):
2252 2252 # Replace pull with a decorator to provide --rebase option
2253 2253 entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
2254 2254 entry[1].append(
2255 2255 (b'', b'rebase', None, _(b"rebase working directory to branch head"))
2256 2256 )
2257 2257 entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
2258 2258 cmdutil.summaryhooks.add(b'rebase', summaryhook)
2259 2259 statemod.addunfinished(
2260 2260 b'rebase',
2261 2261 fname=b'rebasestate',
2262 2262 stopflag=True,
2263 2263 continueflag=True,
2264 2264 abortfunc=abortrebase,
2265 2265 continuefunc=continuerebase,
2266 2266 )
@@ -1,711 +1,711 b''
1 1 # Copyright 2017-present Gregory Szorc <gregory.szorc@gmail.com>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 """generate release notes from commit messages (EXPERIMENTAL)
7 7
8 8 It is common to maintain files detailing changes in a project between
9 9 releases. Maintaining these files can be difficult and time consuming.
10 10 The :hg:`releasenotes` command provided by this extension makes the
11 11 process simpler by automating it.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import difflib
17 17 import errno
18 18 import re
19 19
20 20 from mercurial.i18n import _
21 21 from mercurial.pycompat import open
22 22 from mercurial import (
23 23 cmdutil,
24 24 config,
25 25 error,
26 26 minirst,
27 27 node,
28 28 pycompat,
29 29 registrar,
30 30 scmutil,
31 31 util,
32 32 )
33 33 from mercurial.utils import stringutil
34 34
35 35 cmdtable = {}
36 36 command = registrar.command(cmdtable)
37 37
38 38 try:
39 39 import fuzzywuzzy.fuzz as fuzz
40 40
41 41 fuzz.token_set_ratio
42 42 except ImportError:
43 43 fuzz = None
44 44
45 45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 47 # be specifying the version(s) of Mercurial they are tested with, or
48 48 # leave the attribute unspecified.
49 49 testedwith = b'ships-with-hg-core'
50 50
51 51 DEFAULT_SECTIONS = [
52 52 (b'feature', _(b'New Features')),
53 53 (b'bc', _(b'Backwards Compatibility Changes')),
54 54 (b'fix', _(b'Bug Fixes')),
55 55 (b'perf', _(b'Performance Improvements')),
56 56 (b'api', _(b'API Changes')),
57 57 ]
58 58
59 59 RE_DIRECTIVE = re.compile(br'^\.\. ([a-zA-Z0-9_]+)::\s*([^$]+)?$')
60 60 RE_ISSUE = br'\bissue ?[0-9]{4,6}(?![0-9])\b'
61 61
62 62 BULLET_SECTION = _(b'Other Changes')
63 63
64 64
65 65 class parsedreleasenotes(object):
66 66 def __init__(self):
67 67 self.sections = {}
68 68
69 69 def __contains__(self, section):
70 70 return section in self.sections
71 71
72 72 def __iter__(self):
73 73 return iter(sorted(self.sections))
74 74
75 75 def addtitleditem(self, section, title, paragraphs):
76 76 """Add a titled release note entry."""
77 77 self.sections.setdefault(section, ([], []))
78 78 self.sections[section][0].append((title, paragraphs))
79 79
80 80 def addnontitleditem(self, section, paragraphs):
81 81 """Adds a non-titled release note entry.
82 82
83 83 Will be rendered as a bullet point.
84 84 """
85 85 self.sections.setdefault(section, ([], []))
86 86 self.sections[section][1].append(paragraphs)
87 87
88 88 def titledforsection(self, section):
89 89 """Returns titled entries in a section.
90 90
91 91 Returns a list of (title, paragraphs) tuples describing sub-sections.
92 92 """
93 93 return self.sections.get(section, ([], []))[0]
94 94
95 95 def nontitledforsection(self, section):
96 96 """Returns non-titled, bulleted paragraphs in a section."""
97 97 return self.sections.get(section, ([], []))[1]
98 98
99 99 def hastitledinsection(self, section, title):
100 100 return any(t[0] == title for t in self.titledforsection(section))
101 101
102 102 def merge(self, ui, other):
103 103 """Merge another instance into this one.
104 104
105 105 This is used to combine multiple sources of release notes together.
106 106 """
107 107 if not fuzz:
108 108 ui.warn(
109 109 _(
110 110 b"module 'fuzzywuzzy' not found, merging of similar "
111 111 b"releasenotes is disabled\n"
112 112 )
113 113 )
114 114
115 115 for section in other:
116 116 existingnotes = converttitled(
117 117 self.titledforsection(section)
118 118 ) + convertnontitled(self.nontitledforsection(section))
119 119 for title, paragraphs in other.titledforsection(section):
120 120 if self.hastitledinsection(section, title):
121 121 # TODO prompt for resolution if different and running in
122 122 # interactive mode.
123 123 ui.write(
124 124 _(b'%s already exists in %s section; ignoring\n')
125 125 % (title, section)
126 126 )
127 127 continue
128 128
129 129 incoming_str = converttitled([(title, paragraphs)])[0]
130 130 if section == b'fix':
131 131 issue = getissuenum(incoming_str)
132 132 if issue:
133 133 if findissue(ui, existingnotes, issue):
134 134 continue
135 135
136 136 if similar(ui, existingnotes, incoming_str):
137 137 continue
138 138
139 139 self.addtitleditem(section, title, paragraphs)
140 140
141 141 for paragraphs in other.nontitledforsection(section):
142 142 if paragraphs in self.nontitledforsection(section):
143 143 continue
144 144
145 145 incoming_str = convertnontitled([paragraphs])[0]
146 146 if section == b'fix':
147 147 issue = getissuenum(incoming_str)
148 148 if issue:
149 149 if findissue(ui, existingnotes, issue):
150 150 continue
151 151
152 152 if similar(ui, existingnotes, incoming_str):
153 153 continue
154 154
155 155 self.addnontitleditem(section, paragraphs)
156 156
157 157
158 158 class releasenotessections(object):
159 159 def __init__(self, ui, repo=None):
160 160 if repo:
161 161 sections = util.sortdict(DEFAULT_SECTIONS)
162 162 custom_sections = getcustomadmonitions(repo)
163 163 if custom_sections:
164 164 sections.update(custom_sections)
165 165 self._sections = list(pycompat.iteritems(sections))
166 166 else:
167 167 self._sections = list(DEFAULT_SECTIONS)
168 168
169 169 def __iter__(self):
170 170 return iter(self._sections)
171 171
172 172 def names(self):
173 173 return [t[0] for t in self._sections]
174 174
175 175 def sectionfromtitle(self, title):
176 176 for name, value in self._sections:
177 177 if value == title:
178 178 return name
179 179
180 180 return None
181 181
182 182
183 183 def converttitled(titledparagraphs):
184 184 """
185 185 Convert titled paragraphs to strings
186 186 """
187 187 string_list = []
188 188 for title, paragraphs in titledparagraphs:
189 189 lines = []
190 190 for para in paragraphs:
191 191 lines.extend(para)
192 192 string_list.append(b' '.join(lines))
193 193 return string_list
194 194
195 195
196 196 def convertnontitled(nontitledparagraphs):
197 197 """
198 198 Convert non-titled bullets to strings
199 199 """
200 200 string_list = []
201 201 for paragraphs in nontitledparagraphs:
202 202 lines = []
203 203 for para in paragraphs:
204 204 lines.extend(para)
205 205 string_list.append(b' '.join(lines))
206 206 return string_list
207 207
208 208
209 209 def getissuenum(incoming_str):
210 210 """
211 211 Returns issue number from the incoming string if it exists
212 212 """
213 213 issue = re.search(RE_ISSUE, incoming_str, re.IGNORECASE)
214 214 if issue:
215 215 issue = issue.group()
216 216 return issue
217 217
218 218
219 219 def findissue(ui, existing, issue):
220 220 """
221 221 Returns true if issue number already exists in notes.
222 222 """
223 223 if any(issue in s for s in existing):
224 224 ui.write(_(b'"%s" already exists in notes; ignoring\n') % issue)
225 225 return True
226 226 else:
227 227 return False
228 228
229 229
230 230 def similar(ui, existing, incoming_str):
231 231 """
232 232 Returns true if similar note found in existing notes.
233 233 """
234 234 if len(incoming_str.split()) > 10:
235 235 merge = similaritycheck(incoming_str, existing)
236 236 if not merge:
237 237 ui.write(
238 238 _(b'"%s" already exists in notes file; ignoring\n')
239 239 % incoming_str
240 240 )
241 241 return True
242 242 else:
243 243 return False
244 244 else:
245 245 return False
246 246
247 247
248 248 def similaritycheck(incoming_str, existingnotes):
249 249 """
250 250 Returns false when note fragment can be merged to existing notes.
251 251 """
252 252 # fuzzywuzzy not present
253 253 if not fuzz:
254 254 return True
255 255
256 256 merge = True
257 257 for bullet in existingnotes:
258 258 score = fuzz.token_set_ratio(incoming_str, bullet)
259 259 if score > 75:
260 260 merge = False
261 261 break
262 262 return merge
263 263
264 264
265 265 def getcustomadmonitions(repo):
266 266 ctx = repo[b'.']
267 267 p = config.config()
268 268
269 269 def read(f, sections=None, remap=None):
270 270 if f in ctx:
271 271 data = ctx[f].data()
272 272 p.parse(f, data, sections, remap, read)
273 273 else:
274 274 raise error.Abort(
275 275 _(b".hgreleasenotes file \'%s\' not found") % repo.pathto(f)
276 276 )
277 277
278 278 if b'.hgreleasenotes' in ctx:
279 279 read(b'.hgreleasenotes')
280 280 return p[b'sections']
281 281
282 282
283 283 def checkadmonitions(ui, repo, directives, revs):
284 284 """
285 285 Checks the commit messages for admonitions and their validity.
286 286
287 287 .. abcd::
288 288
289 289 First paragraph under this admonition
290 290
291 291 For this commit message, using `hg releasenotes -r . --check`
292 292 returns: Invalid admonition 'abcd' present in changeset 3ea92981e103
293 293
294 294 As admonition 'abcd' is neither present in default nor custom admonitions
295 295 """
296 296 for rev in revs:
297 297 ctx = repo[rev]
298 298 admonition = re.search(RE_DIRECTIVE, ctx.description())
299 299 if admonition:
300 300 if admonition.group(1) in directives:
301 301 continue
302 302 else:
303 303 ui.write(
304 304 _(b"Invalid admonition '%s' present in changeset %s\n")
305 305 % (admonition.group(1), ctx.hex()[:12])
306 306 )
307 307 sim = lambda x: difflib.SequenceMatcher(
308 308 None, admonition.group(1), x
309 309 ).ratio()
310 310
311 311 similar = [s for s in directives if sim(s) > 0.6]
312 312 if len(similar) == 1:
313 313 ui.write(_(b"(did you mean %s?)\n") % similar[0])
314 314 elif similar:
315 315 ss = b", ".join(sorted(similar))
316 316 ui.write(_(b"(did you mean one of %s?)\n") % ss)
317 317
318 318
319 319 def _getadmonitionlist(ui, sections):
320 320 for section in sections:
321 321 ui.write(b"%s: %s\n" % (section[0], section[1]))
322 322
323 323
324 324 def parsenotesfromrevisions(repo, directives, revs):
325 325 notes = parsedreleasenotes()
326 326
327 327 for rev in revs:
328 328 ctx = repo[rev]
329 329
330 330 blocks, pruned = minirst.parse(
331 331 ctx.description(), admonitions=directives
332 332 )
333 333
334 334 for i, block in enumerate(blocks):
335 335 if block[b'type'] != b'admonition':
336 336 continue
337 337
338 338 directive = block[b'admonitiontitle']
339 339 title = block[b'lines'][0].strip() if block[b'lines'] else None
340 340
341 341 if i + 1 == len(blocks):
342 342 raise error.Abort(
343 343 _(
344 344 b'changeset %s: release notes directive %s '
345 345 b'lacks content'
346 346 )
347 347 % (ctx, directive)
348 348 )
349 349
350 350 # Now search ahead and find all paragraphs attached to this
351 351 # admonition.
352 352 paragraphs = []
353 353 for j in range(i + 1, len(blocks)):
354 354 pblock = blocks[j]
355 355
356 356 # Margin blocks may appear between paragraphs. Ignore them.
357 357 if pblock[b'type'] == b'margin':
358 358 continue
359 359
360 360 if pblock[b'type'] == b'admonition':
361 361 break
362 362
363 363 if pblock[b'type'] != b'paragraph':
364 364 repo.ui.warn(
365 365 _(
366 366 b'changeset %s: unexpected block in release '
367 367 b'notes directive %s\n'
368 368 )
369 369 % (ctx, directive)
370 370 )
371 371
372 372 if pblock[b'indent'] > 0:
373 373 paragraphs.append(pblock[b'lines'])
374 374 else:
375 375 break
376 376
377 377 # TODO consider using title as paragraph for more concise notes.
378 378 if not paragraphs:
379 379 repo.ui.warn(
380 380 _(b"error parsing releasenotes for revision: '%s'\n")
381 381 % node.hex(ctx.node())
382 382 )
383 383 if title:
384 384 notes.addtitleditem(directive, title, paragraphs)
385 385 else:
386 386 notes.addnontitleditem(directive, paragraphs)
387 387
388 388 return notes
389 389
390 390
391 391 def parsereleasenotesfile(sections, text):
392 392 """Parse text content containing generated release notes."""
393 393 notes = parsedreleasenotes()
394 394
395 395 blocks = minirst.parse(text)[0]
396 396
397 397 def gatherparagraphsbullets(offset, title=False):
398 398 notefragment = []
399 399
400 400 for i in range(offset + 1, len(blocks)):
401 401 block = blocks[i]
402 402
403 403 if block[b'type'] == b'margin':
404 404 continue
405 405 elif block[b'type'] == b'section':
406 406 break
407 407 elif block[b'type'] == b'bullet':
408 408 if block[b'indent'] != 0:
409 409 raise error.Abort(_(b'indented bullet lists not supported'))
410 410 if title:
411 411 lines = [l[1:].strip() for l in block[b'lines']]
412 412 notefragment.append(lines)
413 413 continue
414 414 else:
415 415 lines = [[l[1:].strip() for l in block[b'lines']]]
416 416
417 417 for block in blocks[i + 1 :]:
418 418 if block[b'type'] in (b'bullet', b'section'):
419 419 break
420 420 if block[b'type'] == b'paragraph':
421 421 lines.append(block[b'lines'])
422 422 notefragment.append(lines)
423 423 continue
424 424 elif block[b'type'] != b'paragraph':
425 425 raise error.Abort(
426 426 _(b'unexpected block type in release notes: %s')
427 427 % block[b'type']
428 428 )
429 429 if title:
430 430 notefragment.append(block[b'lines'])
431 431
432 432 return notefragment
433 433
434 434 currentsection = None
435 435 for i, block in enumerate(blocks):
436 436 if block[b'type'] != b'section':
437 437 continue
438 438
439 439 title = block[b'lines'][0]
440 440
441 441 # TODO the parsing around paragraphs and bullet points needs some
442 442 # work.
443 443 if block[b'underline'] == b'=': # main section
444 444 name = sections.sectionfromtitle(title)
445 445 if not name:
446 446 raise error.Abort(
447 447 _(b'unknown release notes section: %s') % title
448 448 )
449 449
450 450 currentsection = name
451 451 bullet_points = gatherparagraphsbullets(i)
452 452 if bullet_points:
453 453 for para in bullet_points:
454 454 notes.addnontitleditem(currentsection, para)
455 455
456 456 elif block[b'underline'] == b'-': # sub-section
457 457 if title == BULLET_SECTION:
458 458 bullet_points = gatherparagraphsbullets(i)
459 459 for para in bullet_points:
460 460 notes.addnontitleditem(currentsection, para)
461 461 else:
462 462 paragraphs = gatherparagraphsbullets(i, True)
463 463 notes.addtitleditem(currentsection, title, paragraphs)
464 464 else:
465 465 raise error.Abort(_(b'unsupported section type for %s') % title)
466 466
467 467 return notes
468 468
469 469
470 470 def serializenotes(sections, notes):
471 471 """Serialize release notes from parsed fragments and notes.
472 472
473 473 This function essentially takes the output of ``parsenotesfromrevisions()``
474 474 and ``parserelnotesfile()`` and produces output combining the 2.
475 475 """
476 476 lines = []
477 477
478 478 for sectionname, sectiontitle in sections:
479 479 if sectionname not in notes:
480 480 continue
481 481
482 482 lines.append(sectiontitle)
483 483 lines.append(b'=' * len(sectiontitle))
484 484 lines.append(b'')
485 485
486 486 # First pass to emit sub-sections.
487 487 for title, paragraphs in notes.titledforsection(sectionname):
488 488 lines.append(title)
489 489 lines.append(b'-' * len(title))
490 490 lines.append(b'')
491 491
492 492 for i, para in enumerate(paragraphs):
493 493 if i:
494 494 lines.append(b'')
495 495 lines.extend(
496 496 stringutil.wrap(b' '.join(para), width=78).splitlines()
497 497 )
498 498
499 499 lines.append(b'')
500 500
501 501 # Second pass to emit bullet list items.
502 502
503 503 # If the section has titled and non-titled items, we can't
504 504 # simply emit the bullet list because it would appear to come
505 505 # from the last title/section. So, we emit a new sub-section
506 506 # for the non-titled items.
507 507 nontitled = notes.nontitledforsection(sectionname)
508 508 if notes.titledforsection(sectionname) and nontitled:
509 509 # TODO make configurable.
510 510 lines.append(BULLET_SECTION)
511 511 lines.append(b'-' * len(BULLET_SECTION))
512 512 lines.append(b'')
513 513
514 514 for paragraphs in nontitled:
515 515 lines.extend(
516 516 stringutil.wrap(
517 517 b' '.join(paragraphs[0]),
518 518 width=78,
519 519 initindent=b'* ',
520 520 hangindent=b' ',
521 521 ).splitlines()
522 522 )
523 523
524 524 for para in paragraphs[1:]:
525 525 lines.append(b'')
526 526 lines.extend(
527 527 stringutil.wrap(
528 528 b' '.join(para),
529 529 width=78,
530 530 initindent=b' ',
531 531 hangindent=b' ',
532 532 ).splitlines()
533 533 )
534 534
535 535 lines.append(b'')
536 536
537 537 if lines and lines[-1]:
538 538 lines.append(b'')
539 539
540 540 return b'\n'.join(lines)
541 541
542 542
543 543 @command(
544 544 b'releasenotes',
545 545 [
546 546 (
547 547 b'r',
548 548 b'rev',
549 549 b'',
550 550 _(b'revisions to process for release notes'),
551 551 _(b'REV'),
552 552 ),
553 553 (
554 554 b'c',
555 555 b'check',
556 556 False,
557 557 _(b'checks for validity of admonitions (if any)'),
558 558 _(b'REV'),
559 559 ),
560 560 (
561 561 b'l',
562 562 b'list',
563 563 False,
564 564 _(b'list the available admonitions with their title'),
565 565 None,
566 566 ),
567 567 ],
568 568 _(b'hg releasenotes [-r REV] [-c] FILE'),
569 569 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
570 570 )
571 571 def releasenotes(ui, repo, file_=None, **opts):
572 572 """parse release notes from commit messages into an output file
573 573
574 574 Given an output file and set of revisions, this command will parse commit
575 575 messages for release notes then add them to the output file.
576 576
577 577 Release notes are defined in commit messages as ReStructuredText
578 578 directives. These have the form::
579 579
580 580 .. directive:: title
581 581
582 582 content
583 583
584 584 Each ``directive`` maps to an output section in a generated release notes
585 585 file, which itself is ReStructuredText. For example, the ``.. feature::``
586 586 directive would map to a ``New Features`` section.
587 587
588 588 Release note directives can be either short-form or long-form. In short-
589 589 form, ``title`` is omitted and the release note is rendered as a bullet
590 590 list. In long form, a sub-section with the title ``title`` is added to the
591 591 section.
592 592
593 593 The ``FILE`` argument controls the output file to write gathered release
594 594 notes to. The format of the file is::
595 595
596 596 Section 1
597 597 =========
598 598
599 599 ...
600 600
601 601 Section 2
602 602 =========
603 603
604 604 ...
605 605
606 606 Only sections with defined release notes are emitted.
607 607
608 608 If a section only has short-form notes, it will consist of bullet list::
609 609
610 610 Section
611 611 =======
612 612
613 613 * Release note 1
614 614 * Release note 2
615 615
616 616 If a section has long-form notes, sub-sections will be emitted::
617 617
618 618 Section
619 619 =======
620 620
621 621 Note 1 Title
622 622 ------------
623 623
624 624 Description of the first long-form note.
625 625
626 626 Note 2 Title
627 627 ------------
628 628
629 629 Description of the second long-form note.
630 630
631 631 If the ``FILE`` argument points to an existing file, that file will be
632 632 parsed for release notes having the format that would be generated by this
633 633 command. The notes from the processed commit messages will be *merged*
634 634 into this parsed set.
635 635
636 636 During release notes merging:
637 637
638 638 * Duplicate items are automatically ignored
639 639 * Items that are different are automatically ignored if the similarity is
640 640 greater than a threshold.
641 641
642 642 This means that the release notes file can be updated independently from
643 643 this command and changes should not be lost when running this command on
644 644 that file. A particular use case for this is to tweak the wording of a
645 645 release note after it has been added to the release notes file.
646 646
647 647 The -c/--check option checks the commit message for invalid admonitions.
648 648
649 649 The -l/--list option, presents the user with a list of existing available
650 650 admonitions along with their title. This also includes the custom
651 651 admonitions (if any).
652 652 """
653 653
654 654 opts = pycompat.byteskwargs(opts)
655 655 sections = releasenotessections(ui, repo)
656 656
657 cmdutil.check_incompatible_arguments(opts, b'list', b'rev', b'check')
657 cmdutil.check_incompatible_arguments(opts, b'list', [b'rev', b'check'])
658 658
659 659 if opts.get(b'list'):
660 660 return _getadmonitionlist(ui, sections)
661 661
662 662 rev = opts.get(b'rev')
663 663 revs = scmutil.revrange(repo, [rev or b'not public()'])
664 664 if opts.get(b'check'):
665 665 return checkadmonitions(ui, repo, sections.names(), revs)
666 666
667 667 incoming = parsenotesfromrevisions(repo, sections.names(), revs)
668 668
669 669 if file_ is None:
670 670 ui.pager(b'releasenotes')
671 671 return ui.write(serializenotes(sections, incoming))
672 672
673 673 try:
674 674 with open(file_, b'rb') as fh:
675 675 notes = parsereleasenotesfile(sections, fh.read())
676 676 except IOError as e:
677 677 if e.errno != errno.ENOENT:
678 678 raise
679 679
680 680 notes = parsedreleasenotes()
681 681
682 682 notes.merge(ui, incoming)
683 683
684 684 with open(file_, b'wb') as fh:
685 685 fh.write(serializenotes(sections, notes))
686 686
687 687
688 688 @command(b'debugparsereleasenotes', norepo=True)
689 689 def debugparsereleasenotes(ui, path, repo=None):
690 690 """parse release notes and print resulting data structure"""
691 691 if path == b'-':
692 692 text = pycompat.stdin.read()
693 693 else:
694 694 with open(path, b'rb') as fh:
695 695 text = fh.read()
696 696
697 697 sections = releasenotessections(ui, repo)
698 698
699 699 notes = parsereleasenotesfile(sections, text)
700 700
701 701 for section in notes:
702 702 ui.write(_(b'section: %s\n') % section)
703 703 for title, paragraphs in notes.titledforsection(section):
704 704 ui.write(_(b' subsection: %s\n') % title)
705 705 for para in paragraphs:
706 706 ui.write(_(b' paragraph: %s\n') % b' '.join(para))
707 707
708 708 for paragraphs in notes.nontitledforsection(section):
709 709 ui.write(_(b' bullet point:\n'))
710 710 for para in paragraphs:
711 711 ui.write(_(b' paragraph: %s\n') % b' '.join(para))
@@ -1,929 +1,929 b''
1 1 # Patch transplanting extension for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to transplant changesets from another branch
9 9
10 10 This extension allows you to transplant changes to another parent revision,
11 11 possibly in another repository. The transplant is done using 'diff' patches.
12 12
13 13 Transplanted patches are recorded in .hg/transplant/transplants, as a
14 14 map from a changeset hash to its hash in the source repository.
15 15 '''
16 16 from __future__ import absolute_import
17 17
18 18 import os
19 19
20 20 from mercurial.i18n import _
21 21 from mercurial.pycompat import open
22 22 from mercurial import (
23 23 bundlerepo,
24 24 cmdutil,
25 25 error,
26 26 exchange,
27 27 hg,
28 28 logcmdutil,
29 29 match,
30 30 merge,
31 31 node as nodemod,
32 32 patch,
33 33 pycompat,
34 34 registrar,
35 35 revlog,
36 36 revset,
37 37 scmutil,
38 38 smartset,
39 39 state as statemod,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
43 43 from mercurial.utils import (
44 44 procutil,
45 45 stringutil,
46 46 )
47 47
48 48
49 49 class TransplantError(error.Abort):
50 50 pass
51 51
52 52
53 53 cmdtable = {}
54 54 command = registrar.command(cmdtable)
55 55 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
56 56 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
57 57 # be specifying the version(s) of Mercurial they are tested with, or
58 58 # leave the attribute unspecified.
59 59 testedwith = b'ships-with-hg-core'
60 60
61 61 configtable = {}
62 62 configitem = registrar.configitem(configtable)
63 63
64 64 configitem(
65 65 b'transplant', b'filter', default=None,
66 66 )
67 67 configitem(
68 68 b'transplant', b'log', default=None,
69 69 )
70 70
71 71
72 72 class transplantentry(object):
73 73 def __init__(self, lnode, rnode):
74 74 self.lnode = lnode
75 75 self.rnode = rnode
76 76
77 77
78 78 class transplants(object):
79 79 def __init__(self, path=None, transplantfile=None, opener=None):
80 80 self.path = path
81 81 self.transplantfile = transplantfile
82 82 self.opener = opener
83 83
84 84 if not opener:
85 85 self.opener = vfsmod.vfs(self.path)
86 86 self.transplants = {}
87 87 self.dirty = False
88 88 self.read()
89 89
90 90 def read(self):
91 91 abspath = os.path.join(self.path, self.transplantfile)
92 92 if self.transplantfile and os.path.exists(abspath):
93 93 for line in self.opener.read(self.transplantfile).splitlines():
94 94 lnode, rnode = map(revlog.bin, line.split(b':'))
95 95 list = self.transplants.setdefault(rnode, [])
96 96 list.append(transplantentry(lnode, rnode))
97 97
98 98 def write(self):
99 99 if self.dirty and self.transplantfile:
100 100 if not os.path.isdir(self.path):
101 101 os.mkdir(self.path)
102 102 fp = self.opener(self.transplantfile, b'w')
103 103 for list in pycompat.itervalues(self.transplants):
104 104 for t in list:
105 105 l, r = map(nodemod.hex, (t.lnode, t.rnode))
106 106 fp.write(l + b':' + r + b'\n')
107 107 fp.close()
108 108 self.dirty = False
109 109
110 110 def get(self, rnode):
111 111 return self.transplants.get(rnode) or []
112 112
113 113 def set(self, lnode, rnode):
114 114 list = self.transplants.setdefault(rnode, [])
115 115 list.append(transplantentry(lnode, rnode))
116 116 self.dirty = True
117 117
118 118 def remove(self, transplant):
119 119 list = self.transplants.get(transplant.rnode)
120 120 if list:
121 121 del list[list.index(transplant)]
122 122 self.dirty = True
123 123
124 124
125 125 class transplanter(object):
126 126 def __init__(self, ui, repo, opts):
127 127 self.ui = ui
128 128 self.path = repo.vfs.join(b'transplant')
129 129 self.opener = vfsmod.vfs(self.path)
130 130 self.transplants = transplants(
131 131 self.path, b'transplants', opener=self.opener
132 132 )
133 133
134 134 def getcommiteditor():
135 135 editform = cmdutil.mergeeditform(repo[None], b'transplant')
136 136 return cmdutil.getcommiteditor(
137 137 editform=editform, **pycompat.strkwargs(opts)
138 138 )
139 139
140 140 self.getcommiteditor = getcommiteditor
141 141
142 142 def applied(self, repo, node, parent):
143 143 '''returns True if a node is already an ancestor of parent
144 144 or is parent or has already been transplanted'''
145 145 if hasnode(repo, parent):
146 146 parentrev = repo.changelog.rev(parent)
147 147 if hasnode(repo, node):
148 148 rev = repo.changelog.rev(node)
149 149 reachable = repo.changelog.ancestors(
150 150 [parentrev], rev, inclusive=True
151 151 )
152 152 if rev in reachable:
153 153 return True
154 154 for t in self.transplants.get(node):
155 155 # it might have been stripped
156 156 if not hasnode(repo, t.lnode):
157 157 self.transplants.remove(t)
158 158 return False
159 159 lnoderev = repo.changelog.rev(t.lnode)
160 160 if lnoderev in repo.changelog.ancestors(
161 161 [parentrev], lnoderev, inclusive=True
162 162 ):
163 163 return True
164 164 return False
165 165
166 166 def apply(self, repo, source, revmap, merges, opts=None):
167 167 '''apply the revisions in revmap one by one in revision order'''
168 168 if opts is None:
169 169 opts = {}
170 170 revs = sorted(revmap)
171 171 p1 = repo.dirstate.p1()
172 172 pulls = []
173 173 diffopts = patch.difffeatureopts(self.ui, opts)
174 174 diffopts.git = True
175 175
176 176 lock = tr = None
177 177 try:
178 178 lock = repo.lock()
179 179 tr = repo.transaction(b'transplant')
180 180 for rev in revs:
181 181 node = revmap[rev]
182 182 revstr = b'%d:%s' % (rev, nodemod.short(node))
183 183
184 184 if self.applied(repo, node, p1):
185 185 self.ui.warn(
186 186 _(b'skipping already applied revision %s\n') % revstr
187 187 )
188 188 continue
189 189
190 190 parents = source.changelog.parents(node)
191 191 if not (opts.get(b'filter') or opts.get(b'log')):
192 192 # If the changeset parent is the same as the
193 193 # wdir's parent, just pull it.
194 194 if parents[0] == p1:
195 195 pulls.append(node)
196 196 p1 = node
197 197 continue
198 198 if pulls:
199 199 if source != repo:
200 200 exchange.pull(repo, source.peer(), heads=pulls)
201 201 merge.update(
202 202 repo, pulls[-1], branchmerge=False, force=False
203 203 )
204 204 p1 = repo.dirstate.p1()
205 205 pulls = []
206 206
207 207 domerge = False
208 208 if node in merges:
209 209 # pulling all the merge revs at once would mean we
210 210 # couldn't transplant after the latest even if
211 211 # transplants before them fail.
212 212 domerge = True
213 213 if not hasnode(repo, node):
214 214 exchange.pull(repo, source.peer(), heads=[node])
215 215
216 216 skipmerge = False
217 217 if parents[1] != revlog.nullid:
218 218 if not opts.get(b'parent'):
219 219 self.ui.note(
220 220 _(b'skipping merge changeset %d:%s\n')
221 221 % (rev, nodemod.short(node))
222 222 )
223 223 skipmerge = True
224 224 else:
225 225 parent = source.lookup(opts[b'parent'])
226 226 if parent not in parents:
227 227 raise error.Abort(
228 228 _(b'%s is not a parent of %s')
229 229 % (nodemod.short(parent), nodemod.short(node))
230 230 )
231 231 else:
232 232 parent = parents[0]
233 233
234 234 if skipmerge:
235 235 patchfile = None
236 236 else:
237 237 fd, patchfile = pycompat.mkstemp(prefix=b'hg-transplant-')
238 238 fp = os.fdopen(fd, 'wb')
239 239 gen = patch.diff(source, parent, node, opts=diffopts)
240 240 for chunk in gen:
241 241 fp.write(chunk)
242 242 fp.close()
243 243
244 244 del revmap[rev]
245 245 if patchfile or domerge:
246 246 try:
247 247 try:
248 248 n = self.applyone(
249 249 repo,
250 250 node,
251 251 source.changelog.read(node),
252 252 patchfile,
253 253 merge=domerge,
254 254 log=opts.get(b'log'),
255 255 filter=opts.get(b'filter'),
256 256 )
257 257 except TransplantError:
258 258 # Do not rollback, it is up to the user to
259 259 # fix the merge or cancel everything
260 260 tr.close()
261 261 raise
262 262 if n and domerge:
263 263 self.ui.status(
264 264 _(b'%s merged at %s\n')
265 265 % (revstr, nodemod.short(n))
266 266 )
267 267 elif n:
268 268 self.ui.status(
269 269 _(b'%s transplanted to %s\n')
270 270 % (nodemod.short(node), nodemod.short(n))
271 271 )
272 272 finally:
273 273 if patchfile:
274 274 os.unlink(patchfile)
275 275 tr.close()
276 276 if pulls:
277 277 exchange.pull(repo, source.peer(), heads=pulls)
278 278 merge.update(repo, pulls[-1], branchmerge=False, force=False)
279 279 finally:
280 280 self.saveseries(revmap, merges)
281 281 self.transplants.write()
282 282 if tr:
283 283 tr.release()
284 284 if lock:
285 285 lock.release()
286 286
287 287 def filter(self, filter, node, changelog, patchfile):
288 288 '''arbitrarily rewrite changeset before applying it'''
289 289
290 290 self.ui.status(_(b'filtering %s\n') % patchfile)
291 291 user, date, msg = (changelog[1], changelog[2], changelog[4])
292 292 fd, headerfile = pycompat.mkstemp(prefix=b'hg-transplant-')
293 293 fp = os.fdopen(fd, 'wb')
294 294 fp.write(b"# HG changeset patch\n")
295 295 fp.write(b"# User %s\n" % user)
296 296 fp.write(b"# Date %d %d\n" % date)
297 297 fp.write(msg + b'\n')
298 298 fp.close()
299 299
300 300 try:
301 301 self.ui.system(
302 302 b'%s %s %s'
303 303 % (
304 304 filter,
305 305 procutil.shellquote(headerfile),
306 306 procutil.shellquote(patchfile),
307 307 ),
308 308 environ={
309 309 b'HGUSER': changelog[1],
310 310 b'HGREVISION': nodemod.hex(node),
311 311 },
312 312 onerr=error.Abort,
313 313 errprefix=_(b'filter failed'),
314 314 blockedtag=b'transplant_filter',
315 315 )
316 316 user, date, msg = self.parselog(open(headerfile, b'rb'))[1:4]
317 317 finally:
318 318 os.unlink(headerfile)
319 319
320 320 return (user, date, msg)
321 321
322 322 def applyone(
323 323 self, repo, node, cl, patchfile, merge=False, log=False, filter=None
324 324 ):
325 325 '''apply the patch in patchfile to the repository as a transplant'''
326 326 (manifest, user, (time, timezone), files, message) = cl[:5]
327 327 date = b"%d %d" % (time, timezone)
328 328 extra = {b'transplant_source': node}
329 329 if filter:
330 330 (user, date, message) = self.filter(filter, node, cl, patchfile)
331 331
332 332 if log:
333 333 # we don't translate messages inserted into commits
334 334 message += b'\n(transplanted from %s)' % nodemod.hex(node)
335 335
336 336 self.ui.status(_(b'applying %s\n') % nodemod.short(node))
337 337 self.ui.note(b'%s %s\n%s\n' % (user, date, message))
338 338
339 339 if not patchfile and not merge:
340 340 raise error.Abort(_(b'can only omit patchfile if merging'))
341 341 if patchfile:
342 342 try:
343 343 files = set()
344 344 patch.patch(self.ui, repo, patchfile, files=files, eolmode=None)
345 345 files = list(files)
346 346 except Exception as inst:
347 347 seriespath = os.path.join(self.path, b'series')
348 348 if os.path.exists(seriespath):
349 349 os.unlink(seriespath)
350 350 p1 = repo.dirstate.p1()
351 351 p2 = node
352 352 self.log(user, date, message, p1, p2, merge=merge)
353 353 self.ui.write(stringutil.forcebytestr(inst) + b'\n')
354 354 raise TransplantError(
355 355 _(
356 356 b'fix up the working directory and run '
357 357 b'hg transplant --continue'
358 358 )
359 359 )
360 360 else:
361 361 files = None
362 362 if merge:
363 363 p1 = repo.dirstate.p1()
364 364 repo.setparents(p1, node)
365 365 m = match.always()
366 366 else:
367 367 m = match.exact(files)
368 368
369 369 n = repo.commit(
370 370 message,
371 371 user,
372 372 date,
373 373 extra=extra,
374 374 match=m,
375 375 editor=self.getcommiteditor(),
376 376 )
377 377 if not n:
378 378 self.ui.warn(
379 379 _(b'skipping emptied changeset %s\n') % nodemod.short(node)
380 380 )
381 381 return None
382 382 if not merge:
383 383 self.transplants.set(n, node)
384 384
385 385 return n
386 386
387 387 def canresume(self):
388 388 return os.path.exists(os.path.join(self.path, b'journal'))
389 389
390 390 def resume(self, repo, source, opts):
391 391 '''recover last transaction and apply remaining changesets'''
392 392 if os.path.exists(os.path.join(self.path, b'journal')):
393 393 n, node = self.recover(repo, source, opts)
394 394 if n:
395 395 self.ui.status(
396 396 _(b'%s transplanted as %s\n')
397 397 % (nodemod.short(node), nodemod.short(n))
398 398 )
399 399 else:
400 400 self.ui.status(
401 401 _(b'%s skipped due to empty diff\n')
402 402 % (nodemod.short(node),)
403 403 )
404 404 seriespath = os.path.join(self.path, b'series')
405 405 if not os.path.exists(seriespath):
406 406 self.transplants.write()
407 407 return
408 408 nodes, merges = self.readseries()
409 409 revmap = {}
410 410 for n in nodes:
411 411 revmap[source.changelog.rev(n)] = n
412 412 os.unlink(seriespath)
413 413
414 414 self.apply(repo, source, revmap, merges, opts)
415 415
416 416 def recover(self, repo, source, opts):
417 417 '''commit working directory using journal metadata'''
418 418 node, user, date, message, parents = self.readlog()
419 419 merge = False
420 420
421 421 if not user or not date or not message or not parents[0]:
422 422 raise error.Abort(_(b'transplant log file is corrupt'))
423 423
424 424 parent = parents[0]
425 425 if len(parents) > 1:
426 426 if opts.get(b'parent'):
427 427 parent = source.lookup(opts[b'parent'])
428 428 if parent not in parents:
429 429 raise error.Abort(
430 430 _(b'%s is not a parent of %s')
431 431 % (nodemod.short(parent), nodemod.short(node))
432 432 )
433 433 else:
434 434 merge = True
435 435
436 436 extra = {b'transplant_source': node}
437 437 try:
438 438 p1 = repo.dirstate.p1()
439 439 if p1 != parent:
440 440 raise error.Abort(
441 441 _(b'working directory not at transplant parent %s')
442 442 % nodemod.hex(parent)
443 443 )
444 444 if merge:
445 445 repo.setparents(p1, parents[1])
446 446 st = repo.status()
447 447 modified, added, removed, deleted = (
448 448 st.modified,
449 449 st.added,
450 450 st.removed,
451 451 st.deleted,
452 452 )
453 453 if merge or modified or added or removed or deleted:
454 454 n = repo.commit(
455 455 message,
456 456 user,
457 457 date,
458 458 extra=extra,
459 459 editor=self.getcommiteditor(),
460 460 )
461 461 if not n:
462 462 raise error.Abort(_(b'commit failed'))
463 463 if not merge:
464 464 self.transplants.set(n, node)
465 465 else:
466 466 n = None
467 467 self.unlog()
468 468
469 469 return n, node
470 470 finally:
471 471 # TODO: get rid of this meaningless try/finally enclosing.
472 472 # this is kept only to reduce changes in a patch.
473 473 pass
474 474
475 475 def stop(self, ui, repo):
476 476 """logic to stop an interrupted transplant"""
477 477 if self.canresume():
478 478 startctx = repo[b'.']
479 479 hg.updaterepo(repo, startctx.node(), overwrite=True)
480 480 ui.status(_(b"stopped the interrupted transplant\n"))
481 481 ui.status(
482 482 _(b"working directory is now at %s\n") % startctx.hex()[:12]
483 483 )
484 484 self.unlog()
485 485 return 0
486 486
487 487 def readseries(self):
488 488 nodes = []
489 489 merges = []
490 490 cur = nodes
491 491 for line in self.opener.read(b'series').splitlines():
492 492 if line.startswith(b'# Merges'):
493 493 cur = merges
494 494 continue
495 495 cur.append(revlog.bin(line))
496 496
497 497 return (nodes, merges)
498 498
499 499 def saveseries(self, revmap, merges):
500 500 if not revmap:
501 501 return
502 502
503 503 if not os.path.isdir(self.path):
504 504 os.mkdir(self.path)
505 505 series = self.opener(b'series', b'w')
506 506 for rev in sorted(revmap):
507 507 series.write(nodemod.hex(revmap[rev]) + b'\n')
508 508 if merges:
509 509 series.write(b'# Merges\n')
510 510 for m in merges:
511 511 series.write(nodemod.hex(m) + b'\n')
512 512 series.close()
513 513
514 514 def parselog(self, fp):
515 515 parents = []
516 516 message = []
517 517 node = revlog.nullid
518 518 inmsg = False
519 519 user = None
520 520 date = None
521 521 for line in fp.read().splitlines():
522 522 if inmsg:
523 523 message.append(line)
524 524 elif line.startswith(b'# User '):
525 525 user = line[7:]
526 526 elif line.startswith(b'# Date '):
527 527 date = line[7:]
528 528 elif line.startswith(b'# Node ID '):
529 529 node = revlog.bin(line[10:])
530 530 elif line.startswith(b'# Parent '):
531 531 parents.append(revlog.bin(line[9:]))
532 532 elif not line.startswith(b'# '):
533 533 inmsg = True
534 534 message.append(line)
535 535 if None in (user, date):
536 536 raise error.Abort(
537 537 _(b"filter corrupted changeset (no user or date)")
538 538 )
539 539 return (node, user, date, b'\n'.join(message), parents)
540 540
541 541 def log(self, user, date, message, p1, p2, merge=False):
542 542 '''journal changelog metadata for later recover'''
543 543
544 544 if not os.path.isdir(self.path):
545 545 os.mkdir(self.path)
546 546 fp = self.opener(b'journal', b'w')
547 547 fp.write(b'# User %s\n' % user)
548 548 fp.write(b'# Date %s\n' % date)
549 549 fp.write(b'# Node ID %s\n' % nodemod.hex(p2))
550 550 fp.write(b'# Parent ' + nodemod.hex(p1) + b'\n')
551 551 if merge:
552 552 fp.write(b'# Parent ' + nodemod.hex(p2) + b'\n')
553 553 fp.write(message.rstrip() + b'\n')
554 554 fp.close()
555 555
556 556 def readlog(self):
557 557 return self.parselog(self.opener(b'journal'))
558 558
559 559 def unlog(self):
560 560 '''remove changelog journal'''
561 561 absdst = os.path.join(self.path, b'journal')
562 562 if os.path.exists(absdst):
563 563 os.unlink(absdst)
564 564
565 565 def transplantfilter(self, repo, source, root):
566 566 def matchfn(node):
567 567 if self.applied(repo, node, root):
568 568 return False
569 569 if source.changelog.parents(node)[1] != revlog.nullid:
570 570 return False
571 571 extra = source.changelog.read(node)[5]
572 572 cnode = extra.get(b'transplant_source')
573 573 if cnode and self.applied(repo, cnode, root):
574 574 return False
575 575 return True
576 576
577 577 return matchfn
578 578
579 579
580 580 def hasnode(repo, node):
581 581 try:
582 582 return repo.changelog.rev(node) is not None
583 583 except error.StorageError:
584 584 return False
585 585
586 586
587 587 def browserevs(ui, repo, nodes, opts):
588 588 '''interactively transplant changesets'''
589 589 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
590 590 transplants = []
591 591 merges = []
592 592 prompt = _(
593 593 b'apply changeset? [ynmpcq?]:'
594 594 b'$$ &yes, transplant this changeset'
595 595 b'$$ &no, skip this changeset'
596 596 b'$$ &merge at this changeset'
597 597 b'$$ show &patch'
598 598 b'$$ &commit selected changesets'
599 599 b'$$ &quit and cancel transplant'
600 600 b'$$ &? (show this help)'
601 601 )
602 602 for node in nodes:
603 603 displayer.show(repo[node])
604 604 action = None
605 605 while not action:
606 606 choice = ui.promptchoice(prompt)
607 607 action = b'ynmpcq?'[choice : choice + 1]
608 608 if action == b'?':
609 609 for c, t in ui.extractchoices(prompt)[1]:
610 610 ui.write(b'%s: %s\n' % (c, t))
611 611 action = None
612 612 elif action == b'p':
613 613 parent = repo.changelog.parents(node)[0]
614 614 for chunk in patch.diff(repo, parent, node):
615 615 ui.write(chunk)
616 616 action = None
617 617 if action == b'y':
618 618 transplants.append(node)
619 619 elif action == b'm':
620 620 merges.append(node)
621 621 elif action == b'c':
622 622 break
623 623 elif action == b'q':
624 624 transplants = ()
625 625 merges = ()
626 626 break
627 627 displayer.close()
628 628 return (transplants, merges)
629 629
630 630
631 631 @command(
632 632 b'transplant',
633 633 [
634 634 (
635 635 b's',
636 636 b'source',
637 637 b'',
638 638 _(b'transplant changesets from REPO'),
639 639 _(b'REPO'),
640 640 ),
641 641 (
642 642 b'b',
643 643 b'branch',
644 644 [],
645 645 _(b'use this source changeset as head'),
646 646 _(b'REV'),
647 647 ),
648 648 (
649 649 b'a',
650 650 b'all',
651 651 None,
652 652 _(b'pull all changesets up to the --branch revisions'),
653 653 ),
654 654 (b'p', b'prune', [], _(b'skip over REV'), _(b'REV')),
655 655 (b'm', b'merge', [], _(b'merge at REV'), _(b'REV')),
656 656 (
657 657 b'',
658 658 b'parent',
659 659 b'',
660 660 _(b'parent to choose when transplanting merge'),
661 661 _(b'REV'),
662 662 ),
663 663 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
664 664 (b'', b'log', None, _(b'append transplant info to log message')),
665 665 (b'', b'stop', False, _(b'stop interrupted transplant')),
666 666 (
667 667 b'c',
668 668 b'continue',
669 669 None,
670 670 _(b'continue last transplant session after fixing conflicts'),
671 671 ),
672 672 (
673 673 b'',
674 674 b'filter',
675 675 b'',
676 676 _(b'filter changesets through command'),
677 677 _(b'CMD'),
678 678 ),
679 679 ],
680 680 _(
681 681 b'hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
682 682 b'[-m REV] [REV]...'
683 683 ),
684 684 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
685 685 )
686 686 def transplant(ui, repo, *revs, **opts):
687 687 '''transplant changesets from another branch
688 688
689 689 Selected changesets will be applied on top of the current working
690 690 directory with the log of the original changeset. The changesets
691 691 are copied and will thus appear twice in the history with different
692 692 identities.
693 693
694 694 Consider using the graft command if everything is inside the same
695 695 repository - it will use merges and will usually give a better result.
696 696 Use the rebase extension if the changesets are unpublished and you want
697 697 to move them instead of copying them.
698 698
699 699 If --log is specified, log messages will have a comment appended
700 700 of the form::
701 701
702 702 (transplanted from CHANGESETHASH)
703 703
704 704 You can rewrite the changelog message with the --filter option.
705 705 Its argument will be invoked with the current changelog message as
706 706 $1 and the patch as $2.
707 707
708 708 --source/-s specifies another repository to use for selecting changesets,
709 709 just as if it temporarily had been pulled.
710 710 If --branch/-b is specified, these revisions will be used as
711 711 heads when deciding which changesets to transplant, just as if only
712 712 these revisions had been pulled.
713 713 If --all/-a is specified, all the revisions up to the heads specified
714 714 with --branch will be transplanted.
715 715
716 716 Example:
717 717
718 718 - transplant all changes up to REV on top of your current revision::
719 719
720 720 hg transplant --branch REV --all
721 721
722 722 You can optionally mark selected transplanted changesets as merge
723 723 changesets. You will not be prompted to transplant any ancestors
724 724 of a merged transplant, and you can merge descendants of them
725 725 normally instead of transplanting them.
726 726
727 727 Merge changesets may be transplanted directly by specifying the
728 728 proper parent changeset by calling :hg:`transplant --parent`.
729 729
730 730 If no merges or revisions are provided, :hg:`transplant` will
731 731 start an interactive changeset browser.
732 732
733 733 If a changeset application fails, you can fix the merge by hand
734 734 and then resume where you left off by calling :hg:`transplant
735 735 --continue/-c`.
736 736 '''
737 737 with repo.wlock():
738 738 return _dotransplant(ui, repo, *revs, **opts)
739 739
740 740
def _dotransplant(ui, repo, *revs, **opts):
    """Implementation of the transplant command (caller holds the wlock).

    Validates option combinations, resolves the source repository (local or
    remote bundle), computes the set of changesets to transplant, and hands
    the work to a transplanter instance.
    """

    def incwalk(repo, csets, match=util.always):
        # Walk the incoming changesets, yielding those accepted by `match`.
        for node in csets:
            if match(node):
                yield node

    def transplantwalk(repo, dest, heads, match=util.always):
        '''Yield all nodes that are ancestors of a head but not ancestors
        of dest.
        If no heads are specified, the heads of repo will be used.'''
        if not heads:
            heads = repo.heads()
        ancestors = []
        ctx = repo[dest]
        for head in heads:
            ancestors.append(ctx.ancestor(repo[head]).node())
        for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
            if match(node):
                yield node

    def checkopts(opts, revs):
        # --continue/--stop take no selection options; the incompatible
        # options are passed as a single list per the current
        # check_incompatible_arguments() API.
        if opts.get(b'continue'):
            cmdutil.check_incompatible_arguments(
                opts, b'continue', [b'branch', b'all', b'merge']
            )
            return
        if opts.get(b'stop'):
            cmdutil.check_incompatible_arguments(
                opts, b'stop', [b'branch', b'all', b'merge']
            )
            return
        if not (
            opts.get(b'source')
            or revs
            or opts.get(b'merge')
            or opts.get(b'branch')
        ):
            raise error.Abort(
                _(
                    b'no source URL, branch revision, or revision '
                    b'list provided'
                )
            )
        if opts.get(b'all'):
            if not opts.get(b'branch'):
                raise error.Abort(_(b'--all requires a branch revision'))
            if revs:
                raise error.Abort(
                    _(b'--all is incompatible with a revision list')
                )

    opts = pycompat.byteskwargs(opts)
    checkopts(opts, revs)

    if not opts.get(b'log'):
        # deprecated config: transplant.log
        opts[b'log'] = ui.config(b'transplant', b'log')
    if not opts.get(b'filter'):
        # deprecated config: transplant.filter
        opts[b'filter'] = ui.config(b'transplant', b'filter')

    tp = transplanter(ui, repo, opts)

    p1 = repo.dirstate.p1()
    if len(repo) > 0 and p1 == revlog.nullid:
        raise error.Abort(_(b'no revision checked out'))
    if opts.get(b'continue'):
        if not tp.canresume():
            raise error.Abort(_(b'no transplant to continue'))
    elif opts.get(b'stop'):
        if not tp.canresume():
            raise error.Abort(_(b'no interrupted transplant found'))
        return tp.stop(ui, repo)
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    sourcerepo = opts.get(b'source')
    if sourcerepo:
        peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
        heads = pycompat.maplist(peer.lookup, opts.get(b'branch', ()))
        target = set(heads)
        for r in revs:
            try:
                target.add(peer.lookup(r))
            except error.RepoError:
                pass
        source, csets, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, peer, onlyheads=sorted(target), force=True
        )
    else:
        source = repo
        heads = pycompat.maplist(source.lookup, opts.get(b'branch', ()))
        cleanupfn = None

    try:
        if opts.get(b'continue'):
            tp.resume(repo, source, opts)
            return

        tf = tp.transplantfilter(repo, source, p1)
        if opts.get(b'prune'):
            prune = set(
                source[r].node()
                for r in scmutil.revrange(source, opts.get(b'prune'))
            )
            matchfn = lambda x: tf(x) and x not in prune
        else:
            matchfn = tf
        merges = pycompat.maplist(source.lookup, opts.get(b'merge', ()))
        revmap = {}
        if revs:
            for r in scmutil.revrange(source, revs):
                revmap[int(r)] = source[r].node()
        elif opts.get(b'all') or not merges:
            if source != repo:
                alltransplants = incwalk(source, csets, match=matchfn)
            else:
                alltransplants = transplantwalk(
                    source, p1, heads, match=matchfn
                )
            if opts.get(b'all'):
                revs = alltransplants
            else:
                revs, newmerges = browserevs(ui, source, alltransplants, opts)
                merges.extend(newmerges)
        for r in revs:
            revmap[source.changelog.rev(r)] = r
        for r in merges:
            revmap[source.changelog.rev(r)] = r

        tp.apply(repo, source, revmap, merges, opts)
    finally:
        # Remote-bundle sources need their temporary files cleaned up.
        if cleanupfn:
            cleanupfn()
876 876
877 877
def continuecmd(ui, repo):
    """logic to resume an interrupted transplant using
    'hg continue'"""
    # Hold the working-copy lock for the whole resume, mirroring what
    # 'hg transplant --continue' does in _dotransplant().
    with repo.wlock():
        tp = transplanter(ui, repo, {})
        return tp.resume(repo, repo, {})
884 884
885 885
886 886 revsetpredicate = registrar.revsetpredicate()
887 887
888 888
@revsetpredicate(b'transplanted([set])')
def revsettransplanted(repo, subset, x):
    """Transplanted changesets in set, or all transplanted changesets.
    """
    # With no argument, consider every revision in the subset.
    candidates = revset.getset(repo, subset, x) if x else subset
    transplanted = [
        rev
        for rev in candidates
        if repo[rev].extra().get(b'transplant_source')
    ]
    return smartset.baseset(transplanted)
900 900
901 901
902 902 templatekeyword = registrar.templatekeyword()
903 903
904 904
@templatekeyword(b'transplanted', requires={b'ctx'})
def kwtransplanted(context, mapping):
    """String. The node identifier of the transplanted
    changeset if any."""
    ctx = context.resource(mapping, b'ctx')
    source = ctx.extra().get(b'transplant_source')
    # Empty string when the changeset was not transplanted.
    return nodemod.hex(source) if source else b''
912 912
913 913
def extsetup(ui):
    # Register transplant's interrupted-state handling so generic machinery
    # ('hg continue', status hints, checkunfinished) knows about an
    # in-progress transplant recorded in .hg/transplant/journal.
    statemod.addunfinished(
        b'transplant',
        fname=b'transplant/journal',
        clearable=True,
        continuefunc=continuecmd,
        statushint=_(
            b'To continue: hg transplant --continue\n'
            b'To stop: hg transplant --stop'
        ),
        cmdhint=_(b"use 'hg transplant --continue' or 'hg transplant --stop'"),
    )
926 926
927 927
928 928 # tell hggettext to extract docstrings from these functions:
929 929 i18nfunctions = [revsettransplanted, kwtransplanted]
@@ -1,4072 +1,4072 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy as copymod
11 11 import errno
12 12 import os
13 13 import re
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22 from .pycompat import (
23 23 getattr,
24 24 open,
25 25 setattr,
26 26 )
27 27 from .thirdparty import attr
28 28
29 29 from . import (
30 30 bookmarks,
31 31 changelog,
32 32 copies,
33 33 crecord as crecordmod,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 formatter,
38 38 logcmdutil,
39 39 match as matchmod,
40 40 merge as mergemod,
41 41 mergeutil,
42 42 obsolete,
43 43 patch,
44 44 pathutil,
45 45 phases,
46 46 pycompat,
47 47 repair,
48 48 revlog,
49 49 rewriteutil,
50 50 scmutil,
51 51 smartset,
52 52 state as statemod,
53 53 subrepoutil,
54 54 templatekw,
55 55 templater,
56 56 util,
57 57 vfs as vfsmod,
58 58 )
59 59
60 60 from .utils import (
61 61 dateutil,
62 62 stringutil,
63 63 )
64 64
65 65 if pycompat.TYPE_CHECKING:
66 66 from typing import (
67 67 Any,
68 68 Dict,
69 69 )
70 70
71 71 for t in (Any, Dict):
72 72 assert t
73 73
74 74 stringio = util.stringio
75 75
76 76 # templates of common command options
77 77
78 78 dryrunopts = [
79 79 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
80 80 ]
81 81
82 82 confirmopts = [
83 83 (b'', b'confirm', None, _(b'ask before applying actions')),
84 84 ]
85 85
86 86 remoteopts = [
87 87 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
88 88 (
89 89 b'',
90 90 b'remotecmd',
91 91 b'',
92 92 _(b'specify hg command to run on the remote side'),
93 93 _(b'CMD'),
94 94 ),
95 95 (
96 96 b'',
97 97 b'insecure',
98 98 None,
99 99 _(b'do not verify server certificate (ignoring web.cacerts config)'),
100 100 ),
101 101 ]
102 102
103 103 walkopts = [
104 104 (
105 105 b'I',
106 106 b'include',
107 107 [],
108 108 _(b'include names matching the given patterns'),
109 109 _(b'PATTERN'),
110 110 ),
111 111 (
112 112 b'X',
113 113 b'exclude',
114 114 [],
115 115 _(b'exclude names matching the given patterns'),
116 116 _(b'PATTERN'),
117 117 ),
118 118 ]
119 119
120 120 commitopts = [
121 121 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
122 122 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
123 123 ]
124 124
125 125 commitopts2 = [
126 126 (
127 127 b'd',
128 128 b'date',
129 129 b'',
130 130 _(b'record the specified date as commit date'),
131 131 _(b'DATE'),
132 132 ),
133 133 (
134 134 b'u',
135 135 b'user',
136 136 b'',
137 137 _(b'record the specified user as committer'),
138 138 _(b'USER'),
139 139 ),
140 140 ]
141 141
142 142 commitopts3 = [
143 143 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
144 144 (b'U', b'currentuser', None, _(b'record the current user as committer')),
145 145 ]
146 146
147 147 formatteropts = [
148 148 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
149 149 ]
150 150
151 151 templateopts = [
152 152 (
153 153 b'',
154 154 b'style',
155 155 b'',
156 156 _(b'display using template map file (DEPRECATED)'),
157 157 _(b'STYLE'),
158 158 ),
159 159 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
160 160 ]
161 161
162 162 logopts = [
163 163 (b'p', b'patch', None, _(b'show patch')),
164 164 (b'g', b'git', None, _(b'use git extended diff format')),
165 165 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
166 166 (b'M', b'no-merges', None, _(b'do not show merges')),
167 167 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
168 168 (b'G', b'graph', None, _(b"show the revision DAG")),
169 169 ] + templateopts
170 170
171 171 diffopts = [
172 172 (b'a', b'text', None, _(b'treat all files as text')),
173 173 (b'g', b'git', None, _(b'use git extended diff format')),
174 174 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
175 175 (b'', b'nodates', None, _(b'omit dates from diff headers')),
176 176 ]
177 177
178 178 diffwsopts = [
179 179 (
180 180 b'w',
181 181 b'ignore-all-space',
182 182 None,
183 183 _(b'ignore white space when comparing lines'),
184 184 ),
185 185 (
186 186 b'b',
187 187 b'ignore-space-change',
188 188 None,
189 189 _(b'ignore changes in the amount of white space'),
190 190 ),
191 191 (
192 192 b'B',
193 193 b'ignore-blank-lines',
194 194 None,
195 195 _(b'ignore changes whose lines are all blank'),
196 196 ),
197 197 (
198 198 b'Z',
199 199 b'ignore-space-at-eol',
200 200 None,
201 201 _(b'ignore changes in whitespace at EOL'),
202 202 ),
203 203 ]
204 204
205 205 diffopts2 = (
206 206 [
207 207 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
208 208 (
209 209 b'p',
210 210 b'show-function',
211 211 None,
212 212 _(b'show which function each change is in'),
213 213 ),
214 214 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
215 215 ]
216 216 + diffwsopts
217 217 + [
218 218 (
219 219 b'U',
220 220 b'unified',
221 221 b'',
222 222 _(b'number of lines of context to show'),
223 223 _(b'NUM'),
224 224 ),
225 225 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
226 226 (
227 227 b'',
228 228 b'root',
229 229 b'',
230 230 _(b'produce diffs relative to subdirectory'),
231 231 _(b'DIR'),
232 232 ),
233 233 ]
234 234 )
235 235
236 236 mergetoolopts = [
237 237 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
238 238 ]
239 239
240 240 similarityopts = [
241 241 (
242 242 b's',
243 243 b'similarity',
244 244 b'',
245 245 _(b'guess renamed files by similarity (0<=s<=100)'),
246 246 _(b'SIMILARITY'),
247 247 )
248 248 ]
249 249
250 250 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
251 251
252 252 debugrevlogopts = [
253 253 (b'c', b'changelog', False, _(b'open changelog')),
254 254 (b'm', b'manifest', False, _(b'open manifest')),
255 255 (b'', b'dir', b'', _(b'open directory manifest')),
256 256 ]
257 257
258 258 # special string such that everything below this line will be ingored in the
259 259 # editor text
260 260 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
261 261
262 262
def check_at_most_one_arg(opts, *args):
    """abort if more than one of the arguments are in opts

    Returns the unique argument or None if none of them were specified.
    """

    def to_display(name):
        # Command-line flags use '-' where the opts dict keys use '_'.
        return pycompat.sysbytes(name).replace(b'_', b'-')

    found = None
    for arg in args:
        if not opts.get(arg):
            continue
        if found is not None:
            raise error.Abort(
                _(b'cannot specify both --%s and --%s')
                % (to_display(found), to_display(arg))
            )
        found = arg
    return found
282 282
283 283
def check_incompatible_arguments(opts, first, others):
    """abort if the first argument is given along with any of the others

    Unlike check_at_most_one_arg(), `others` are not mutually exclusive
    among themselves, and they're passed as a single collection.
    """
    # Pairwise check keeps `others` independent of each other: only the
    # (first, other) combinations are rejected.
    for other in others:
        check_at_most_one_arg(opts, first, other)
292 292
293 293
def resolvecommitoptions(ui, opts):
    """modify commit options dict to handle related options

    The return value indicates that ``rewrite.update-timestamp`` is the reason
    the ``date`` option is set.
    """
    # --date/--currentdate and --user/--currentuser are mutually exclusive.
    check_at_most_one_arg(opts, b'date', b'currentdate')
    check_at_most_one_arg(opts, b'user', b'currentuser')

    datemaydiffer = False  # date-only change should be ignored?

    if opts.get(b'currentdate'):
        opts[b'date'] = b'%d %d' % dateutil.makedate()
    elif (
        not opts.get(b'date')
        and ui.configbool(b'rewrite', b'update-timestamp')
        # `is None` (not merely falsy) distinguishes "option not given" from
        # an explicit negation (False) — presumably so --no-currentdate can
        # override the config-driven timestamp update; TODO confirm.
        and opts.get(b'currentdate') is None
    ):
        opts[b'date'] = b'%d %d' % dateutil.makedate()
        datemaydiffer = True

    if opts.get(b'currentuser'):
        opts[b'user'] = ui.username()

    return datemaydiffer
319 319
320 320
def checknotesize(ui, opts):
    """Abort if the --note value is too long or spans multiple lines."""
    note = opts.get(b'note')
    if not note:
        # No note supplied: nothing to validate.
        return
    if len(note) > 255:
        raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
    if b'\n' in note:
        raise error.Abort(_(b"note cannot contain a newline"))
332 332
333 333
def ishunk(x):
    """Return True if *x* is a record/crecord hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
337 337
338 338
def newandmodified(chunks, originalchunks):
    """Split selected *chunks* into the set of newly-added-and-modified file
    names and the companion set of files (rename sources) that must also be
    restored for the patch to apply."""
    newlyaddedandmodifiedfiles = set()
    alsorestore = set()
    for chunk in chunks:
        if not ishunk(chunk):
            continue
        header = chunk.header
        if header.isnewfile() and chunk not in originalchunks:
            newlyaddedandmodifiedfiles.add(header.filename())
            # Any other file named in the header is a rename source.
            alsorestore.update(set(header.files()) - {header.filename()})
    return newlyaddedandmodifiedfiles, alsorestore
353 353
354 354
def parsealiases(cmd):
    """Return the list of names (primary name followed by any aliases) from
    a b'|'-separated command specification."""
    return cmd.split(b"|")
357 357
358 358
def setupwrapcolorwrite(ui):
    """Monkey-patch ui.write so diff output is labeled/colorized.

    Returns the original ui.write so the caller can restore it afterwards
    (see recordfilter()).
    """
    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        label = kw.pop('label', b'')
        # patch.difflabel() yields (chunk, label-suffix) pairs.
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write

    def wrap(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)

    setattr(ui, 'write', wrap)
    return oldwrite
373 373
374 374
def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
    """Filter patch hunks interactively, via curses when available.

    Falls back to the text-mode filter when curses was not requested or the
    curses chunk selector raises its fallback error.
    """
    try:
        if usecurses:
            if testfile:
                # Test mode: drive the chunk selector from a script file.
                recordfn = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector
                )
            else:
                recordfn = crecordmod.chunkselector

            return crecordmod.filterpatch(
                ui, originalhunks, recordfn, operation
            )
    except crecordmod.fallbackerror as e:
        ui.warn(b'%s\n' % e)
        ui.warn(_(b'falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, match, operation)
393 393
394 394
def recordfilter(ui, originalhunks, match, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used to build ui messages to indicate the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config(b'experimental', b'crecordtest')
    # Temporarily wrap ui.write so the preview diff is colorized; always
    # restore the original write, even if filtering raises.
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(
            ui, originalhunks, usecurses, testfile, match, operation
        )
    finally:
        ui.write = oldwrite
    return newchunks, newopts
412 412
413 413
def dorecord(
    ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
):
    """Interactively select hunks and commit them via *commitfunc*.

    ``cmdsuggest`` names the command to suggest when the UI is not
    interactive; ``backupall`` backs up every changed file rather than only
    the ones that need it; ``filterfn`` is the hunk-filtering UI (e.g.
    recordfilter).  Aborts when running non-interactively.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _(b'running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _(b'running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get(b'user'):
        ui.username()  # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        if not opts.get(b'interactive-unshelve'):
            checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(
                _(
                    b'cannot partially commit a merge '
                    b'(use "hg commit" instead)'
                )
            )

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        force = opts.get(b'force')
        if not force:
            match = matchmod.badmatch(match, fail)

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status
            tmpstatus = scmutil.status(
                copymod.copy(status.modified),
                copymod.copy(status.added),
                copymod.copy(status.removed),
                copymod.copy(status.deleted),
                copymod.copy(status.unknown),
                copymod.copy(status.ignored),
                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
            )

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True
            )
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, match, status, fail)
        diffopts = patch.difffeatureopts(
            ui,
            opts=opts,
            whitespace=True,
            section=b'commands',
            configprefix=b'commit.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks, match)
        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir. We also will need to restore
        # files that were the sources of renames so that the patch application
        # works.
        newlyaddedandmodifiedfiles, alsorestore = newandmodified(
            chunks, originalchunks
        )
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_(b'no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [
                f
                for f in newfiles
                if f in modified or f in newlyaddedandmodifiedfiles
            ]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join(b'record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(
                    prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
                )
                os.close(fd)
                ui.debug(b'backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get(b'review', False):
                patchtext = (
                    crecordmod.diffhelptext
                    + crecordmod.patchhelptext
                    + fp.read()
                )
                reviewedpatch = ui.edit(
                    patchtext, b"", action=b"diff", repopath=repo.path
                )
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
                mergemod.update(
                    repo,
                    repo.dirstate.p1(),
                    branchmerge=False,
                    force=True,
                    matcher=m,
                )

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug(b'applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in pycompat.iteritems(backups):
                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == b'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # Commit callback executed by commit(); takes the wlock around the
        # whole record operation.
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
644 644
645 645
class dirnode(object):
    """
    Represent a directory in user working copy with information required for
    the purpose of tersing its status.

    path is the path to the directory, without a trailing '/'

    statuses is a set of statuses of all files in this directory (this includes
    all the files in all the subdirectories too)

    files is a list of files which are direct child of this directory

    subdirs is a dictionary of sub-directory name as the key and it's own
    dirnode object as the value
    """

    def __init__(self, dirpath):
        self.path = dirpath  # directory path, no trailing '/'
        self.statuses = set()  # status codes seen anywhere under this dir
        self.files = []  # (filename, status) pairs for direct children
        self.subdirs = {}  # subdir name -> dirnode

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not direct child of this directory, we traverse to the
        directory of which this file is a direct child of and add the file
        there.
        """

        # the filename contains a path separator, it means it's not the direct
        # child of this directory
        if b'/' in filename:
            subdir, filep = filename.split(b'/', 1)

            # does the dirnode object for subdir exists
            if subdir not in self.subdirs:
                subdirpath = pathutil.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            # try adding the file in subdir
            self.subdirs[subdir].addfile(filep, status)

        else:
            self._addfileindir(filename, status)

        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for f, st in self.files:
            yield st, pathutil.join(self.path, f)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to terse
        that status. -> yield (status, dirpath). dirpath will end in '/'.

        2) Otherwise, we do following:

                a) Yield (status, filepath) for all the files which are in this
                directory (only the ones in this directory, not the subdirs)

                b) Recurse the function on all the subdirectories of this
                directory
        """

        if len(self.statuses) == 1:
            # NOTE: pop() mutates self.statuses, so tersewalk() is
            # effectively single-use per dirnode tree.
            onlyst = self.statuses.pop()

            # Making sure we terse only when the status abbreviation is
            # passed as terse argument
            if onlyst in terseargs:
                yield onlyst, self.path + b'/'
                return

        # add the files to status list
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # recurse on the subdirs
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath
745 745
746 746
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory shares the same status.

    statuslist is scmutil.status() object which contains a list of files for
    each status.
    terseargs is string which is passed by the user as the argument to `--terse`
    flag.

    The function makes a tree of objects of dirnode class, and at each node it
    stores the information required to know whether we can terse a certain
    directory or not.
    """
    # the order matters here as that is used to produce final list
    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # checking the argument validity
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.Abort(_(b"'%s' not recognized") % s)

    # creating a dirnode object for the root of the repo
    rootobj = dirnode(b'')
    pstatus = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    tersedict = {}
    for attrname in pstatus:
        # each status is keyed by the first letter of its attribute name
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # we won't be tersing the root dir, so add files in it
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # process each sub-directory and build tersedict
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    tersedlist = []
    for st in allst:
        tersedict[st].sort()
        tersedlist.append(tersedict[st])

    return scmutil.status(*tersedlist)
802 802
803 803
804 804 def _commentlines(raw):
805 805 '''Surround lineswith a comment char and a new line'''
806 806 lines = raw.splitlines()
807 807 commentedlines = [b'# %s' % line for line in lines]
808 808 return b'\n'.join(commentedlines) + b'\n'
809 809
810 810
@attr.s(frozen=True)
class morestatus(object):
    """Extra information shown alongside `hg status` output when the repo is
    in an unfinished state (merge, transplant, ...)."""

    reporoot = attr.ib()
    unfinishedop = attr.ib()
    unfinishedmsg = attr.ib()
    activemerge = attr.ib()
    unresolvedpaths = attr.ib()
    # Use a per-instance factory: a bare `default=set()` would share one
    # mutable set between every morestatus instance.
    _formattedpaths = attr.ib(init=False, default=attr.Factory(set))
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        # Remember which paths were already emitted so _formatconflicts()
        # does not output them twice.
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

        if self.unfinishedop:
            fm.data(unfinished=self.unfinishedop)
            statemsg = (
                _(b'The repository is in an unfinished *%s* state.')
                % self.unfinishedop
            )
            fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
        if self.unfinishedmsg:
            fm.data(unfinishedmsg=self.unfinishedmsg)

        # May also start new data items.
        self._formatconflicts(fm)

        if self.unfinishedmsg:
            fm.plain(
                b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label
            )

    def _formatconflicts(self, fm):
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b'    %s'
                    % util.pathto(self.reporoot, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    '''Unresolved merge conflicts:

%s

To mark files as resolved:  hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
888 888
889 889
def readmorestatus(repo):
    """Return a morestatus object for ``repo``, or None if nothing is pending."""
    statetuple = statemod.getrepostate(repo)
    mergestate = mergemod.mergestate.read(repo)
    activemerge = mergestate.active()
    if not (statetuple or activemerge):
        return None

    op, msg = statetuple if statetuple else (None, None)
    unresolved = sorted(mergestate.unresolved()) if activemerge else None
    return morestatus(repo.root, op, msg, activemerge, unresolved)
906 906
907 907
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            # unambiguous-prefix matching: first alias starting with cmd
            found = next((a for a in aliases if a.startswith(cmd)), None)
        if found is not None:
            isdebug = aliases[0].startswith(b"debug") or found.startswith(
                b"debug"
            )
            bucket = debugchoice if isdebug else choice
            bucket[found] = (aliases, table[entry])

    # only fall back to debug commands when nothing normal matched
    if not choice and debugchoice:
        return debugchoice, allcmds
    return choice, allcmds
945 945
946 946
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact name wins outright
    try:
        return choice[cmd]
    except KeyError:
        pass

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        return next(iter(choice.values()))

    raise error.UnknownCommand(cmd, allcmds)
962 962
963 963
def changebranch(ui, repo, revs, label):
    """ Change the branch name of given revs to label

    Rewrites every changeset in ``revs`` with its branch set to ``label``,
    records obsolescence markers from the old nodes to the new ones, and
    moves bookmarks and (when possible) the working copy along.
    """

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.Abort(b"empty revision set")
        # only a single linear stack may be rewritten at a time
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            raise error.Abort(
                _(b"cannot change branch of non-linear revisions")
            )
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        rpb = {parent.branch() for parent in root.parents()}
        # reusing a parent's branch is fine; any other existing branch name
        # would silently merge two unrelated branches, so refuse it
        if label not in rpb and label in repo.branchmap():
            raise error.Abort(_(b"a branch of the same name already exists"))

        if repo.revs(b'obsolete() and %ld', revs):
            raise error.Abort(
                _(b"cannot change branch of a obsolete changeset")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.Abort(_(b"cannot change branch in middle of a stack"))

        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            # closure over ctx: file contents are taken unchanged from the
            # changeset being rewritten
            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1062 1062
1063 1063
def findrepo(p):
    """Walk up from directory ``p`` and return the closest ancestor that
    contains a '.hg' directory, or None if the filesystem root is reached
    without finding one."""
    while not os.path.isdir(os.path.join(p, b".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() is a fixed point only at the filesystem root
            return None
        p = parent
    return p
1071 1071
1072 1072
def bailifchanged(repo, merge=True, hint=None):
    """Abort unless the working directory is clean.

    With ``merge`` set to False a pending uncommitted merge is tolerated
    (such as when 'update --check' runs).  ``hint`` is forwarded to the
    Abort exception.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint)
    st = repo.status()
    if any([st.modified, st.added, st.removed, st.deleted]):
        raise error.Abort(_(b'uncommitted changes'), hint=hint)
    wctx = repo[None]
    # subrepos must be clean as well
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
1090 1090
1091 1091
def logmessage(ui, opts):
    """Extract the commit message from the -m/--message or -l/--logfile option."""
    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    if message or not logfile:
        return message

    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        # normalize line endings while reading from the file
        return b'\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(
            _(b"can't read commit message '%s': %s")
            % (logfile, encoding.strtolocal(inst.strerror))
        )
1112 1112
1113 1113
def mergeeditform(ctxorbool, baseformname):
    """Return the editform name (referencing a committemplate).

    ``ctxorbool`` is either a ctx to be committed or a bool indicating
    whether a merge is being committed.  The result is ``baseformname``
    with '.merge' appended for merges and '.normal' appended otherwise.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (b".merge" if ismerge else b".normal")
1130 1130
1131 1131
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing but before
    the empty-ness check; it returns the actual text to store in history.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line ('HG: ' prefix and EOL are
    added automatically).

    'editform' is a dot-separated list of names distinguishing the
    purpose of the commit text editing.

    Regardless of 'edit', 'commitforceeditor' is returned whenever
    'finishdesc' or 'extramsg' is given, because those are specific to
    usage in MQ.
    """
    if edit or finishdesc or extramsg:

        def forcingeditor(r, c, s):
            return commitforceeditor(
                r,
                c,
                s,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return forcingeditor

    if editform:

        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)

        return formeditor

    return commiteditor
1162 1162
1163 1163
def _escapecommandtemplate(tmpl):
    """Escape the literal (string) segments of ``tmpl``; template
    constructs are passed through untouched."""
    out = []
    for kind, begin, stop in templater.scantemplate(tmpl, raw=True):
        piece = tmpl[begin:stop]
        out.append(stringutil.escapestr(piece) if kind == b'string' else piece)
    return b''.join(out)
1172 1172
1173 1173
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if tmpl:
        templ = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
        return templ.renderdefault(props)
    return tmpl
1191 1191
1192 1192
def rendertemplate(ctx, tmpl, props=None):
    """Expand the literal template ``tmpl`` byte-string against one changeset.

    Each props item must be a stringify-able value or a callable returning
    such a value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    t = formatter.maketemplater(
        repo.ui,
        tmpl,
        defaults=templatekw.keywords,
        resources=formatter.templateresources(repo.ui, repo),
    )
    mapping = {b'ctx': ctx}
    mapping.update(props or {})
    return t.renderdefault(mapping)
1208 1208
1209 1209
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # map of %-specs to their template equivalents; entries are added
    # below only when the caller supplied the values they depend on
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    pieces = []
    for kind, begin, stop in templater.scantemplate(pat, raw=True):
        if kind != b'string':
            # template constructs are kept verbatim
            pieces.append(pat[begin:stop])
            continue
        pos = begin
        while pos < stop:
            pct = pat.find(b'%', pos, stop)
            if pct < 0:
                # no more %-specs in this string segment
                pieces.append(stringutil.escapestr(pat[pos:stop]))
                break
            pieces.append(stringutil.escapestr(pat[pos:pct]))
            if pct + 2 > stop:
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            spec = pat[pct + 1 : pct + 2]
            pos = pct + 2
            try:
                pieces.append(expander[spec])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % spec
                )
    return b''.join(pieces)
1279 1279
1280 1280
def makefilename(ctx, pat, **props):
    """Expand an old-style %-format filename pattern against ``ctx``."""
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(
        ctx, _buildfntemplate(pat, **props), pycompat.byteskwargs(props)
    )
1289 1289
1290 1290
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == b'-'
1294 1294
1295 1295
1296 1296 class _unclosablefile(object):
1297 1297 def __init__(self, fp):
1298 1298 self._fp = fp
1299 1299
1300 1300 def close(self):
1301 1301 pass
1302 1302
1303 1303 def __iter__(self):
1304 1304 return iter(self._fp)
1305 1305
1306 1306 def __getattr__(self, attr):
1307 1307 return getattr(self._fp, attr)
1308 1308
1309 1309 def __enter__(self):
1310 1310 return self
1311 1311
1312 1312 def __exit__(self, exc_type, exc_value, exc_tb):
1313 1313 pass
1314 1314
1315 1315
def makefileobj(ctx, pat, mode=b'wb', **props):
    """Open a file object for pattern ``pat``; '-' (or empty) maps to stdio."""
    if isstdiofilename(pat):
        ui = ctx.repo().ui
        # stdio streams must never really be closed by callers
        stream = ui.fin if mode in (b'r', b'rb') else ui.fout
        return _unclosablefile(stream)
    return open(makefilename(ctx, pat, **props), mode)
1328 1328
1329 1329
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog

    Which storage is opened is controlled by the mutually exclusive
    --changelog/--manifest/--dir values in ``opts``, or by a plain file
    path in ``file_``.  With ``returnrevlog`` the result is coerced to an
    actual revlog instance (raising if that is not possible).
    """
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    msg = None
    # validate option combinations before touching any storage
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            # --dir addresses a sub-manifest, which only exists with
            # tree manifests
            if b'treemanifest' not in repo.requirements:
                raise error.Abort(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.Abort(_(b'%r does not appear to be a revlog') % r)

    if not r:
        if not returnrevlog:
            raise error.Abort(_(b'cannot give path to non-revlog'))

        # fall back to opening the .i file named on the command line directly
        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_(b"revlog '%s' not found") % file_)
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
        )
    return r
1397 1397
1398 1398
def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    Like ``openstorage()`` except the result is always an actual revlog.

    In most cases a caller cares about the main storage object, not the
    revlog backing it, so this function should only be used by code that
    must examine low-level revlog implementation details, e.g. debug
    commands.
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1410 1410
1411 1411
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files, recording the operation.

    ``pats`` holds the source patterns followed by the destination.
    Returns True if any file failed to be copied or moved.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, also used for collision detection
    after = opts.get(b"after")
    dryrun = opts.get(b"dry_run")
    wctx = repo[None]

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    def walkpat(pat):
        # resolve one source pattern to a list of (abs, rel, exact) tuples
        srcs = []
        if after:
            badstates = b'?'
        else:
            badstates = b'?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = uipathfn(abs)
            exact = m.exact(abs)
            if state in badstates:
                # warn only for files the user named explicitly
                if exact and state == b'?':
                    ui.warn(_(b'%s: not copying - file is not managed\n') % rel)
                if exact and state == b'r':
                    ui.warn(
                        _(
                            b'%s: not copying - file has been marked for'
                            b' remove\n'
                        )
                        % rel
                    )
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy/move; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if b'/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit(b'/', 1)
            abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(
                _(b'%s: not overwriting - %s collides with %s\n')
                % (
                    reltarget,
                    repo.pathto(abssrc, cwd),
                    repo.pathto(prevsrc, cwd),
                )
            )
            return True  # report a failure

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
                abstarget
            ):
                if not rename:
                    ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
                    return True  # report a failure
                exists = False
                samefile = True

        # parsed as: (not after and exists) or (after and state in b'mn')
        if not after and exists or after and state in b'mn':
            if not opts[b'force']:
                if state in b'mn':
                    msg = _(b'%s: not overwriting - file already committed\n')
                    if after:
                        flags = b'--after --force'
                    else:
                        flags = b'--force'
                    if rename:
                        hint = (
                            _(
                                b"('hg rename %s' to replace the file by "
                                b'recording a rename)\n'
                            )
                            % flags
                        )
                    else:
                        hint = (
                            _(
                                b"('hg copy %s' to replace the file by "
                                b'recording a copy)\n'
                            )
                            % flags
                        )
                else:
                    msg = _(b'%s: not overwriting - file exists\n')
                    if rename:
                        hint = _(
                            b"('hg rename --after' to record the rename)\n"
                        )
                    else:
                        hint = _(b"('hg copy --after' to record the copy)\n")
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return True  # report a failure

        if after:
            if not exists:
                if rename:
                    ui.warn(
                        _(b'%s: not recording move - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                else:
                    ui.warn(
                        _(b'%s: not recording copy - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                return True  # report a failure
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or b'.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name so the
                    # filesystem actually records the new case
                    tmp = target + b"~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(
                        _(b'%s: cannot copy - %s\n')
                        % (relsrc, encoding.strtolocal(inst.strerror))
                    )
                    return True  # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(
            ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
        )
        if rename and not dryrun:
            # srcexists was set in the elif branch above, which is the only
            # way to reach here with 'not after'
            if not after and srcexists and not samefile:
                rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
            wctx.forget([abssrc])

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # score how many already-existing targets this strip
                    # length would produce
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(
                        dest, os.path.basename(util.localpath(p))
                    )
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_(b'no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_(b'no destination specified'))
    # the last pattern is the destination; the rest are sources
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(
                _(
                    b'with multiple sources, destination must be an '
                    b'existing directory'
                )
            )
        if util.endswithsep(dest):
            raise error.Abort(_(b'destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_(b'no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    return errors != 0
1695 1695
1696 1696
## facility to let extensions process additional data into an import patch
# lists of identifiers to be executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mapping from identifier to actual import function
#
# 'preimport' functions are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' functions are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1717 1717
1718 1718
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of options passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple: a human-readable status
    message (or None when there was no patch to import), the node of the
    created commit (or None), and whether hunks were rejected during a
    --partial import.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    # no patch file name means there is nothing to import
    if not tmpname:
        return None, None, False

    rejects = False

    # the commit message comes from, in order of preference: the command
    # line, the patch itself, or (when both are missing) the editor
    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    # normalize to exactly two parents (the second possibly null)
    if len(parents) == 1:
        parents.append(repo[nullid])
    if opts.get(b'exact'):
        if not nodeid or not p1:
            raise error.Abort(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullid]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullid]
        except error.RepoError:
            # recorded parents are unknown to this repo; fall back to the
            # current parents
            p1, p2 = parents
        if p2.node() == nullid:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    if update:
        # normal mode: apply the patch to the working directory
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchError as e:
            # with --partial, hunk failures are tolerated and recorded
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            # let extensions contribute extra commit metadata
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass mode: apply the patch in memory against p1 and commit
        # through a memctx, leaving the working directory untouched
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )
            n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        # --exact promises a bit-for-bit identical changeset
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects
1915 1915
1916 1916
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1924 1924
1925 1925
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Emit one changeset as an '# HG changeset patch' document via ``fm``.

    The header (user, date, optional branch, node, parent(s), plus any
    extension-registered extra lines) is written first, then the
    description, then the diff against the selected parent.  ``seqno`` is
    the 1-based position of this patch in the exported series and is only
    passed to the 'extraexport' callbacks.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against the second parent instead of the first
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    # the branch line is omitted for the implicit 'default' branch
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        # plain output: stream labeled diff chunks directly
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        # structured output: attach the whole diff as one data field
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
1969 1969
1970 1970
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file

    ``dest`` is used only for progress output; names starting with ``<``
    (e.g. ``<unnamed>``) are treated as anonymous and not announced.
    """
    announce = not dest.startswith(b'<')
    for seqno, rev in enumerate(revs, 1):
        if announce:
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, repo[rev], fm, match, switch_parent, seqno, diffopts)
1979 1979
1980 1980
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets to possibly multiple files

    Each revision's file name is rendered from ``fntemplate``; revisions
    mapping to the same name are appended to that file in order.
    """
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)

    # Group (seqno, rev) pairs by rendered file name, preserving the order
    # in which each name first appears.
    groups = util.sortdict()
    for seqno, rev in enumerate(revs, 1):
        name = makefilename(
            repo[rev], fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        groups.setdefault(name, []).append((seqno, rev))

    for name, entries in groups.items():
        with formatter.maybereopen(basefm, name) as fm:
            repo.ui.note(b"%s\n" % name)
            for seqno, rev in entries:
                fm.startitem()
                _exportsingle(
                    repo, repo[rev], fm, match, switch_parent, seqno, diffopts
                )
2005 2005
2006 2006
def _prefetchchangedfiles(repo, revs, match):
    """Warm the file cache for every file touched by ``revs``.

    A None/falsy ``match`` means all changed files are prefetched.
    """
    changed = {
        f
        for rev in revs
        for f in repo[rev].files()
        if not match or match(f)
    }
    scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, changed))
2014 2014
2015 2015
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    """export changesets as hg patches

    Args:
      repo: repository the revisions are exported from.
      revs: list of revision numbers to export.
      basefm: formatter that receives the patches when no file template
        is in effect.
      fntemplate: optional template used to derive one output file name
        per revision; when empty, everything goes to ``basefm``.
      switch_parent: when True, diff against the second parent when it is
        not null (default diffs against p1).
      opts: diff options used to generate each patch.
      match: optional matcher restricting which file changes are exported.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted either to per-revision files
      named via ``fntemplate`` or, absent a template, to ``basefm``.
    """
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
    else:
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
2057 2057
2058 2058
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream"""
    destname = getattr(fp, 'name', b'<unnamed>')
    _prefetchchangedfiles(repo, revs, match)

    with formatter.formatter(repo.ui, fp, b'export', {}) as fm:
        _exportfile(repo, revs, fm, destname, switch_parent, opts, match)
2066 2066
2067 2067
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    # optional leading position of the marker within a list
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    # successor nodes are omitted when there are none
    fm.condwrite(
        succs,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, succs), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parents), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    meta = marker.metadata().copy()
    # drop 'date' from the metadata dict: it was already written above
    meta.pop(b'date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
2098 2098
2099 2099
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = dateutil.matchdate(date)
    matchall = scmutil.matchall(repo)
    candidates = {}

    # collect the hg date of every changeset whose timestamp matches
    def prep(ctx, fns):
        hgdate = ctx.date()
        if datematch(hgdate[0]):
            candidates[ctx.rev()] = hgdate

    for ctx in walkchangerevs(repo, matchall, {b'rev': None}, prep):
        rev = ctx.rev()
        if rev in candidates:
            ui.status(
                _(b"found revision %d from %s\n")
                % (rev, dateutil.datestr(candidates[rev]))
            )
            return b'%d' % rev

    raise error.Abort(_(b"revision matching date not found"))
2122 2122
2123 2123
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes, doubling each time until sizelimit is reached,
    then repeating the final size forever."""
    while windowsize < sizelimit:
        yield windowsize
        windowsize *= 2
    while True:
        yield windowsize
2129 2129
2130 2130
def _walkrevs(repo, opts):
    """Resolve the revisions a log-style walk should visit.

    Returns a smartset in the order the walk should proceed (newest
    first unless --rev says otherwise).
    """
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get(b'follow') or opts.get(b'follow_first')
    revspec = opts.get(b'rev')
    if revspec:
        revs = scmutil.revrange(repo, revspec)
    elif follow and repo.dirstate.p1() == nullid:
        # following from the null revision: nothing to walk
        revs = smartset.baseset()
    elif follow:
        revs = repo.revs(b'reverse(:.)')
    else:
        revs = smartset.spanset(repo)
        revs.reverse()
    return revs
2145 2145
2146 2146
class FileWalkError(Exception):
    """Raised by walkfilerevs() when history cannot be walked via filelogs
    alone, signalling callers to fall back to the slow changelog path."""

    pass
2149 2149
2150 2150
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    As a side effect, ``fncache`` is filled with rev -> [filenames]
    entries for the revisions returned.
    '''
    wanted = set()
    # (filename, filenode) pairs discovered through renames; they are
    # appended while walking and consumed by iterfiles() below
    copies = []
    minrev, maxrev = min(revs), max(revs)

    def filerevs(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in pycompat.xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append(
                (linkrev, parentlinkrevs, follow and filelog.renamed(n))
            )

        return reversed(revs)

    def iterfiles():
        # yields (filename, filenode-or-None) for every file to inspect:
        # first the explicitly matched files, then any rename sources
        # accumulated in 'copies'
        pctx = repo[b'.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(
                        _(
                            b'cannot follow file not in parent '
                            b'revision: "%s"'
                        )
                        % filename
                    )
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _(b'cannot follow nonexistent file: "%s"') % file_
                    )
                raise FileWalkError(b"Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevs(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2256 2256
2257 2257
class _followfilter(object):
    """Stateful predicate selecting revisions related by ancestry.

    The first revision passed to match() becomes the starting point.
    After that, a revision matches when it is reachable from the start:
    a descendant when walking forward (rev > startrev) or an ancestor
    when walking backward. With onlyfirst=True only first parents are
    considered.

    NOTE(review): the roots bookkeeping appears to assume match() is
    called in a monotonic order (ascending for the forward walk,
    descending for the backward walk) -- confirm at call sites.
    """

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        # startrev is latched on the first call to match()
        self.startrev = nullrev
        # frontier of known-related revisions
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        """Return True if ``rev`` is related to the starting revision."""

        def realparents(rev):
            # parents of rev, restricted to the first one with onlyfirst,
            # and always excluding the null revision
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(
                    lambda x: x != nullrev, self.repo.changelog.parentrevs(rev)
                )

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
2296 2296
2297 2297
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    allfiles = opts.get(b'all_files')
    follow = opts.get(b'follow') or opts.get(b'follow_first')
    revs = _walkrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # the slow path reads every changeset; only needed for patterns or
    # when removed files must be considered
    slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
    fncache = {}
    change = repo.__getitem__

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always() or allfiles:
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == b'.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(
                _(b'can only follow copies/renames for explicit filenames')
            )

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # each candidate rev is inspected at most once
                    self.revs.discard(value)
                    ctx = change(value)
                    if allfiles:
                        matches = list(ctx.manifest().walk(match))
                    else:
                        matches = [f for f in ctx.files() if match(f)]
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get(b'prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in pycompat.xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): this relies on 'wanted' supporting
                # subtraction of a list (smartset semantics); a plain
                # set here would raise TypeError -- confirm which types
                # can reach this branch when --prune is used
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first'))

            def want(rev):
                return ff.match(rev) and rev in wanted

        else:

            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            # gather up to windowsize wanted revisions from the walk order
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # first pass in ascending order: let prepare() gather data
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:

                    def fns_generator():
                        if allfiles:

                            def bad(f, msg):
                                pass

                            for f in ctx.matches(matchmod.badmatch(match, bad)):
                                yield f
                        else:
                            for f in ctx.files():
                                if match(f):
                                    yield f

                    fns = fns_generator()
                prepare(ctx, fns)
            # second pass in the walk order: yield the contexts
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2452 2452
2453 2453
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule the files matched by ``match`` for addition.

    An "adding ..." message is printed for each scheduled file (always
    in verbose mode, otherwise only for files that were not named
    exactly). Subrepositories listed in the working context are recursed
    into. With ``explicitonly``, only exactly-matched files are added.

    Returns the list of files that could not be added.
    """
    bad = []

    # record files the matcher flags as bad while still honoring the
    # matcher's own bad-file callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit file names for case collisions on request
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            if opts.get('subrepos'):
                # --subrepos: recurse non-explicitly into the subrepo
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2512 2512
2513 2513
def addwebdirpath(repo, serverpath, webconf):
    """Register ``repo`` under ``serverpath`` in the hgweb config mapping
    ``webconf``, recursing into every subrepository ever recorded."""
    root = repo.root
    webconf[serverpath] = root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, root))

    # any revision that touched .hgsub may reference subrepositories
    for rev in repo.revs(b'filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2522 2522
2523 2523
def forget(
    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Stop tracking the matched files without deleting them.

    Recurses into subrepositories. With ``interactive``, the user is
    prompted per file. A "removing ..." message is printed for each
    forgotten file (always in verbose or interactive mode, otherwise
    only for non-exact matches).

    Returns a ``(bad, forgot)`` pair: files that could not be forgotten
    and files that actually were.
    """
    if dryrun and interactive:
        raise error.Abort(_(b"cannot specify both --dry-run and --interactive"))
    bad = []
    # record files the matcher flags as bad while still honoring the
    # matcher's own bad-file callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # everything currently tracked, whatever its state
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(
                submatch,
                subprefix,
                subuipathfn,
                dryrun=dryrun,
                interactive=interactive,
            )
            bad.extend([subpath + b'/' + f for f in subbad])
            forgot.extend([subpath + b'/' + f for f in subforgot])
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not explicitonly:
        # warn about named files that are not tracked to begin with
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(
                            _(
                                b'not removing %s: '
                                b'file is already untracked\n'
                            )
                            % uipathfn(f)
                        )
                    bad.append(f)

    if interactive:
        responses = _(
            b'[Ynsa?]'
            b'$$ &Yes, forget this file'
            b'$$ &No, skip this file'
            b'$$ &Skip remaining files'
            b'$$ Include &all remaining files'
            b'$$ &? (display help)'
        )
        # iterate over a copy: the loop mutates 'forget'
        for filename in forget[:]:
            r = ui.promptchoice(
                _(b'forget %s %s') % (uipathfn(filename), responses)
            )
            if r == 4:  # ? -- show help, then re-prompt
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(
                        _(b'forget %s %s') % (uipathfn(filename), responses)
                    )
            if r == 0:  # yes
                continue
            elif r == 1:  # no
                forget.remove(filename)
            elif r == 2:  # Skip -- drop this and all remaining files
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3:  # All -- keep everything remaining
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2621 2621
2622 2622
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """List the files of ``ctx`` matched by ``m`` through formatter ``fm``.

    ``fmt`` is applied to each ui-relative path. Subrepositories are
    recursed into when ``subrepos`` is set or when they are matched.
    Returns 0 if at least one file was listed (here or in a subrepo),
    1 otherwise.
    """
    ret = 1

    # size/flags require a filectx, which is costly -- only fetch it when
    # the output actually uses those fields
    needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
    for f in ctx.matches(m):
        fm.startitem()
        fm.context(ctx=ctx)
        if needsfctx:
            fc = ctx[f]
            fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
        fm.data(path=f)
        fm.plain(fmt % uipathfn(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if (
                    sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
                    == 0
                ):
                    ret = 0
            except error.LookupError:
                ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    return ret
2656 2656
2657 2657
def remove(
    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
):
    """Mark files matching matcher ``m`` as removed (backend of ``hg remove``).

    Recurses into subrepositories, warns about files that cannot be removed
    (untracked, still existing, modified, or marked for add), prints a
    "removing ..." status line per file, and — unless ``dryrun`` — unlinks the
    files (when not ``after``) and forgets them in the dirstate.

    If ``warnings`` is None, warnings are collected locally and printed at the
    end; otherwise they are appended to the caller's list (used for the
    recursive subrepo calls) and printing is left to the caller.

    Returns 0 on success, 1 if any file could not be removed.
    """
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean

    wctx = repo[None]

    # Only the outermost call (warnings is None) prints the accumulated
    # warnings; nested subrepo calls share the caller's list.
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(
        _(b'searching'), total=len(subs), unit=_(b'subrepos')
    )
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(
                    submatch,
                    subprefix,
                    subuipathfn,
                    after,
                    force,
                    subrepos,
                    dryrun,
                    warnings,
                ):
                    ret = 1
            except error.LookupError:
                warnings.append(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = pathutil.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(
        _(b'deleting'), total=len(files), unit=_(b'files')
    )
    for f in files:

        def insubrepo():
            # f lives inside some subrepo (handled by the recursion above)
            for subpath in wctx.substate:
                if f.startswith(subpath + b'/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        # tracked files, directories, '.' and subrepo paths are handled
        # elsewhere; only explicit untracked paths warrant a warning here
        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(
                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
                )
            else:
                warnings.append(
                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
                )
            # missing files will generate a warning elsewhere
            ret = 1
    progress.complete()

    # NOTE(review): the local name `list` shadows the builtin; it holds the
    # files that will actually be removed/forgotten below.
    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only record files already deleted from disk
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=len(remaining), unit=_(b'files')
        )
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(
                    _(b'not removing %s: file still exists\n') % uipathfn(f)
                )
                ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
        )
        for f in modified:
            progress.increment()
            warnings.append(
                _(
                    b'not removing %s: file is modified (use -f'
                    b' to force removal)\n'
                )
                % uipathfn(f)
            )
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(
                _(
                    b"not removing %s: file has been marked for add"
                    b" (use 'hg forget' to undo add)\n"
                )
                % uipathfn(f)
            )
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(
        _(b'deleting'), total=len(list), unit=_(b'files')
    )
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue  # we never unlink added files on remove
                    rmdir = repo.ui.configbool(
                        b'experimental', b'removeemptydirs'
                    )
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2807 2807
2808 2808
2809 2809 def _catfmtneedsdata(fm):
2810 2810 return not fm.datahint() or b'data' in fm.datahint()
2811 2811
2812 2812
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must
    call this method first."""

    # Fetching file contents can be expensive (e.g. lfs), so skip it
    # entirely when the formatter did not ask for the b'data' field.
    if _catfmtneedsdata(fm):
        blob = ctx[path].data()
        if decode:
            blob = ctx.repo().wwritedata(path, blob)
    else:
        blob = b''
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write(b'data', b'%s', blob)
    fm.data(path=path)
2830 2830
2831 2831
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Output the contents of files in ``ctx`` matching ``matcher``.

    When ``fntemplate`` is given, each file is written to a file named by
    expanding the template; otherwise output goes through ``basefm``.
    Recurses into subrepositories. Returns 0 if at least one file was
    written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        # Emit one file, either to a template-derived filename or to basefm.
        filename = None
        if fntemplate:
            filename = makefilename(
                ctx, fntemplate, pathname=os.path.join(prefix, path)
            )
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                # only prefetch contents when the formatter actually wants them
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
                write(file)
                return 0
        except KeyError:
            # file not in the manifest; fall through to the generic walk
            pass

    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [ctx.rev()], matcher)

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            # sub.cat returns 0 on success, so a falsy result means we
            # produced output for at least one file
            if not sub.cat(
                submatch,
                basefm,
                fntemplate,
                subprefix,
                **pycompat.strkwargs(opts)
            ):
                err = 0
        except error.RepoLookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return err
2892 2892
2893 2893
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    Parses --date, builds the log message and the matcher, optionally runs
    addremove under a dirstate guard, then delegates the actual commit to
    ``commitfunc(ui, repo, message, matcher, opts)`` and returns its result.
    '''
    date = opts.get(b'date')
    if date:
        # normalize the user-supplied date string in place for commitfunc
        opts[b'date'] = dateutil.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    dsguard = None
    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get(b'addremove'):
        dsguard = dirstateguard.dirstateguard(repo, b'commit')
    # the guard (when present) rolls the dirstate back if addremove or the
    # commit itself fails
    with dsguard or util.nullcontextmanager():
        if dsguard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
                raise error.Abort(
                    _(b"failed to mark all new/missing files as added/removed")
                )

        return commitfunc(ui, repo, message, matcher, opts)
2917 2917
2918 2918
def samefile(f, ctx1, ctx2):
    """Report whether file ``f`` is identical in ``ctx1`` and ``ctx2``.

    Identical means: present in both with equal content and flags, or
    absent from both. Present in only one of them is not identical.
    """
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if not in1:
        # absent on both sides counts as "same"
        return not in2
    if not in2:
        return False
    fctx1 = ctx1.filectx(f)
    fctx2 = ctx2.filectx(f)
    return not fctx1.cmp(fctx2) and fctx1.flags() == fctx2.flags()
2929 2929
2930 2930
def amend(ui, repo, old, extra, pats, opts):
    """Amend changeset ``old`` with working-copy changes matching ``pats``.

    Creates a replacement changeset on top of ``old``'s first parent that
    combines ``old`` with the matched working-copy changes, reroutes the
    working-copy parent to it, obsoletes/strips ``old`` and fixes up the
    dirstate. Returns the new node, or ``old.node()`` when nothing would
    change (same files, description, user, date and extra).
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username()  # raise exception if username not set

    ui.note(_(b'amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the from the wctx
        extra.update(wctx.extra())

        # date-only change should be ignored?
        datemaydiffer = resolvecommitoptions(ui, opts)

        date = old.date()
        if opts.get(b'date'):
            date = dateutil.parsedate(opts.get(b'date'))
        user = opts.get(b'user') or old.user()

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            st = base.status(old)
            files = set(st.modified) | set(st.added) | set(st.removed)
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        relative = scmutil.anypats(pats, opts)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
        if opts.get(b'addremove') and scmutil.addremove(
            repo, matcher, b"", uipathfn, opts
        ):
            raise error.Abort(
                _(b"failed to mark all new/missing files as added/removed")
            )

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if b'.hgsub' in wctx or b'.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher
            )
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        ms = mergemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)

        filestoamend = {f for f in wctx.files() if matcher(f)}

        changes = len(filestoamend) > 0
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # BUG FIX: this used to test `old.p2` (a bound method, which is
            # always truthy), so the merge-only copy pass ran for every
            # amend. Call the method, matching the `ctx.p2()` idiom used
            # elsewhere, so it only runs for merge commits.
            if old.p2():
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [
                f
                for f in files
                if (f not in filestoamend or not samefile(f, wctx, base))
            ]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(
                        repo,
                        ctx_,
                        fctx.path(),
                        fctx.data(),
                        islink=b'l' in flags,
                        isexec=b'x' in flags,
                        copysource=copied.get(path),
                    )
                    return mctx
                except KeyError:
                    return None

        else:
            ui.note(_(b'copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, b'commit.amend')

        if not message:
            message = old.description()
            # Default if message isn't provided and --edit is not passed is to
            # invoke editor, but allow --no-edit. If somehow we don't have any
            # description, let's always start the editor.
            doedit = not message or opts.get(b'edit') in [True, None]
        else:
            # Default if message is provided is to not invoke editor, but allow
            # --edit.
            doedit = opts.get(b'edit') is True
        editor = getcommiteditor(edit=doedit, editform=editform)

        # pureextra is snapshotted before amend_source is added so the
        # no-op detection below ignores that bookkeeping key
        pureextra = extra.copy()
        extra[b'amend_source'] = old.hex()

        new = context.memctx(
            repo,
            parents=[base.node(), old.p2().node()],
            text=message,
            files=files,
            filectxfn=filectxfn,
            user=user,
            date=date,
            extra=extra,
            editor=editor,
        )

        newdesc = changelog.stripdesc(new.description())
        if (
            (not changes)
            and newdesc == old.description()
            and user == old.user()
            and (date == old.date() or datemaydiffer)
            and pureextra == old.extra()
        ):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get(b'secret'):
            commitphase = phases.secret
        newid = repo.commitctx(new)

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get(b'note'):
            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
        backup = ui.configbool(b'rewrite', b'backup-bundle')
        scmutil.cleanupnodes(
            repo,
            mapping,
            b'amend',
            metadata=obsmetadata,
            fixphase=True,
            targetphase=commitphase,
            backup=backup,
        )

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and modified in the
        # amend to "normal" in the dirstate. We need to use "normallookup" since
        # the files may have changed since the command started; using "normal"
        # would mark them as clean but with uncommitted contents.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normallookup(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
3158 3158
3159 3159
def commiteditor(repo, ctx, subs, editform=b''):
    """Return ctx's commit message, or run an editor to obtain one.

    A non-empty existing description is used verbatim; otherwise the user
    is dropped into the commit editor with unchanged-message detection on.
    """
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(
        repo, ctx, subs, editform=editform, unchangedmessagedetection=True
    )
3166 3166
3167 3167
def commitforceeditor(
    repo,
    ctx,
    subs,
    finishdesc=None,
    extramsg=None,
    editform=b'',
    unchangedmessagedetection=False,
):
    """Run the user's editor to obtain a commit message for ``ctx``.

    ``finishdesc`` is an optional post-processing callable applied to the
    edited text; ``extramsg`` replaces the default "leave empty to abort"
    hint; ``editform`` selects a [committemplate] template by dotted-name
    lookup. Raises Abort on an empty message, or — when
    ``unchangedmessagedetection`` is set — when the user left the templated
    text untouched. Returns the final message.
    """
    if not extramsg:
        extramsg = _(b"Leave message empty to abort commit.")

    # look up the most specific matching [committemplate] entry by
    # progressively stripping trailing components from the editform,
    # e.g. changeset.commit.amend -> changeset.commit -> changeset
    forms = [e for e in editform.split(b'.') if e]
    forms.insert(0, b'changeset')
    templatetext = None
    while forms:
        ref = b'.'.join(forms)
        if repo.ui.config(b'committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref
            )
            break
        forms.pop()
    else:
        # no template configured: fall back to the built-in skeleton
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = encoding.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(
        committext,
        ctx.user(),
        ctx.extra(),
        editform=editform,
        pending=pending,
        repopath=repo.path,
        action=b'commit',
    )
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[: stripbelow.start()]

    # drop the HG: comment lines the skeleton/template inserted
    text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_(b"empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_(b"commit message unchanged"))

    return text
3231 3231
3232 3232
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the [committemplate] template ``ref`` for ``ctx``.

    All [committemplate] config entries are loaded into the templater's
    cache (unquoted) so the referenced template can use them; the rendered
    text is captured from the ui buffer and returned.
    """
    ui = repo.ui
    spec = formatter.templatespec(ref, None, None)
    templ = logcmdutil.changesettemplater(ui, repo, spec)
    overrides = (
        (k, templater.unquotestring(v))
        for k, v in ui.configitems(b'committemplate')
    )
    templ.t.cache.update(overrides)

    # ensure that extramsg is a string for the template
    extramsg = extramsg or b''

    ui.pushbuffer()
    templ.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3248 3248
3249 3249
def hgprefix(msg):
    """Prefix every non-empty line of ``msg`` with ``HG: `` and rejoin.

    Empty lines are dropped entirely rather than prefixed.
    """
    prefixed = (b"HG: " + line for line in msg.split(b"\n") if line)
    return b"\n".join(prefixed)
3252 3252
3253 3253
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default commit-message skeleton shown in the editor.

    The skeleton starts with ctx's existing description (if any), followed
    by HG:-prefixed hint lines describing user, branch, bookmark, subrepos
    and the added/changed/removed files.
    """
    lines = []
    add = lines.append
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    desc = ctx.description()
    if desc:
        add(desc)
    add(b"")
    add(b"")  # Empty line between message and comments.
    add(
        hgprefix(
            _(
                b"Enter commit message."
                b" Lines beginning with 'HG:' are removed."
            )
        )
    )
    add(hgprefix(extramsg))
    add(b"HG: --")
    add(hgprefix(_(b"user: %s") % ctx.user()))
    if ctx.p2():
        add(hgprefix(_(b"branch merge")))
    if ctx.branch():
        add(hgprefix(_(b"branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        add(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
    for s in subs:
        add(hgprefix(_(b"subrepo %s") % s))
    for f in added:
        add(hgprefix(_(b"added %s") % f))
    for f in modified:
        add(hgprefix(_(b"changed %s") % f))
    for f in removed:
        add(hgprefix(_(b"removed %s") % f))
    if not (added or modified or removed):
        add(hgprefix(_(b"no files changed")))
    add(b"")

    return b"\n".join(lines)
3287 3287
3288 3288
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print post-commit status messages for the new changeset ``node``.

    Emits 'created new head' when the commit adds a head to ``branch``
    (given the previous branch heads ``bheads``), 'reopening closed branch
    head' when committing on top of a closed head, and — in debug/verbose
    mode — the 'committed changeset' line.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    # a new head was created iff the node is not among the old branch heads
    # and none of its parents was a head of this branch
    if (
        not opts.get(b'amend')
        and bheads
        and node not in bheads
        and not [
            x for x in parents if x.node() in bheads and x.branch() == branch
        ]
    ):
        repo.ui.status(_(b'created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N y additional topo root
        #
        # B N y additional branch root
        # C N y additional topo head
        # H N n usual case
        #
        # B B y weird additional branch root
        # C B y branch merge
        # H B n merge with named branch
        #
        # C C y additional head from merge
        # C H n merge with a head
        #
        # H H n head merge: head count decreases

    if not opts.get(b'close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(
                    _(b'reopening closed branch head %d\n') % r.rev()
                )

    if repo.ui.debugflag:
        repo.ui.write(
            _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
        )
    elif repo.ui.verbose:
        repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3346 3346
3347 3347
def postcommitstatus(repo, pats, opts):
    """Return working-directory status for files matching pats/opts."""
    wctx = repo[None]
    matcher = scmutil.match(wctx, pats, opts)
    return repo.status(match=matcher)
3350 3350
3351 3351
3352 3352 def revert(ui, repo, ctx, parents, *pats, **opts):
3353 3353 opts = pycompat.byteskwargs(opts)
3354 3354 parent, p2 = parents
3355 3355 node = ctx.node()
3356 3356
3357 3357 mf = ctx.manifest()
3358 3358 if node == p2:
3359 3359 parent = p2
3360 3360
3361 3361 # need all matching names in dirstate and manifest of target rev,
3362 3362 # so have to walk both. do not print errors if files exist in one
3363 3363 # but not other. in both cases, filesets should be evaluated against
3364 3364 # workingctx to get consistent result (issue4497). this means 'set:**'
3365 3365 # cannot be used to select missing files from target rev.
3366 3366
3367 3367 # `names` is a mapping for all elements in working copy and target revision
3368 3368 # The mapping is in the form:
3369 3369 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3370 3370 names = {}
3371 3371 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3372 3372
3373 3373 with repo.wlock():
3374 3374 ## filling of the `names` mapping
3375 3375 # walk dirstate to fill `names`
3376 3376
3377 3377 interactive = opts.get(b'interactive', False)
3378 3378 wctx = repo[None]
3379 3379 m = scmutil.match(wctx, pats, opts)
3380 3380
3381 3381 # we'll need this later
3382 3382 targetsubs = sorted(s for s in wctx.substate if m(s))
3383 3383
3384 3384 if not m.always():
3385 3385 matcher = matchmod.badmatch(m, lambda x, y: False)
3386 3386 for abs in wctx.walk(matcher):
3387 3387 names[abs] = m.exact(abs)
3388 3388
3389 3389 # walk target manifest to fill `names`
3390 3390
3391 3391 def badfn(path, msg):
3392 3392 if path in names:
3393 3393 return
3394 3394 if path in ctx.substate:
3395 3395 return
3396 3396 path_ = path + b'/'
3397 3397 for f in names:
3398 3398 if f.startswith(path_):
3399 3399 return
3400 3400 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3401 3401
3402 3402 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3403 3403 if abs not in names:
3404 3404 names[abs] = m.exact(abs)
3405 3405
3406 3406 # Find status of all file in `names`.
3407 3407 m = scmutil.matchfiles(repo, names)
3408 3408
3409 3409 changes = repo.status(
3410 3410 node1=node, match=m, unknown=True, ignored=True, clean=True
3411 3411 )
3412 3412 else:
3413 3413 changes = repo.status(node1=node, match=m)
3414 3414 for kind in changes:
3415 3415 for abs in kind:
3416 3416 names[abs] = m.exact(abs)
3417 3417
3418 3418 m = scmutil.matchfiles(repo, names)
3419 3419
3420 3420 modified = set(changes.modified)
3421 3421 added = set(changes.added)
3422 3422 removed = set(changes.removed)
3423 3423 _deleted = set(changes.deleted)
3424 3424 unknown = set(changes.unknown)
3425 3425 unknown.update(changes.ignored)
3426 3426 clean = set(changes.clean)
3427 3427 modadded = set()
3428 3428
3429 3429 # We need to account for the state of the file in the dirstate,
3430 3430 # even when we revert against something else than parent. This will
3431 3431 # slightly alter the behavior of revert (doing back up or not, delete
3432 3432 # or just forget etc).
3433 3433 if parent == node:
3434 3434 dsmodified = modified
3435 3435 dsadded = added
3436 3436 dsremoved = removed
3437 3437 # store all local modifications, useful later for rename detection
3438 3438 localchanges = dsmodified | dsadded
3439 3439 modified, added, removed = set(), set(), set()
3440 3440 else:
3441 3441 changes = repo.status(node1=parent, match=m)
3442 3442 dsmodified = set(changes.modified)
3443 3443 dsadded = set(changes.added)
3444 3444 dsremoved = set(changes.removed)
3445 3445 # store all local modifications, useful later for rename detection
3446 3446 localchanges = dsmodified | dsadded
3447 3447
3448 3448 # only take into account for removes between wc and target
3449 3449 clean |= dsremoved - removed
3450 3450 dsremoved &= removed
3451 3451 # distinct between dirstate remove and other
3452 3452 removed -= dsremoved
3453 3453
3454 3454 modadded = added & dsmodified
3455 3455 added -= modadded
3456 3456
3457 3457 # tell newly modified apart.
3458 3458 dsmodified &= modified
3459 3459 dsmodified |= modified & dsadded # dirstate added may need backup
3460 3460 modified -= dsmodified
3461 3461
3462 3462 # We need to wait for some post-processing to update this set
3463 3463 # before making the distinction. The dirstate will be used for
3464 3464 # that purpose.
3465 3465 dsadded = added
3466 3466
3467 3467 # in case of merge, files that are actually added can be reported as
3468 3468 # modified, we need to post process the result
3469 3469 if p2 != nullid:
3470 3470 mergeadd = set(dsmodified)
3471 3471 for path in dsmodified:
3472 3472 if path in mf:
3473 3473 mergeadd.remove(path)
3474 3474 dsadded |= mergeadd
3475 3475 dsmodified -= mergeadd
3476 3476
3477 3477 # if f is a rename, update `names` to also revert the source
3478 3478 for f in localchanges:
3479 3479 src = repo.dirstate.copied(f)
3480 3480 # XXX should we check for rename down to target node?
3481 3481 if src and src not in names and repo.dirstate[src] == b'r':
3482 3482 dsremoved.add(src)
3483 3483 names[src] = True
3484 3484
3485 3485 # determine the exact nature of the deleted changesets
3486 3486 deladded = set(_deleted)
3487 3487 for path in _deleted:
3488 3488 if path in mf:
3489 3489 deladded.remove(path)
3490 3490 deleted = _deleted - deladded
3491 3491
3492 3492 # distinguish between file to forget and the other
3493 3493 added = set()
3494 3494 for abs in dsadded:
3495 3495 if repo.dirstate[abs] != b'a':
3496 3496 added.add(abs)
3497 3497 dsadded -= added
3498 3498
3499 3499 for abs in deladded:
3500 3500 if repo.dirstate[abs] == b'a':
3501 3501 dsadded.add(abs)
3502 3502 deladded -= dsadded
3503 3503
3504 3504 # For files marked as removed, we check if an unknown file is present at
3505 3505 # the same path. If a such file exists it may need to be backed up.
3506 3506 # Making the distinction at this stage helps have simpler backup
3507 3507 # logic.
3508 3508 removunk = set()
3509 3509 for abs in removed:
3510 3510 target = repo.wjoin(abs)
3511 3511 if os.path.lexists(target):
3512 3512 removunk.add(abs)
3513 3513 removed -= removunk
3514 3514
3515 3515 dsremovunk = set()
3516 3516 for abs in dsremoved:
3517 3517 target = repo.wjoin(abs)
3518 3518 if os.path.lexists(target):
3519 3519 dsremovunk.add(abs)
3520 3520 dsremoved -= dsremovunk
3521 3521
3522 3522 # action to be actually performed by revert
3523 3523 # (<list of file>, message>) tuple
3524 3524 actions = {
3525 3525 b'revert': ([], _(b'reverting %s\n')),
3526 3526 b'add': ([], _(b'adding %s\n')),
3527 3527 b'remove': ([], _(b'removing %s\n')),
3528 3528 b'drop': ([], _(b'removing %s\n')),
3529 3529 b'forget': ([], _(b'forgetting %s\n')),
3530 3530 b'undelete': ([], _(b'undeleting %s\n')),
3531 3531 b'noop': (None, _(b'no changes needed to %s\n')),
3532 3532 b'unknown': (None, _(b'file not managed: %s\n')),
3533 3533 }
3534 3534
3535 3535 # "constant" that convey the backup strategy.
3536 3536 # All set to `discard` if `no-backup` is set do avoid checking
3537 3537 # no_backup lower in the code.
3538 3538 # These values are ordered for comparison purposes
3539 3539 backupinteractive = 3 # do backup if interactively modified
3540 3540 backup = 2 # unconditionally do backup
3541 3541 check = 1 # check if the existing file differs from target
3542 3542 discard = 0 # never do backup
3543 3543 if opts.get(b'no_backup'):
3544 3544 backupinteractive = backup = check = discard
3545 3545 if interactive:
3546 3546 dsmodifiedbackup = backupinteractive
3547 3547 else:
3548 3548 dsmodifiedbackup = backup
3549 3549 tobackup = set()
3550 3550
3551 3551 backupanddel = actions[b'remove']
3552 3552 if not opts.get(b'no_backup'):
3553 3553 backupanddel = actions[b'drop']
3554 3554
3555 3555 disptable = (
3556 3556 # dispatch table:
3557 3557 # file state
3558 3558 # action
3559 3559 # make backup
3560 3560 ## Sets that results that will change file on disk
3561 3561 # Modified compared to target, no local change
3562 3562 (modified, actions[b'revert'], discard),
3563 3563 # Modified compared to target, but local file is deleted
3564 3564 (deleted, actions[b'revert'], discard),
3565 3565 # Modified compared to target, local change
3566 3566 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3567 3567 # Added since target
3568 3568 (added, actions[b'remove'], discard),
3569 3569 # Added in working directory
3570 3570 (dsadded, actions[b'forget'], discard),
3571 3571 # Added since target, have local modification
3572 3572 (modadded, backupanddel, backup),
3573 3573 # Added since target but file is missing in working directory
3574 3574 (deladded, actions[b'drop'], discard),
3575 3575 # Removed since target, before working copy parent
3576 3576 (removed, actions[b'add'], discard),
3577 3577 # Same as `removed` but an unknown file exists at the same path
3578 3578 (removunk, actions[b'add'], check),
3579 3579 # Removed since targe, marked as such in working copy parent
3580 3580 (dsremoved, actions[b'undelete'], discard),
3581 3581 # Same as `dsremoved` but an unknown file exists at the same path
3582 3582 (dsremovunk, actions[b'undelete'], check),
3583 3583 ## the following sets does not result in any file changes
3584 3584 # File with no modification
3585 3585 (clean, actions[b'noop'], discard),
3586 3586 # Existing file, not tracked anywhere
3587 3587 (unknown, actions[b'unknown'], discard),
3588 3588 )
3589 3589
3590 3590 for abs, exact in sorted(names.items()):
3591 3591 # target file to be touch on disk (relative to cwd)
3592 3592 target = repo.wjoin(abs)
3593 3593 # search the entry in the dispatch table.
3594 3594 # if the file is in any of these sets, it was touched in the working
3595 3595 # directory parent and we are sure it needs to be reverted.
3596 3596 for table, (xlist, msg), dobackup in disptable:
3597 3597 if abs not in table:
3598 3598 continue
3599 3599 if xlist is not None:
3600 3600 xlist.append(abs)
3601 3601 if dobackup:
3602 3602 # If in interactive mode, don't automatically create
3603 3603 # .orig files (issue4793)
3604 3604 if dobackup == backupinteractive:
3605 3605 tobackup.add(abs)
3606 3606 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3607 3607 absbakname = scmutil.backuppath(ui, repo, abs)
3608 3608 bakname = os.path.relpath(
3609 3609 absbakname, start=repo.root
3610 3610 )
3611 3611 ui.note(
3612 3612 _(b'saving current version of %s as %s\n')
3613 3613 % (uipathfn(abs), uipathfn(bakname))
3614 3614 )
3615 3615 if not opts.get(b'dry_run'):
3616 3616 if interactive:
3617 3617 util.copyfile(target, absbakname)
3618 3618 else:
3619 3619 util.rename(target, absbakname)
3620 3620 if opts.get(b'dry_run'):
3621 3621 if ui.verbose or not exact:
3622 3622 ui.status(msg % uipathfn(abs))
3623 3623 elif exact:
3624 3624 ui.warn(msg % uipathfn(abs))
3625 3625 break
3626 3626
3627 3627 if not opts.get(b'dry_run'):
3628 3628 needdata = (b'revert', b'add', b'undelete')
3629 3629 oplist = [actions[name][0] for name in needdata]
3630 3630 prefetch = scmutil.prefetchfiles
3631 3631 matchfiles = scmutil.matchfiles
3632 3632 prefetch(
3633 3633 repo,
3634 3634 [ctx.rev()],
3635 3635 matchfiles(repo, [f for sublist in oplist for f in sublist]),
3636 3636 )
3637 3637 match = scmutil.match(repo[None], pats)
3638 3638 _performrevert(
3639 3639 repo,
3640 3640 parents,
3641 3641 ctx,
3642 3642 names,
3643 3643 uipathfn,
3644 3644 actions,
3645 3645 match,
3646 3646 interactive,
3647 3647 tobackup,
3648 3648 )
3649 3649
3650 3650 if targetsubs:
3651 3651 # Revert the subrepos on the revert list
3652 3652 for sub in targetsubs:
3653 3653 try:
3654 3654 wctx.sub(sub).revert(
3655 3655 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3656 3656 )
3657 3657 except KeyError:
3658 3658 raise error.Abort(
3659 3659 b"subrepository '%s' does not exist in %s!"
3660 3660 % (sub, short(ctx.node()))
3661 3661 )
3662 3662
3663 3663
def _performrevert(
    repo,
    parents,
    ctx,
    names,
    uipathfn,
    actions,
    match,
    interactive=False,
    tobackup=None,
):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    ``parents`` is the (parent, p2) pair of working directory parents.
    ``ctx`` is the context being reverted to.
    ``names`` maps a file path to its "exact" flag (truthy when the user
    named the file explicitly); used to decide whether to print per-file
    status messages.
    ``uipathfn`` formats a repository path for UI output.
    ``actions`` maps an action name to a ([files], message-template) pair.
    ``match`` is the matcher handed to the interactive hunk selection.
    ``tobackup`` is the set of files to back up only if modified
    interactively (may be None; defaults to an empty set when needed).
    """
    parent, p2 = parents
    node = ctx.node()
    # files the user declined to touch during interactive prompts
    excluded_files = []

    def checkout(f):
        # write f's content and flags from the target context into the
        # working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # unlink f from disk (best effort), then mark it removed in the
        # dirstate
        try:
            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            # the file may already be gone; the dirstate update below is
            # what matters
            pass
        repo.dirstate.remove(f)

    def prntstatusmsg(action, f):
        # exactly-named files are only reported when verbose
        exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % uipathfn(f))

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    # files added in the working directory: stop tracking (keep on disk)
    for f in actions[b'forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'forget', f)
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'forget', f)
            repo.dirstate.drop(f)
    # files added since target: remove from disk and dirstate
    for f in actions[b'remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'remove', f)
            doremove(f)
    # files already missing from disk: only the dirstate needs updating
    for f in actions[b'drop'][0]:
        audit_path(f)
        prntstatusmsg(b'drop', f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(
            repo.ui,
            whitespace=True,
            section=b'commands',
            configprefix=b'revert.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        # 'apply': select hunks of the diff toward the target to apply;
        # 'discard'/'keep': reverting to the parent, select changes to
        # drop (or, with select-to-keep, changes to preserve)
        operation = b'apply'
        if node == parent:
            if repo.ui.configbool(
                b'experimental', b'revert.interactive.select-to-keep'
            ):
                operation = b'keep'
            else:
                operation = b'discard'

        # diff direction depends on the operation chosen above
        if operation == b'apply':
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        else:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(
                repo.ui, originalchunks, match, operation=operation
            )
            # for 'discard', the user selected changes to drop, so the
            # selected hunks must be applied in reverse
            if operation == b'discard':
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)

        # FIXME: when doing an interactive revert of a copy, there's no way of
        # performing a partial revert of the added file, the only option is
        # "remove added file <name> (Yn)?", so we don't need to worry about the
        # alsorestore value. Ideally we'd be able to partially revert
        # copied/renamed files.
        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
            chunks, originalchunks
        )
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
            prntstatusmsg(b'revert', f)
        files = set()
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.backuppath(repo.ui, repo, abs)
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
                if abs not in files:
                    files.add(abs)
                    # for 'keep', start from the target's version and let
                    # the selected hunks re-introduce the kept changes
                    if operation == b'keep':
                        checkout(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        # non-interactive: plain checkout of every file to revert
        for f in actions[b'revert'][0]:
            prntstatusmsg(b'revert', f)
            checkout(f)
            if normal:
                normal(f)

    for f in actions[b'add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            prntstatusmsg(b'add', f)
            checkout(f)
            repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions[b'undelete'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
            )
            if choice == 0:
                prntstatusmsg(b'undelete', f)
                checkout(f)
                normal(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'undelete', f)
            checkout(f)
            normal(f)

    # restore copy/rename metadata for the files we touched
    copied = copies.pathcopies(repo[parent], ctx)

    for f in (
        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
    ):
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3863 3863
3864 3864
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return a tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
3882 3882
3883 3883
def checkunfinished(repo, commit=False, skipmerge=False):
    '''Abort if a multistep operation, like graft, is left unfinished.

    It's probably good to check this right before bailifchanged().
    Non-clearable states are examined before clearable ones so things
    like rebase take precedence over update.
    '''
    # First pass: states that cannot simply be cleared away.
    for st in statemod._unfinishedstates:
        exempt = (
            st._clearable
            or (commit and st._allowcommit)
            or st._reportonly
        )
        if not exempt and st.isunfinished(repo):
            raise error.Abort(st.msg(), hint=st.hint())

    # Second pass: clearable states.
    for st in statemod._unfinishedstates:
        exempt = (
            not st._clearable
            or (commit and st._allowcommit)
            or (skipmerge and st._opname == b'merge')
            or st._reportonly
        )
        if not exempt and st.isunfinished(repo):
            raise error.Abort(st.msg(), hint=st.hint())
3911 3911
3912 3912
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.

    Raises error.Abort if a non-clearable operation is in progress, so
    nothing gets cleared in that case.
    '''
    # Abort on any non-clearable unfinished state before clearing anything.
    for state in statemod._unfinishedstates:
        if state._reportonly:
            continue
        if not state._clearable and state.isunfinished(repo):
            raise error.Abort(state.msg(), hint=state.hint())

    for s in statemod._unfinishedstates:
        # BUG FIX: this previously tested `state._reportonly` — a stale
        # variable left over from the loop above (always the *last* entry
        # of _unfinishedstates) — instead of the current state `s`.
        if s._opname == b'merge' or s._reportonly:
            continue
        if s._clearable and s.isunfinished(repo):
            util.unlink(repo.vfs.join(s._fname))
3928 3928
3929 3929
def getunfinishedstate(repo):
    '''Return the statecheck object for an in-progress multistep
    operation, or None when nothing is unfinished.'''
    return next(
        (s for s in statemod._unfinishedstates if s.isunfinished(repo)),
        None,
    )
3937 3937
3938 3938
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    statemod._unfinishedstates is scanned for an unfinished operation,
    and the corresponding "continue: ..." message is generated if the
    operation supports being continued. Failing that, a dirty working
    directory yields a "continue: hg commit" suggestion.

    Returns a (msg, warning) tuple: 'msg' is a bytes string (or None) and
    'warning' is a boolean (or None) saying whether it should be shown as
    a warning.
    '''
    contmsg = _(b"continue: %s")
    for st in statemod._unfinishedstates:
        if st._continueflag and st.isunfinished(repo):
            return contmsg % st.continuemsg(), True
    wctx = repo[None]
    if wctx.dirty(missing=True, merge=False, branch=False):
        return contmsg % _(b"hg commit"), False
    return None, None
3959 3959
3960 3960
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there is an unfinished operation that supports a continue flag,
    the message goes through repo.ui.warn; otherwise it goes through
    repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter(b"%s\n" % msg)
3975 3975
3976 3976
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task. When there is no task
    worth warning about (e.g. only 'hg commit' would be suggested), no
    hint is attached to the abort.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_(b'no %s in progress') % task, hint=hint)
3991 3991
3992 3992
def abortgraft(ui, repo, graftstate):
    """abort the interrupted graft and rollbacks to the state before interrupted
    graft

    Updates the working directory back to the changeset the graft started
    from and strips the grafted changesets when it is safe to do so (none
    of them is public and no new changesets sit on top of them). Returns 0.
    """
    if not graftstate.exists():
        raise error.Abort(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # an old graft state which does not have all the data required to abort
        # the graft
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which graft operation was started
    if len(newnodes) > 0:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo[b'.']
    # whether to strip or not
    cleanup = False
    # NOTE(review): local import; presumably avoids a module-level import
    # cycle — confirm before hoisting to the top of the file
    from . import hg

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # checking that none of the newnodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            cleanup = False

        # checking that no new nodes are created on top of grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            cleanup = False

    if cleanup:
        with repo.wlock(), repo.lock():
            # move away from the nodes about to be stripped
            hg.updaterepo(repo, startctx.node(), overwrite=True)
            # stripping the new nodes created
            strippoints = [
                c.node() for c in repo.set(b"roots(%ld)", newnodes)
            ]
            repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        hg.updaterepo(repo, startctx.node(), overwrite=True)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
4056 4056
4057 4057
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """Return a dict with the data stored in the graft state file.

    Falls back to parsing the raw 'graftstate' file (old format: one node
    per line) when the new-style state file is corrupted.
    """
    try:
        statedata = graftstate.read()
    except error.CorruptedState:
        rawnodes = repo.vfs.read(b'graftstate')
        statedata = {b'nodes': rawnodes.splitlines()}
    return statedata
4066 4066
4067 4067
def hgabortgraft(ui, repo):
    """Entry point used by 'hg abort' to abort an interrupted graft."""
    with repo.wlock():
        return abortgraft(ui, repo, statemod.cmdstate(repo, b'graftstate'))
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now