##// END OF EJS Templates
error: normalize "unresolved conflicts" error messages with a custom class...
Daniel Ploch -
r45711:e429e7c8 default
parent child Browse files
Show More
@@ -1,2262 +1,2257
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 https://mercurial-scm.org/wiki/RebaseExtension
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 import errno
20 20 import os
21 21
22 22 from mercurial.i18n import _
23 23 from mercurial.node import (
24 24 nullrev,
25 25 short,
26 26 )
27 27 from mercurial.pycompat import open
28 28 from mercurial import (
29 29 bookmarks,
30 30 cmdutil,
31 31 commands,
32 32 copies,
33 33 destutil,
34 34 dirstateguard,
35 35 error,
36 36 extensions,
37 37 hg,
38 38 merge as mergemod,
39 39 mergestate as mergestatemod,
40 40 mergeutil,
41 41 node as nodemod,
42 42 obsolete,
43 43 obsutil,
44 44 patch,
45 45 phases,
46 46 pycompat,
47 47 registrar,
48 48 repair,
49 49 revset,
50 50 revsetlang,
51 51 rewriteutil,
52 52 scmutil,
53 53 smartset,
54 54 state as statemod,
55 55 util,
56 56 )
57 57
# The following constants are used throughout the rebase module. The ordering
# of their values must be maintained.

# Sentinel state value: the revision still needs to be rebased.
revtodo = -1
revtodostr = b'-1'

# Legacy revstates no longer produced by current code:
# -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
legacystates = {b'-2', b'-3', b'-4', b'-5'}
68 68
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core'
# for extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
77 77
78 78 def _nothingtorebase():
79 79 return 1
80 80
81 81
82 82 def _savegraft(ctx, extra):
83 83 s = ctx.extra().get(b'source', None)
84 84 if s is not None:
85 85 extra[b'source'] = s
86 86 s = ctx.extra().get(b'intermediate-source', None)
87 87 if s is not None:
88 88 extra[b'intermediate-source'] = s
89 89
90 90
91 91 def _savebranch(ctx, extra):
92 92 extra[b'branch'] = ctx.branch()
93 93
94 94
def _destrebase(repo, sourceset, destspace=None):
    """Small wrapper around destutil.destmerge passing rebase-specific args.

    Please wrap destutil.destmerge instead."""
    return destutil.destmerge(
        repo,
        action=b'rebase',
        sourceset=sourceset,
        onheadcheck=False,
        destspace=destspace,
    )
106 106
107 107
revsetpredicate = registrar.revsetpredicate()


@revsetpredicate(b'_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # Default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    sourceset = None
    if x is not None:
        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
    return subset & smartset.baseset([_destrebase(repo, sourceset)])
123 123
124 124
@revsetpredicate(b'_destautoorphanrebase')
def _revsetdestautoorphanrebase(repo, subset, x):
    # ``_destautoorphanrebase()``

    # Automatic rebase destination for a single orphan revision.
    unfi = repo.unfiltered()
    obsoleted = unfi.revs(b'obsolete()')

    src = revset.getset(repo, subset, x).first()

    # Empty src or already obsoleted - do not return a destination.
    # NOTE(review): ``not src`` is also true when src is revision 0 — confirm
    # this is intended before relying on it for the zero revision.
    if not src or src in obsoleted:
        return smartset.baseset()
    dests = destutil.orphanpossibledestination(repo, src)
    if len(dests) > 1:
        raise error.Abort(
            _(b"ambiguous automatic rebase: %r could end up on any of %r")
            % (src, dests)
        )
    # We have zero or one destination, so we can just return here.
    return smartset.baseset(dests)
146 146
147 147
def _ctxdesc(ctx):
    """Return a short byte-string description of ``ctx`` for UI messages.

    Format: ``rev:shortnode "first line of description" (names...)`` where
    the trailing name list (bookmarks, tags, ...) is included only when
    non-empty; branch names are deliberately excluded.
    """
    firstline = ctx.description().split(b'\n', 1)[0]
    desc = b'%d:%s "%s"' % (ctx.rev(), ctx, firstline)
    repo = ctx.repo()
    names = []
    for nsname, ns in pycompat.iteritems(repo.names):
        if nsname == b'branches':
            # branch is shown separately elsewhere; skip it here
            continue
        names.extend(ns.names(repo, ctx.node()))
    if names:
        desc += b' (%s)' % b' '.join(names)
    return desc
164 164
165 165
class rebaseruntime(object):
    """Container for the mutable runtime state of one rebase operation."""

    def __init__(self, repo, ui, inmemory=False, opts=None):
        if opts is None:
            opts = {}

        # prepared: whether we have rebasestate prepared or not. Currently it
        # decides whether "self.repo" is unfiltered or not.
        # The rebasestate has explicit hash to hash instructions not depending
        # on visibility. If rebasestate exists (in-memory or on-disk), use
        # unfiltered repo to avoid visibility issues.
        # Before knowing rebasestate (i.e. when starting a new rebase (not
        # --continue or --abort)), the original repo should be used so
        # visibility-dependent revsets are correct.
        self.prepared = False
        self.resume = False
        self._repo = repo

        self.ui = ui
        self.opts = opts
        self.originalwd = None
        self.external = nullrev
        # Mapping between the old revision id and either what is the new
        # rebased revision or what needs to be done with the old revision.
        # This dict holds most of the rebase progress state.
        self.state = {}
        self.activebookmark = None
        self.destmap = {}
        self.skipped = set()

        self.collapsef = opts.get(b'collapse', False)
        self.collapsemsg = cmdutil.logmessage(ui, opts)
        self.date = opts.get(b'date', None)

        extrafn = opts.get(b'extrafn')  # internal, used by e.g. hgsubversion
        self.extrafns = [_savegraft]
        if extrafn:
            self.extrafns = [extrafn]

        self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
        self.keepf = opts.get(b'keep', False)
        self.keepbranchesf = opts.get(b'keepbranches', False)
        self.skipemptysuccessorf = rewriteutil.skip_empty_successor(
            repo.ui, b'rebase'
        )
        self.obsoletenotrebased = {}
        self.obsoletewithoutsuccessorindestination = set()
        self.inmemory = inmemory
        self.stateobj = statemod.cmdstate(repo, b'rebasestate')

    @property
    def repo(self):
        # Once rebasestate is loaded it refers to hashes regardless of
        # visibility, so use the unfiltered repo to avoid lookup failures.
        if self.prepared:
            return self._repo.unfiltered()
        return self._repo

    def storestatus(self, tr=None):
        """Store the current status to allow recovery"""
        if not tr:
            with self.repo.vfs(b"rebasestate", b"w") as f:
                self._writestatus(f)
        else:
            # Inside a transaction: write the state file when it commits.
            tr.addfilegenerator(
                b'rebasestate',
                (b'rebasestate',),
                self._writestatus,
                location=b'plain',
            )

    def _writestatus(self, f):
        """Serialize rebase progress to the open file object ``f``."""
        repo = self.repo
        assert repo.filtername is None
        f.write(repo[self.originalwd].hex() + b'\n')
        # was "dest". we now write dest per src root below.
        f.write(b'\n')
        f.write(repo[self.external].hex() + b'\n')
        f.write(b'%d\n' % int(self.collapsef))
        f.write(b'%d\n' % int(self.keepf))
        f.write(b'%d\n' % int(self.keepbranchesf))
        f.write(b'%s\n' % (self.activebookmark or b''))
        destmap = self.destmap
        for srcrev, staterev in pycompat.iteritems(self.state):
            oldrev = repo[srcrev].hex()
            if staterev >= 0:
                newrev = repo[staterev].hex()
            else:
                newrev = b"%d" % staterev
            destnode = repo[destmap[srcrev]].hex()
            f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
        repo.ui.debug(b'rebase status stored\n')

    def restorestatus(self):
        """Restore a previously stored status"""
        if not self.stateobj.exists():
            cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))

        data = self._read()
        self.repo.ui.debug(b'rebase status resumed\n')

        self.originalwd = data[b'originalwd']
        self.destmap = data[b'destmap']
        self.state = data[b'state']
        self.skipped = data[b'skipped']
        self.collapsef = data[b'collapse']
        self.keepf = data[b'keep']
        self.keepbranchesf = data[b'keepbranches']
        self.external = data[b'external']
        self.activebookmark = data[b'activebookmark']

    def _read(self):
        """Parse .hg/rebasestate and return the decoded state dict."""
        self.prepared = True
        repo = self.repo
        assert repo.filtername is None
        data = {
            b'keepbranches': None,
            b'collapse': None,
            b'activebookmark': None,
            b'external': nullrev,
            b'keep': None,
            b'originalwd': None,
        }
        legacydest = None
        state = {}
        destmap = {}

        f = repo.vfs(b"rebasestate")
        for i, l in enumerate(f.read().splitlines()):
            if i == 0:
                data[b'originalwd'] = repo[l].rev()
            elif i == 1:
                # this line should be empty in newer version. but legacy
                # clients may still use it
                if l:
                    legacydest = repo[l].rev()
            elif i == 2:
                data[b'external'] = repo[l].rev()
            elif i == 3:
                data[b'collapse'] = bool(int(l))
            elif i == 4:
                data[b'keep'] = bool(int(l))
            elif i == 5:
                data[b'keepbranches'] = bool(int(l))
            elif i == 6 and not (len(l) == 81 and b':' in l):
                # line 6 is a recent addition, so for backwards
                # compatibility check that the line doesn't look like the
                # oldrev:newrev lines
                data[b'activebookmark'] = l
            else:
                args = l.split(b':')
                oldrev = repo[args[0]].rev()
                newrev = args[1]
                if newrev in legacystates:
                    continue
                if len(args) > 2:
                    destrev = repo[args[2]].rev()
                else:
                    destrev = legacydest
                destmap[oldrev] = destrev
                if newrev == revtodostr:
                    state[oldrev] = revtodo
                    # Legacy compat special case
                else:
                    state[oldrev] = repo[newrev].rev()

        if data[b'keepbranches'] is None:
            raise error.Abort(_(b'.hg/rebasestate is incomplete'))

        data[b'destmap'] = destmap
        data[b'state'] = state
        skipped = set()
        # recompute the set of skipped revs
        if not data[b'collapse']:
            seen = set(destmap.values())
            for old, new in sorted(state.items()):
                if new != revtodo and new in seen:
                    skipped.add(old)
                seen.add(new)
        data[b'skipped'] = skipped
        repo.ui.debug(
            b'computed skipped revs: %s\n'
            % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
        )

        return data

    def _handleskippingobsolete(self, obsoleterevs, destmap):
        """Compute structures necessary for skipping obsolete revisions

        obsoleterevs: iterable of all obsolete revisions in rebaseset
        destmap: {srcrev: destrev} destination revisions
        """
        self.obsoletenotrebased = {}
        if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
            return
        obsoleteset = set(obsoleterevs)
        (
            self.obsoletenotrebased,
            self.obsoletewithoutsuccessorindestination,
            obsoleteextinctsuccessors,
        ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
        skippedset = set(self.obsoletenotrebased)
        skippedset.update(self.obsoletewithoutsuccessorindestination)
        skippedset.update(obsoleteextinctsuccessors)
        _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)

    def _prepareabortorcontinue(
        self, isabort, backup=True, suppwarns=False, dryrun=False, confirm=False
    ):
        """Reload saved state for --continue/--abort; run the abort path if
        ``isabort`` is true. Returns an exit code or None (continue case)."""
        self.resume = True
        try:
            self.restorestatus()
            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
        except error.RepoLookupError:
            if isabort:
                clearstatus(self.repo)
                clearcollapsemsg(self.repo)
                self.repo.ui.warn(
                    _(
                        b'rebase aborted (no revision is removed,'
                        b' only broken state is cleared)\n'
                    )
                )
                return 0
            else:
                msg = _(b'cannot continue inconsistent rebase')
                hint = _(b'use "hg rebase --abort" to clear broken state')
                raise error.Abort(msg, hint=hint)

        if isabort:
            backup = backup and self.backupf
            return self._abort(
                backup=backup,
                suppwarns=suppwarns,
                dryrun=dryrun,
                confirm=confirm,
            )

    def _preparenewrebase(self, destmap):
        """Validate ``destmap`` and build the initial rebase state."""
        if not destmap:
            return _nothingtorebase()

        rebaseset = destmap.keys()
        if not self.keepf:
            try:
                rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
            except error.Abort as e:
                if e.hint is None:
                    e.hint = _(b'use --keep to keep original changesets')
                raise e

        result = buildstate(self.repo, destmap, self.collapsef)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_(b'nothing to rebase\n'))
            return _nothingtorebase()

        (self.originalwd, self.destmap, self.state) = result
        if self.collapsef:
            dests = set(self.destmap.values())
            if len(dests) != 1:
                raise error.Abort(
                    _(b'--collapse does not work with multiple destinations')
                )
            destrev = next(iter(dests))
            destancestors = self.repo.changelog.ancestors(
                [destrev], inclusive=True
            )
            self.external = externalparent(self.repo, self.state, destancestors)

        for destrev in sorted(set(destmap.values())):
            dest = self.repo[destrev]
            if dest.closesbranch() and not self.keepbranchesf:
                self.ui.status(_(b'reopening closed branch head %s\n') % dest)

        self.prepared = True

    def _assignworkingcopy(self):
        """Pick the working context: in-memory overlay or the on-disk wctx."""
        if self.inmemory:
            from mercurial.context import overlayworkingctx

            self.wctx = overlayworkingctx(self.repo)
            self.repo.ui.debug(b"rebasing in-memory\n")
        else:
            self.wctx = self.repo[None]
            self.repo.ui.debug(b"rebasing on disk\n")
        self.repo.ui.log(
            b"rebase",
            b"using in-memory rebase: %r\n",
            self.inmemory,
            rebase_imm_used=self.inmemory,
        )

    def _performrebase(self, tr):
        """Drive the rebase: rebase every pending revision in order."""
        self._assignworkingcopy()
        repo, ui = self.repo, self.ui
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(
                        _(b'cannot collapse multiple named branches')
                    )

        # Calculate self.obsoletenotrebased
        obsrevs = _filterobsoleterevs(self.repo, self.state)
        self._handleskippingobsolete(obsrevs, self.destmap)

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()
        if tr:
            # When using single transaction, store state when transaction
            # commits.
            self.storestatus(tr)

        cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
        p = repo.ui.makeprogress(
            _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
        )

        def progress(ctx):
            p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))

        allowdivergence = self.ui.configbool(
            b'experimental', b'evolution.allowdivergence'
        )
        for subset in sortsource(self.destmap):
            sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
            if not allowdivergence:
                sortedrevs -= self.repo.revs(
                    b'descendants(%ld) and not %ld',
                    self.obsoletewithoutsuccessorindestination,
                    self.obsoletewithoutsuccessorindestination,
                )
            for rev in sortedrevs:
                self._rebasenode(tr, rev, allowdivergence, progress)
        p.complete()
        ui.note(_(b'rebase merging completed\n'))

    def _concludenode(self, rev, p1, editor, commitmsg=None):
        '''Commit the wd changes with parents p1 and p2.

        Reuse commit info from rev but also store useful information in extra.
        Return node of committed revision.'''
        repo = self.repo
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        date = self.date
        if date is None:
            date = ctx.date()
        extra = {b'rebase_source': ctx.hex()}
        for fn in self.extrafns:
            fn(ctx, extra)
        destphase = max(ctx.phase(), phases.draft)
        overrides = {
            (b'phases', b'new-commit'): destphase,
            (b'ui', b'allowemptycommit'): not self.skipemptysuccessorf,
        }
        with repo.ui.configoverride(overrides, b'rebase'):
            if self.inmemory:
                newnode = commitmemorynode(
                    repo,
                    wctx=self.wctx,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )
                mergestatemod.mergestate.clean(repo)
            else:
                newnode = commitnode(
                    repo,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )

        return newnode

    def _rebasenode(self, tr, rev, allowdivergence, progressfn):
        """Rebase one revision, updating self.state and self.skipped."""
        repo, ui, opts = self.repo, self.ui, self.opts
        dest = self.destmap[rev]
        ctx = repo[rev]
        desc = _ctxdesc(ctx)
        if self.state[rev] == rev:
            ui.status(_(b'already rebased %s\n') % desc)
        elif (
            not allowdivergence
            and rev in self.obsoletewithoutsuccessorindestination
        ):
            msg = (
                _(
                    b'note: not rebasing %s and its descendants as '
                    b'this would cause divergence\n'
                )
                % desc
            )
            repo.ui.status(msg)
            self.skipped.add(rev)
        elif rev in self.obsoletenotrebased:
            succ = self.obsoletenotrebased[rev]
            if succ is None:
                msg = _(b'note: not rebasing %s, it has no successor\n') % desc
            else:
                succdesc = _ctxdesc(repo[succ])
                msg = _(
                    b'note: not rebasing %s, already in destination as %s\n'
                ) % (desc, succdesc)
            repo.ui.status(msg)
            # Make clearrebased aware state[rev] is not a true successor
            self.skipped.add(rev)
            # Record rev as moved to its desired destination in self.state.
            # This helps bookmark and working parent movement.
            dest = max(
                adjustdest(repo, rev, self.destmap, self.state, self.skipped)
            )
            self.state[rev] = dest
        elif self.state[rev] == revtodo:
            ui.status(_(b'rebasing %s\n') % desc)
            progressfn(ctx)
            p1, p2, base = defineparents(
                repo,
                rev,
                self.destmap,
                self.state,
                self.skipped,
                self.obsoletenotrebased,
            )
            if self.resume and self.wctx.p1().rev() == p1:
                repo.ui.debug(b'resuming interrupted rebase\n')
                self.resume = False
            else:
                overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
                with ui.configoverride(overrides, b'rebase'):
                    stats = rebasenode(
                        repo,
                        rev,
                        p1,
                        p2,
                        base,
                        self.collapsef,
                        dest,
                        wctx=self.wctx,
                    )
                    if stats.unresolvedcount > 0:
                        if self.inmemory:
                            raise error.InMemoryMergeConflictsError()
                        else:
                            raise error.ConflictResolutionRequired(b'rebase')
            if not self.collapsef:
                merging = p2 != nullrev
                editform = cmdutil.mergeeditform(merging, b'rebase')
                editor = cmdutil.getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
                # We need to set parents again here just in case we're
                # continuing a rebase started with an old hg version (before
                # 9c9cfecd4600), because those old versions would have left us
                # with two dirstate parents, and we don't want to create a
                # merge commit here (unless we're rebasing a merge commit).
                self.wctx.setparents(repo[p1].node(), repo[p2].node())
                newnode = self._concludenode(rev, p1, editor)
            else:
                # Skip commit if we are collapsing
                newnode = None
            # Update the state
            if newnode is not None:
                self.state[rev] = repo[newnode].rev()
                ui.debug(b'rebased as %s\n' % short(newnode))
                if repo[newnode].isempty():
                    ui.warn(
                        _(
                            b'note: created empty successor for %s, its '
                            b'destination already has all its changes\n'
                        )
                        % desc
                    )
            else:
                if not self.collapsef:
                    ui.warn(
                        _(
                            b'note: not rebasing %s, its destination already '
                            b'has all its changes\n'
                        )
                        % desc
                    )
                    self.skipped.add(rev)
                    self.state[rev] = p1
                    ui.debug(b'next revision set to %d\n' % p1)
        else:
            ui.status(
                _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
            )
        if not tr:
            # When not using single transaction, store state after each
            # commit is completely done. On InterventionRequired, we thus
            # won't store the status. Instead, we'll hit the "len(parents) == 2"
            # case and realize that the commit was in progress.
            self.storestatus()

    def _finishrebase(self):
        """Finalize the rebase: collapse commit, mq/bookmark updates, cleanup."""
        repo, ui, opts = self.repo, self.ui, self.opts
        fm = ui.formatter(b'rebase', opts)
        fm.startitem()
        if self.collapsef:
            p1, p2, _base = defineparents(
                repo,
                min(self.state),
                self.destmap,
                self.state,
                self.skipped,
                self.obsoletenotrebased,
            )
            editopt = opts.get(b'edit')
            editform = b'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                commitmsg = b'Collapsed revision'
                for rebased in sorted(self.state):
                    if rebased not in self.skipped:
                        commitmsg += b'\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            revtoreuse = max(self.state)

            self.wctx.setparents(repo[p1].node(), repo[self.external].node())
            newnode = self._concludenode(
                revtoreuse, p1, editor, commitmsg=commitmsg
            )

            if newnode is not None:
                newrev = repo[newnode].rev()
                for oldrev in self.state:
                    self.state[oldrev] = newrev

        if b'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_(b"update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, overwrite=False)

        collapsedas = None
        if self.collapsef and not self.keepf:
            collapsedas = newnode
        clearrebased(
            ui,
            repo,
            self.destmap,
            self.state,
            self.skipped,
            collapsedas,
            self.keepf,
            fm=fm,
            backup=self.backupf,
        )

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_(b"rebase completed\n"))
        util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
        fm.end()

        if (
            self.activebookmark
            and self.activebookmark in repo._bookmarks
            and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
        ):
            bookmarks.activate(repo, self.activebookmark)

    def _abort(self, backup=True, suppwarns=False, dryrun=False, confirm=False):
        '''Restore the repository to its original state.'''

        repo = self.repo
        try:
            # If the first commits in the rebased set get skipped during the
            # rebase, their values within the state mapping will be the dest
            # rev id. The rebased list must not contain the dest rev
            # (issue4896)
            rebased = [
                s
                for r, s in self.state.items()
                if s >= 0 and s != r and s != self.destmap[r]
            ]
            immutable = [d for d in rebased if not repo[d].mutable()]
            cleanup = True
            if immutable:
                repo.ui.warn(
                    _(b"warning: can't clean up public changesets %s\n")
                    % b', '.join(bytes(repo[r]) for r in immutable),
                    hint=_(b"see 'hg help phases' for details"),
                )
                cleanup = False

            descendants = set()
            if rebased:
                descendants = set(repo.changelog.descendants(rebased))
            if descendants - set(rebased):
                repo.ui.warn(
                    _(
                        b"warning: new changesets detected on "
                        b"destination branch, can't strip\n"
                    )
                )
                cleanup = False

            if cleanup:
                if rebased:
                    strippoints = [
                        c.node() for c in repo.set(b'roots(%ld)', rebased)
                    ]

                updateifonnodes = set(rebased)
                updateifonnodes.update(self.destmap.values())

                if not dryrun and not confirm:
                    updateifonnodes.add(self.originalwd)

                shouldupdate = repo[b'.'].rev() in updateifonnodes

                # Update away from the rebase if necessary
                if shouldupdate:
                    mergemod.clean_update(repo[self.originalwd])

                # Strip from the first rebased revision
                if rebased:
                    repair.strip(repo.ui, repo, strippoints, backup=backup)

            if self.activebookmark and self.activebookmark in repo._bookmarks:
                bookmarks.activate(repo, self.activebookmark)

        finally:
            clearstatus(repo)
            clearcollapsemsg(repo)
            if not suppwarns:
                repo.ui.warn(_(b'rebase aborted\n'))
        return 0
838 833
839 834
840 835 @command(
841 836 b'rebase',
842 837 [
843 838 (
844 839 b's',
845 840 b'source',
846 841 [],
847 842 _(b'rebase the specified changesets and their descendants'),
848 843 _(b'REV'),
849 844 ),
850 845 (
851 846 b'b',
852 847 b'base',
853 848 [],
854 849 _(b'rebase everything from branching point of specified changeset'),
855 850 _(b'REV'),
856 851 ),
857 852 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
858 853 (
859 854 b'd',
860 855 b'dest',
861 856 b'',
862 857 _(b'rebase onto the specified changeset'),
863 858 _(b'REV'),
864 859 ),
865 860 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
866 861 (
867 862 b'm',
868 863 b'message',
869 864 b'',
870 865 _(b'use text as collapse commit message'),
871 866 _(b'TEXT'),
872 867 ),
873 868 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
874 869 (
875 870 b'l',
876 871 b'logfile',
877 872 b'',
878 873 _(b'read collapse commit message from file'),
879 874 _(b'FILE'),
880 875 ),
881 876 (b'k', b'keep', False, _(b'keep original changesets')),
882 877 (b'', b'keepbranches', False, _(b'keep original branch names')),
883 878 (b'D', b'detach', False, _(b'(DEPRECATED)')),
884 879 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
885 880 (b't', b'tool', b'', _(b'specify merge tool')),
886 881 (b'', b'stop', False, _(b'stop interrupted rebase')),
887 882 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
888 883 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
889 884 (
890 885 b'',
891 886 b'auto-orphans',
892 887 b'',
893 888 _(
894 889 b'automatically rebase orphan revisions '
895 890 b'in the specified revset (EXPERIMENTAL)'
896 891 ),
897 892 ),
898 893 ]
899 894 + cmdutil.dryrunopts
900 895 + cmdutil.formatteropts
901 896 + cmdutil.confirmopts,
902 897 _(b'[[-s REV]... | [-b REV]... | [-r REV]...] [-d REV] [OPTION]...'),
903 898 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
904 899 )
905 900 def rebase(ui, repo, **opts):
906 901 """move changeset (and descendants) to a different branch
907 902
908 903 Rebase uses repeated merging to graft changesets from one part of
909 904 history (the source) onto another (the destination). This can be
910 905 useful for linearizing *local* changes relative to a master
911 906 development tree.
912 907
913 908 Published commits cannot be rebased (see :hg:`help phases`).
914 909 To copy commits, see :hg:`help graft`.
915 910
916 911 If you don't specify a destination changeset (``-d/--dest``), rebase
917 912 will use the same logic as :hg:`merge` to pick a destination. if
918 913 the current branch contains exactly one other head, the other head
919 914 is merged with by default. Otherwise, an explicit revision with
920 915 which to merge with must be provided. (destination changeset is not
921 916 modified by rebasing, but new changesets are added as its
922 917 descendants.)
923 918
924 919 Here are the ways to select changesets:
925 920
926 921 1. Explicitly select them using ``--rev``.
927 922
928 923 2. Use ``--source`` to select a root changeset and include all of its
929 924 descendants.
930 925
931 926 3. Use ``--base`` to select a changeset; rebase will find ancestors
932 927 and their descendants which are not also ancestors of the destination.
933 928
934 929 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
935 930 rebase will use ``--base .`` as above.
936 931
937 932 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
938 933 can be used in ``--dest``. Destination would be calculated per source
939 934 revision with ``SRC`` substituted by that single source revision and
940 935 ``ALLSRC`` substituted by all source revisions.
941 936
942 937 Rebase will destroy original changesets unless you use ``--keep``.
943 938 It will also move your bookmarks (even if you do).
944 939
945 940 Some changesets may be dropped if they do not contribute changes
946 941 (e.g. merges from the destination branch).
947 942
948 943 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
949 944 a named branch with two heads. You will need to explicitly specify source
950 945 and/or destination.
951 946
952 947 If you need to use a tool to automate merge/conflict decisions, you
953 948 can specify one with ``--tool``, see :hg:`help merge-tools`.
954 949 As a caveat: the tool will not be used to mediate when a file was
955 950 deleted, there is no hook presently available for this.
956 951
957 952 If a rebase is interrupted to manually resolve a conflict, it can be
958 953 continued with --continue/-c, aborted with --abort/-a, or stopped with
959 954 --stop.
960 955
961 956 .. container:: verbose
962 957
963 958 Examples:
964 959
965 960 - move "local changes" (current commit back to branching point)
966 961 to the current branch tip after a pull::
967 962
968 963 hg rebase
969 964
970 965 - move a single changeset to the stable branch::
971 966
972 967 hg rebase -r 5f493448 -d stable
973 968
974 969 - splice a commit and all its descendants onto another part of history::
975 970
976 971 hg rebase --source c0c3 --dest 4cf9
977 972
978 973 - rebase everything on a branch marked by a bookmark onto the
979 974 default branch::
980 975
981 976 hg rebase --base myfeature --dest default
982 977
983 978 - collapse a sequence of changes into a single commit::
984 979
985 980 hg rebase --collapse -r 1520:1525 -d .
986 981
987 982 - move a named branch while preserving its name::
988 983
989 984 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
990 985
991 986 - stabilize orphaned changesets so history looks linear::
992 987
993 988 hg rebase -r 'orphan()-obsolete()'\
994 989 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
995 990 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
996 991
997 992 Configuration Options:
998 993
999 994 You can make rebase require a destination if you set the following config
1000 995 option::
1001 996
1002 997 [commands]
1003 998 rebase.requiredest = True
1004 999
1005 1000 By default, rebase will close the transaction after each commit. For
1006 1001 performance purposes, you can configure rebase to use a single transaction
1007 1002 across the entire rebase. WARNING: This setting introduces a significant
1008 1003 risk of losing the work you've done in a rebase if the rebase aborts
1009 1004 unexpectedly::
1010 1005
1011 1006 [rebase]
1012 1007 singletransaction = True
1013 1008
1014 1009 By default, rebase writes to the working copy, but you can configure it to
1015 1010 run in-memory for better performance. When the rebase is not moving the
1016 1011 parent(s) of the working copy (AKA the "currently checked out changesets"),
1017 1012 this may also allow it to run even if the working copy is dirty::
1018 1013
1019 1014 [rebase]
1020 1015 experimental.inmemory = True
1021 1016
1022 1017 Return Values:
1023 1018
1024 1019 Returns 0 on success, 1 if nothing to rebase or there are
1025 1020 unresolved conflicts.
1026 1021
1027 1022 """
1028 1023 opts = pycompat.byteskwargs(opts)
1029 1024 inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
1030 1025 action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
1031 1026 if action:
1032 1027 cmdutil.check_incompatible_arguments(
1033 1028 opts, action, [b'confirm', b'dry_run']
1034 1029 )
1035 1030 cmdutil.check_incompatible_arguments(
1036 1031 opts, action, [b'rev', b'source', b'base', b'dest']
1037 1032 )
1038 1033 cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
1039 1034 cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
1040 1035
1041 1036 if action or repo.currenttransaction() is not None:
1042 1037 # in-memory rebase is not compatible with resuming rebases.
1043 1038 # (Or if it is run within a transaction, since the restart logic can
1044 1039 # fail the entire transaction.)
1045 1040 inmemory = False
1046 1041
1047 1042 if opts.get(b'auto_orphans'):
1048 1043 disallowed_opts = set(opts) - {b'auto_orphans'}
1049 1044 cmdutil.check_incompatible_arguments(
1050 1045 opts, b'auto_orphans', disallowed_opts
1051 1046 )
1052 1047
1053 1048 userrevs = list(repo.revs(opts.get(b'auto_orphans')))
1054 1049 opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
1055 1050 opts[b'dest'] = b'_destautoorphanrebase(SRC)'
1056 1051
1057 1052 if opts.get(b'dry_run') or opts.get(b'confirm'):
1058 1053 return _dryrunrebase(ui, repo, action, opts)
1059 1054 elif action == b'stop':
1060 1055 rbsrt = rebaseruntime(repo, ui)
1061 1056 with repo.wlock(), repo.lock():
1062 1057 rbsrt.restorestatus()
1063 1058 if rbsrt.collapsef:
1064 1059 raise error.Abort(_(b"cannot stop in --collapse session"))
1065 1060 allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
1066 1061 if not (rbsrt.keepf or allowunstable):
1067 1062 raise error.Abort(
1068 1063 _(
1069 1064 b"cannot remove original changesets with"
1070 1065 b" unrebased descendants"
1071 1066 ),
1072 1067 hint=_(
1073 1068 b'either enable obsmarkers to allow unstable '
1074 1069 b'revisions or use --keep to keep original '
1075 1070 b'changesets'
1076 1071 ),
1077 1072 )
1078 1073 # update to the current working revision
1079 1074 # to clear interrupted merge
1080 1075 hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
1081 1076 rbsrt._finishrebase()
1082 1077 return 0
1083 1078 elif inmemory:
1084 1079 try:
1085 1080 # in-memory merge doesn't support conflicts, so if we hit any, abort
1086 1081 # and re-run as an on-disk merge.
1087 1082 overrides = {(b'rebase', b'singletransaction'): True}
1088 1083 with ui.configoverride(overrides, b'rebase'):
1089 1084 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
1090 1085 except error.InMemoryMergeConflictsError:
1091 1086 ui.warn(
1092 1087 _(
1093 1088 b'hit merge conflicts; re-running rebase without in-memory'
1094 1089 b' merge\n'
1095 1090 )
1096 1091 )
1097 1092 # TODO: Make in-memory merge not use the on-disk merge state, so
1098 1093 # we don't have to clean it here
1099 1094 mergestatemod.mergestate.clean(repo)
1100 1095 clearstatus(repo)
1101 1096 clearcollapsemsg(repo)
1102 1097 return _dorebase(ui, repo, action, opts, inmemory=False)
1103 1098 else:
1104 1099 return _dorebase(ui, repo, action, opts)
1105 1100
1106 1101
def _dryrunrebase(ui, repo, action, opts):
    """Perform an in-memory rebase for --dry-run or --confirm.

    The rebase is run fully in-memory and left unfinished; on success it is
    either finalized (--confirm answered yes) or rolled back (--dry-run, or
    --confirm answered no).  Returns 0 on success, 1 on merge conflict.
    """
    rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
    confirm = opts.get(b'confirm')
    if confirm:
        ui.status(_(b'starting in-memory rebase\n'))
    else:
        ui.status(
            _(b'starting dry-run rebase; repository will not be changed\n')
        )
    with repo.wlock(), repo.lock():
        # needsabort tracks whether the finally-clause must roll back the
        # unfinished rebase; every path that already cleaned up (or must
        # leave state alone, like error.Abort) clears it first.
        needsabort = True
        try:
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                # leaveunfinished=True: keep rebase state so we can either
                # finish or abort it below depending on --confirm.
                _origrebase(
                    ui,
                    repo,
                    action,
                    opts,
                    rbsrt,
                    inmemory=True,
                    leaveunfinished=True,
                )
        except error.InMemoryMergeConflictsError:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            needsabort = False
            raise
        else:
            if confirm:
                ui.status(_(b'rebase completed successfully\n'))
                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase()
                else:
                    rbsrt._prepareabortorcontinue(
                        isabort=True,
                        backup=False,
                        suppwarns=True,
                        confirm=confirm,
                    )
                needsabort = False
            else:
                ui.status(
                    _(
                        b'dry-run rebase completed successfully; run without'
                        b' -n/--dry-run to perform this rebase\n'
                    )
                )
            return 0
        finally:
            if needsabort:
                # no need to store backup in case of dryrun
                rbsrt._prepareabortorcontinue(
                    isabort=True,
                    backup=False,
                    suppwarns=True,
                    dryrun=opts.get(b'dry_run'),
                )
1167 1162
1168 1163
def _dorebase(ui, repo, action, opts, inmemory=False):
    """Run a real (non-dry-run) rebase with a freshly created runtime."""
    runtime = rebaseruntime(repo, ui, inmemory, opts)
    return _origrebase(ui, repo, action, opts, runtime, inmemory=inmemory)
1172 1167
1173 1168
def _origrebase(
    ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
):
    """Shared implementation behind new, continued and aborted rebases.

    ``action`` is None for a new rebase, or b'continue'/b'abort' (b'stop' is
    handled by the caller and asserted against here).  ``rbsrt`` is the
    rebaseruntime holding all rebase state.  When ``leaveunfinished`` is set,
    the rebase state is kept on disk so the caller (dry-run/confirm) can
    decide later whether to finish or abort it.
    """
    assert action != b'stop'
    with repo.wlock(), repo.lock():
        if opts.get(b'interactive'):
            # rebase has no interactive mode; point the user at histedit,
            # including the --config incantation if it is not enabled.
            try:
                if extensions.find(b'histedit'):
                    enablehistedit = b''
            except KeyError:
                enablehistedit = b" --config extensions.histedit="
            help = b"hg%s help -e histedit" % enablehistedit
            msg = (
                _(
                    b"interactive history editing is supported by the "
                    b"'histedit' extension (see \"%s\")"
                )
                % help
            )
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(_(b'message can only be specified with collapse'))

        if action:
            # resuming (continue/abort) an existing rebase
            if rbsrt.collapsef:
                raise error.Abort(
                    _(b'cannot use collapse with continue or abort')
                )
            if action == b'abort' and opts.get(b'tool', False):
                ui.warn(_(b'tool option will be ignored\n'))
            if action == b'continue':
                ms = mergestatemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(
                isabort=(action == b'abort')
            )
            if retcode is not None:
                return retcode
        else:
            # search default destination in this space
            # used in the 'hg pull --rebase' case, see issue 5214.
            destspace = opts.get(b'_destspace')
            destmap = _definedestmap(
                ui,
                repo,
                inmemory,
                opts.get(b'dest', None),
                opts.get(b'source', []),
                opts.get(b'base', []),
                opts.get(b'rev', []),
                destspace=destspace,
            )
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool(b'rebase', b'singletransaction')
        if singletr:
            tr = repo.transaction(b'rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not leaveunfinished:
                    rbsrt._finishrebase()
1252 1247
1253 1248
def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace):
    """use revisions argument to define destmap {srcrev: destrev}

    Interprets the command-line selectors (``destf``/--dest, ``srcf``/--source,
    ``basef``/--base, ``revf``/--rev) and returns a mapping from each source
    revision to its rebase destination, or None when there is nothing to
    rebase (a status message is emitted in that case).
    """
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details

    cmdutil.checkunfinished(repo)
    if not inmemory:
        cmdutil.bailifchanged(repo)

    if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
        raise error.Abort(
            _(b'you must specify a destination'),
            hint=_(b'use: hg rebase -d REV'),
        )

    dest = None

    if revf:
        # --rev: rebase exactly the given set
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        # --source: the given roots plus all their descendants
        src = scmutil.revrange(repo, srcf)
        if not src:
            ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
            return None
        # `+ (%ld)` to work around `wdir()::` being empty
        rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src)
    else:
        # --base (or implicit "."): compute the rebase set from branching
        # points between the bases and the destination
        base = scmutil.revrange(repo, basef or [b'.'])
        if not base:
            ui.status(
                _(b'empty "base" revision set - ' b"can't compute rebase set\n")
            )
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = []  # selected children of branching points
        bpbase = {}  # {branchingpoint: [origbase]}
        for b in base:  # group bases by branching points
            bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in pycompat.iteritems(bpbase):  # calculate roots
            roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs(b'%ld::', roots)

        if not rebaseset:
            # Empty set: emit the most specific "nothing to rebase" message.
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - %s is both "base"'
                            b' and destination\n'
                        )
                        % dest
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working directory '
                            b'parent is also destination\n'
                        )
                    )
            elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - "base" %s is '
                            b'already an ancestor of destination '
                            b'%s\n'
                        )
                        % (b'+'.join(bytes(repo[r]) for r in base), dest)
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working '
                            b'directory parent is already an '
                            b'ancestor of destination %s\n'
                        )
                        % dest
                    )
            else:  # can it happen?
                ui.status(
                    _(b'nothing to rebase from %s to %s\n')
                    % (b'+'.join(bytes(repo[r]) for r in base), dest)
                )
            return None

    if nodemod.wdirrev in rebaseset:
        raise error.Abort(_(b'cannot rebase the working copy'))
    rebasingwcp = repo[b'.'].rev() in rebaseset
    ui.log(
        b"rebase",
        b"rebasing working copy parent: %r\n",
        rebasingwcp,
        rebase_rebasing_wcp=rebasingwcp,
    )
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    # SRC/ALLSRC aliases allow --dest expressions computed per source rev
    allsrc = revsetlang.formatspec(b'%ld', rebaseset)
    alias = {b'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(
                        _(b'rebase destination for %s is not unique') % repo[r]
                    )

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}

    if not destmap:
        ui.status(_(b'nothing to rebase - empty destination\n'))
        return None

    return destmap
1413 1408
1414 1409
def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state is collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    # Collect parents of every rebased rev (except the collapse source
    # itself) that lie outside both the rebase set and the destination's
    # ancestry - those are the "external" parents.
    source = min(state)
    external = {
        p.rev()
        for rev in state
        if rev != source
        for p in repo[rev].parents()
        if p.rev() not in state and p.rev() not in destancestors
    }
    if not external:
        return nullrev
    if len(external) == 1:
        return external.pop()
    raise error.Abort(
        _(
            b'unable to collapse on top of %d, there is more '
            b'than one external parent: %s'
        )
        % (max(destancestors), b', '.join(b"%d" % p for p in sorted(external)))
    )
1439 1434
1440 1435
def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision.'''
    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``); absent key means
    # no explicit branch.
    memctx = wctx.tomemctx(
        commitmsg,
        date=date,
        extra=extra,
        user=user,
        branch=extra.get(b'branch'),
        editor=editor,
    )
    if memctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
        # nothing changed and empty commits are not allowed
        return None
    commitres = repo.commitctx(memctx)
    wctx.clean()  # Might be reused
    return commitres
1463 1458
1464 1459
def commitnode(repo, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    if repo.ui.configbool(b'rebase', b'singletransaction'):
        # the enclosing single transaction already guards the dirstate
        guard = util.nullcontextmanager()
    else:
        guard = dirstateguard.dirstateguard(repo, b'rebase')
    with guard:
        # Commit might fail if unresolved files exist
        newnode = repo.commit(
            text=commitmsg, user=user, date=date, extra=extra, editor=editor
        )

    repo.dirstate.setbranch(repo[newnode].branch())
    return newnode
1479 1474
1480 1475
def rebasenode(repo, rev, p1, p2, base, collapse, dest, wctx):
    """Rebase a single revision rev on top of p1 using base as merge ancestor

    ``wctx`` may be an in-memory or on-disk working context; on-disk rebases
    update the working directory to ``p1`` first.  Returns the merge stats
    from ``mergemod.update``.
    """
    # Merge phase
    # Update to destination and merge it with local
    p1ctx = repo[p1]
    if wctx.isinmemory():
        wctx.setbase(p1ctx)
    else:
        if repo[b'.'].rev() != p1:
            repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
            mergemod.clean_update(p1ctx)
        else:
            repo.ui.debug(b" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    ctx = repo[rev]
    repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx))
    if base is not None:
        repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))

    # See explanation in merge.graft()
    mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
    stats = mergemod.update(
        repo,
        rev,
        branchmerge=True,
        force=True,
        ancestor=base,
        mergeancestor=mergeancestor,
        labels=[b'dest', b'source'],
        wc=wctx,
    )
    # record the new parents before grafting copies so copy tracing sees
    # the rebased position
    wctx.setparents(p1ctx.node(), repo[p2].node())
    if collapse:
        copies.graftcopies(wctx, ctx, repo[dest])
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent.
        copies.graftcopies(wctx, ctx, ctx.p1())
    return stats
1524 1519
1525 1520
def adjustdest(repo, rev, destmap, state, skipped):
    r"""adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when doing rebasing B+E to F, C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1    C'
        |/                      |
        A                       G <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

        H       C1 G1
       /|       | /
      F G       |/
    K | |  ->   K
    | C D       |
    | |/        |
    | B         | ...
    |/          |/
    A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

        C'     After rebasing C, when considering B's destination, use C'
        |      instead of the original C.
      B D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [
        s
        for s, d in state.items()
        if d > 0 and destmap[s] == dest and s not in skipped
    ]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            # the latest rebased ancestor of this parent, if any, supersedes
            # the original destination
            candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            # dest itself was rebased; chase it to its new location
            adjusted = state[dest]
        if adjusted == revtodo:
            # sortsource should produce an order that makes this impossible
            raise error.ProgrammingError(
                b'rev %d should be rebased already at this time' % dest
            )
        result.append(adjusted)
    return result
1602 1597
1603 1598
def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revision in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
    # Obsolete node with successors not in dest leads to divergence
    allowdivergence = ui.configbool(
        b'experimental', b'evolution.allowdivergence'
    )
    candidates = rebaseobsrevs - rebaseobsskipped

    if not allowdivergence and candidates:
        hashes = (bytes(repo[r]) for r in candidates)
        raise error.Abort(
            _(b"this rebase will cause divergences from: %s")
            % (b",".join(hashes),),
            hint=_(
                b"to force the rebase please set "
                b"experimental.evolution.allowdivergence=True"
            ),
        )
1624 1619
1625 1620
def successorrevs(unfi, rev):
    """yield revision numbers for successors of rev"""
    # must run on the unfiltered repo: successors may be hidden
    assert unfi.filtername is None
    to_rev = unfi.changelog.index.get_rev
    for node in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
        successor = to_rev(node)
        # unknown nodes (successors not present locally) are skipped
        if successor is not None:
            yield successor
1634 1629
1635 1630
def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    Returns a ``(p1, p2, base)`` tuple of revision numbers, where ``base``
    is the merge ancestor ``rebasenode`` should use (may be nullrev).

    The destination specified by "dest" cannot always be used directly because
    previously rebase result could affect destination. For example,

        D E    rebase -r C+D+E -d B
        |/     C will be rebased to C'
      B C      D's new destination will be C' instead of B
      |/       E's new destination will be C' instead of B
      A

    The new parents of a merge is slightly more complicated. See the comment
    block below.
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    isancestor = cl.isancestorrev

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev)  # old parents
    newps = [nullrev, nullrev]  # new parents
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps)  # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limit because "--dest" only
        # accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        #   1. use dest, if dest is a descendent of (p or one of p's successors)
        #   2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        #   1. decide which parents will not be moved towards dest
        #   2. if the above decision is "no", should a parent still be moved
        #      because it was rebased?
        #
        # For example:
        #
        #     C    # "rebase -r C -d D" is an error since none of the parents
        #    /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #   A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries to be not rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p  # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # If one parent becomes an ancestor of the other, drop the ancestor
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x):  # CASE-1
                    np = nullrev
                elif isancestor(x, np):  # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

        # "rebasenode" updates to new p1, and the old p1 will be used as merge
        # base. If only p2 changes, merging using unchanged p1 as merge base is
        # suboptimal. Therefore swap parents to make the merge sane.
        if newps[1] != nullrev and oldps[0] == newps[0]:
            assert len(newps) == 2 and len(oldps) == 2
            newps.reverse()
            bases.reverse()

        # No parent change might be an error because we fail to make rev a
        # descendent of requested dest. This can happen, for example:
        #
        #     C    # rebase -r C -d D
        #    /|    # None of A and B will be changed to D and rebase fails.
        #   A B D
        if set(newps) == set(oldps) and dest not in newps:
            raise error.Abort(
                _(
                    b'cannot rebase %d:%s without '
                    b'moving at least one of its parents'
                )
                % (rev, repo[rev])
            )

    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.Abort(_(b'source is ancestor of destination'))

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   # chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
        unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev or base in newps:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            other_bases = set(bases) - {base}
            siderevs = list(
                repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
            )

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [
                    r for r, d in state.items() if d > 0 and r not in obsskipped
                ]
                merges = [
                    r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
                ]
                unwanted[i] = list(
                    repo.revs(
                        b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
                    )
                )

        if any(revs is not None for revs in unwanted):
            # Choose a merge base that has a minimal number of unwanted revs.
            l, i = min(
                (len(revs), i)
                for i, revs in enumerate(unwanted)
                if revs is not None
            )

            # The merge will include unwanted revisions. Abort now. Revisit this if
            # we have a more advanced merge algorithm that handles multiple bases.
            if l > 0:
                unwanteddesc = _(b' or ').join(
                    (
                        b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
                        for revs in unwanted
                        if revs is not None
                    )
                )
                raise error.Abort(
                    _(b'rebasing %d:%s will include unwanted changes from %s')
                    % (rev, repo[rev], unwanteddesc)
                )

            # newps[0] should match merge base if possible. Currently, if newps[i]
            # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
            # the other's ancestor. In that case, it's fine to not swap newps here.
            # (see CASE-1 and CASE-2 above)
            if i != 0:
                if newps[i] != nullrev:
                    newps[0], newps[i] = newps[i], newps[0]
                bases[0], bases[i] = bases[i], bases[0]

    # "rebasenode" updates to new p1, use the corresponding merge base.
    base = bases[0]

    repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base
1820 1815
1821 1816
def isagitpatch(repo, patchname):
    """Tell whether the named mq patch is stored in git extended diff format.

    The patch file is scanned line by line; git format is recognized by a
    'diff --git' header line.
    """
    patchpath = os.path.join(repo.mq.path, patchname)
    reader = patch.linereader(open(patchpath, b'rb'))
    return any(line.startswith(b'diff --git') for line in reader)
1829 1824
1830 1825
def updatemq(repo, state, skipped, **opts):
    """Update rebased mq patches - finalize and then import them"""
    rebasedpatches = {}  # rev -> (patch name, patch is in git format)
    mq = repo.mq
    original_series = mq.fullseries[:]
    droppedpatches = set()  # patch names to drop from the series file

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug(
                b'revision %d is an mq patch (%s), finalize it.\n'
                % (rev, p.name)
            )
            rebasedpatches[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            droppedpatches.add(p.name)

    if rebasedpatches:
        mq.finish(repo, rebasedpatches.keys())

        # We must start import from the newest revision
        for rev in sorted(rebasedpatches, reverse=True):
            if rev in skipped:
                # Rebased and skipped: drop this patch from the series
                droppedpatches.add(rebasedpatches[rev][0])
            else:
                name, isgit = rebasedpatches[rev]
                repo.ui.note(
                    _(b'updating mq patch %s to %d:%s\n')
                    % (name, state[rev], repo[state[rev]])
                )
                mq.qimport(
                    repo,
                    (),
                    patchname=name,
                    git=isgit,
                    rev=[b"%d" % state[rev]],
                )

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        mq.fullseries[:] = [
            entry
            for entry in original_series
            if mq.guard_re.split(entry, 1)[0] not in droppedpatches
        ]
        mq.seriesdirty = True
        mq.savedirty()
1883 1878
1884 1879
def storecollapsemsg(repo, collapsemsg):
    """Write the collapse commit message to .hg/last-message.txt so that an
    interrupted collapsing rebase can recover it."""
    msg = collapsemsg if collapsemsg else b''
    f = repo.vfs(b"last-message.txt", b"w")
    f.write(b"%s\n" % msg)
    f.close()
1891 1886
1892 1887
def clearcollapsemsg(repo):
    """Delete the stored collapse message, tolerating its absence."""
    # ignoremissing: nothing to do if no message was ever stored
    repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
1896 1891
1897 1892
def restorecollapsemsg(repo, isabort):
    """Read back the collapse message written by storecollapsemsg().

    When the file is missing: return b'' if we are aborting anyway,
    otherwise raise Abort (continuing needs the message).
    """
    try:
        f = repo.vfs(b"last-message.txt")
        collapsemsg = f.readline().strip()
        f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if not isabort:
            raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
        # Oh well, just abort like normal
        collapsemsg = b''
    return collapsemsg
1913 1908
1914 1909
def clearstatus(repo):
    """Delete the rebase state file and cancel its pending transaction write."""
    # Make sure the active transaction won't write the state file back
    txn = repo.currenttransaction()
    if txn:
        txn.removefilegenerator(b'rebasestate')
    repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
1922 1917
1923 1918
def sortsource(destmap):
    """Yield source revisions in an order that rebases each one only once.

    When sources and destinations overlap, a revision must not be rebased
    before the revisions it depends on.  Each yielded value is a sorted
    list of revisions whose destinations are no longer pending.

    For example, when rebasing A to B and B to C, this yields [B] then
    [A]: B needs to be rebased first.

    Raise if there is a cycle so the rebase is impossible.
    """
    pending = set(destmap)
    while pending:
        ready = [r for r in sorted(pending) if destmap[r] not in pending]
        if not ready:
            raise error.Abort(_(b'source and destination form a cycle'))
        pending.difference_update(ready)
        yield ready
1948 1943
1949 1944
def buildstate(repo, destmap, collapse):
    """Compute the initial rebase state: which revisions move, and where.

    repo: repo
    destmap: {srcrev: destrev}

    Returns (originalwd, destmap, state), or None when every source is
    already a child of its destination (nothing to do).
    """
    rebaseset = destmap.keys()
    originalwd = repo[b'.'].rev()

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if b'qtip' in repo.tags():
        mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
        if set(destmap.values()) & mqapplied:
            raise error.Abort(_(b'cannot rebase onto an applied mq patch'))

    # Get "cycle" error early by exhausting the generator.
    sortedsrc = list(sortsource(destmap))  # a list of sorted revs
    if not sortedsrc:
        raise error.Abort(_(b'no matching revisions'))

    # Only check the first batch of revisions to rebase not depending on
    # other members of the rebase set; "source is ancestor of destination"
    # for later batches is checked by "defineparents".
    roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
    if not roots:
        raise error.Abort(_(b'no matching revisions'))
    roots = sorted(roots, key=lambda ctx: ctx.rev())

    state = dict.fromkeys(rebaseset, revtodo)
    emptyrebase = len(sortedsrc) == 1
    for root in roots:
        dest = repo[destmap[root.rev()]]
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_(b'source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # already in place: mark done by mapping the revision to
                # itself
                state[root.rev()] = root.rev()
                repo.ui.debug(b'source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all(state.get(p) == p for p in parents):
            state[rev] = rev
    return originalwd, destmap, state
2015 2010
2016 2011
def clearrebased(
    ui,
    repo,
    destmap,
    state,
    skipped,
    collapsedas=None,
    keepf=False,
    fm=None,
    backup=True,
):
    """dispose of rebased revisions at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is
    the `collapsedas` node.

    If `keepf` is True, the rebase has --keep set and no nodes should be
    removed (but bookmarks still need to be moved).

    If `backup` is False, no backup will be stored when stripping rebased
    revisions.
    """
    tonode = repo.changelog.node
    replacements = {}
    moves = {}
    # without obsmarker support, cleanup means stripping the old revisions
    stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)

    collapsednodes = []
    for rev, newrev in sorted(state.items()):
        if newrev < 0 or newrev == rev:
            continue
        oldnode = tonode(rev)
        newnode = collapsedas or tonode(newrev)
        moves[oldnode] = newnode
        succs = None
        if rev in skipped:
            if stripcleanup or not repo[rev].obsolete():
                succs = ()
        elif collapsedas:
            collapsednodes.append(oldnode)
        else:
            succs = (newnode,)
        if succs is not None:
            replacements[(oldnode,)] = succs
    if collapsednodes:
        # fold all collapsed predecessors into a single replacement entry
        replacements[tuple(collapsednodes)] = (collapsedas,)
    if fm:
        hexfn = fm.hexfunc
        fmtlist = fm.formatlist
        fmtdict = fm.formatdict
        changes = {}
        for oldns, newn in pycompat.iteritems(replacements):
            for oldn in oldns:
                changes[hexfn(oldn)] = fmtlist(
                    [hexfn(n) for n in newn], name=b'node'
                )
        nodechanges = fmtdict(changes, key=b"oldnode", value=b"newnodes")
        fm.data(nodechanges=nodechanges)
    if keepf:
        # --keep: report the moves above, but strip/obsolete nothing
        replacements = {}
    scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
2075 2070
2076 2071
def pullrebase(orig, ui, repo, *args, **opts):
    """Call rebase after pull if the latter has been invoked with --rebase"""
    if not opts.get('rebase'):
        # plain pull; --tool is only meaningful together with --rebase
        if opts.get('tool'):
            raise error.Abort(_(b'--tool can only be used with --rebase'))
        return orig(ui, repo, *args, **opts)

    if ui.configbool(b'commands', b'rebase.requiredest'):
        msg = _(b'rebase destination required by configuration')
        hint = _(b'use hg pull followed by hg rebase -d DEST')
        raise error.Abort(msg, hint=hint)

    with repo.wlock(), repo.lock():
        if opts.get('update'):
            del opts['update']
            ui.debug(
                b'--update and --rebase are not compatible, ignoring '
                b'the update flag\n'
            )

        cmdutil.checkunfinished(repo, skipmerge=True)
        cmdutil.bailifchanged(
            repo,
            hint=_(
                b'cannot pull with rebase: '
                b'please commit or shelve your changes first'
            ),
        )

        revsprepull = len(repo)
        origpostincoming = commands.postincoming

        def _dummy(*args, **kwargs):
            pass

        # silence the default post-pull report while pulling
        commands.postincoming = _dummy
        try:
            ret = orig(ui, repo, *args, **opts)
        finally:
            commands.postincoming = origpostincoming
        revspostpull = len(repo)
        if revspostpull > revsprepull:
            # --rev option from pull conflicts with rebase's own --rev;
            # the positional argument conflicts with rebase's --source.
            # Drop both before forwarding opts.
            opts.pop('rev', None)
            opts.pop('source', None)
            # revsprepull is the len of the repo, not revnum of tip.
            destspace = list(repo.changelog.revs(start=revsprepull))
            opts['_destspace'] = destspace
            try:
                rebase(ui, repo, **opts)
            except error.NoMergeDestAbort:
                # we can maybe update instead
                rev, _a, _b = destutil.destupdate(repo)
                if rev == repo[b'.'].rev():
                    ui.status(_(b'nothing to rebase\n'))
                else:
                    ui.status(_(b'nothing to rebase - updating instead\n'))
                    # not passing argument to get the bare update behavior
                    # with warning and trumpets
                    commands.update(ui, repo)

    return ret
2144 2139
2145 2140
2146 2141 def _filterobsoleterevs(repo, revs):
2147 2142 """returns a set of the obsolete revisions in revs"""
2148 2143 return {r for r in revs if repo[r].obsolete()}
2149 2144
2150 2145
def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
    """Analyze the obsolete revisions in `rebaseobsrevs`.

    Returns a 3-tuple:

    `obsoletenotrebased` maps obsolete => successor for all obsolete nodes
    to be rebased whose successor is already in the destination (or None
    for pruned nodes with no successor at all).

    `obsoletewithoutsuccessorindestination` is a set with obsolete
    revisions without a successor in destination.

    `obsoleteextinctsuccessors` is a set of obsolete revisions with only
    obsolete successors.
    """
    obsoletenotrebased = {}
    obsoletewithoutsuccessorindestination = set()
    obsoleteextinctsuccessors = set()

    assert repo.filtername is None
    cl = repo.changelog
    get_rev = cl.index.get_rev
    extinctrevs = set(repo.revs(b'extinct()'))
    for srcrev in rebaseobsrevs:
        srcnode = cl.node(srcrev)
        # XXX: more advanced APIs are required to handle split correctly
        successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
        # obsutil.allsuccessors includes the node itself; drop it
        successors.remove(srcnode)
        succrevs = {get_rev(s) for s in successors}
        succrevs.discard(None)
        if succrevs.issubset(extinctrevs):
            # every known successor is itself extinct
            obsoleteextinctsuccessors.add(srcrev)
        if not successors:
            # pruned: no successor at all
            obsoletenotrebased[srcrev] = None
        else:
            dstrev = destmap[srcrev]
            for succrev in succrevs:
                if cl.isancestorrev(succrev, dstrev):
                    obsoletenotrebased[srcrev] = succrev
                    break
            else:
                # If 'srcrev' has a successor in the rebase set but none in
                # destination (which would be caught above), skip it and
                # its descendants to avoid divergence.
                if srcrev in extinctrevs or any(
                    s in destmap for s in succrevs
                ):
                    obsoletewithoutsuccessorindestination.add(srcrev)

    return (
        obsoletenotrebased,
        obsoletewithoutsuccessorindestination,
        obsoleteextinctsuccessors,
    )
2203 2198
2204 2199
def abortrebase(ui, repo):
    """Abort an in-progress rebase (entry point for 'hg rebase --abort')."""
    with repo.wlock(), repo.lock():
        runtime = rebaseruntime(repo, ui)
        runtime._prepareabortorcontinue(isabort=True)
2209 2204
2210 2205
def continuerebase(ui, repo):
    """Resume an interrupted rebase (entry point for 'hg rebase --continue')."""
    with repo.wlock(), repo.lock():
        runtime = rebaseruntime(repo, ui)
        # refuse to continue while merge conflicts remain unresolved
        ms = mergestatemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)
        retcode = runtime._prepareabortorcontinue(isabort=False)
        if retcode is not None:
            return retcode
        runtime._performrebase(None)
        runtime._finishrebase()
2221 2216
2222 2217
def summaryhook(ui, repo):
    """Add a 'rebase:' line to 'hg summary' output while a rebase is pending."""
    if not repo.vfs.exists(b'rebasestate'):
        return
    try:
        rt = rebaseruntime(repo, ui, {})
        rt.restorestatus()
        state = rt.state
    except error.RepoLookupError:
        # i18n: column positioning for "hg summary"
        msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
        ui.write(msg)
        return
    numrebased = sum(1 for i in pycompat.itervalues(state) if i >= 0)
    # i18n: column positioning for "hg summary"
    ui.write(
        _(b'rebase: %s, %s (rebase --continue)\n')
        % (
            ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
            ui.label(_(b'%d remaining'), b'rebase.remaining')
            % (len(state) - numrebased),
        )
    )
2245 2240
2246 2241
def uisetup(ui):
    """Extension setup: add --rebase/--tool to 'pull' and register hooks."""
    # Replace pull with a decorator to provide --rebase option
    entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
    entry[1].extend(
        [
            (
                b'',
                b'rebase',
                None,
                _(b"rebase working directory to branch head"),
            ),
            (b't', b'tool', b'', _(b"specify merge tool for rebase")),
        ]
    )
    cmdutil.summaryhooks.add(b'rebase', summaryhook)
    statemod.addunfinished(
        b'rebase',
        fname=b'rebasestate',
        stopflag=True,
        continueflag=True,
        abortfunc=abortrebase,
        continuefunc=continuerebase,
    )
@@ -1,438 +1,454
1 1 # error.py - Mercurial exceptions
2 2 #
3 3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Mercurial exceptions.
9 9
10 10 This allows us to catch exceptions at higher levels without forcing
11 11 imports.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 # Do not import anything but pycompat here, please
17 17 from . import pycompat
18 18
19 19
def _tobytes(exc):
    """Byte-stringify exception in the same way as BaseException_str()"""
    args = exc.args
    if not args:
        return b''
    if len(args) == 1:
        return pycompat.bytestr(args[0])
    quoted = [b"'%s'" % pycompat.bytestr(a) for a in args]
    return b'(%s)' % b', '.join(quoted)
27 27
28 28
class Hint(object):
    """Mix-in adding an optional ``hint`` attribute to an exception.

    Must come first in the inheritance list so it can consume the ``hint``
    keyword and pass the remaining arguments on to the real exception
    class.
    """

    def __init__(self, *args, **kw):
        # pull our keyword out so the base class never sees it
        self.hint = kw.pop('hint', None)
        super(Hint, self).__init__(*args, **kw)
39 39
40 40
41 41 class StorageError(Hint, Exception):
42 42 """Raised when an error occurs in a storage layer.
43 43
44 44 Usually subclassed by a storage-specific exception.
45 45 """
46 46
47 47 __bytes__ = _tobytes
48 48
49 49
50 50 class RevlogError(StorageError):
51 51 __bytes__ = _tobytes
52 52
53 53
54 54 class SidedataHashError(RevlogError):
55 55 def __init__(self, key, expected, got):
56 56 self.sidedatakey = key
57 57 self.expecteddigest = expected
58 58 self.actualdigest = got
59 59
60 60
61 61 class FilteredIndexError(IndexError):
62 62 __bytes__ = _tobytes
63 63
64 64
class LookupError(RevlogError, KeyError):
    """Raised when a node or revision cannot be found in a revlog."""

    def __init__(self, name, index, message):
        self.name = name
        self.index = index
        # not named 'message': some installs of Python 2.6+ complain about
        # the deprecated BaseException 'message' property
        self.lookupmessage = message
        shown = name
        if isinstance(name, bytes) and len(name) == 20:
            # a 20-byte value is a binary node; display its short hex form
            from .node import short

            shown = short(name)
        RevlogError.__init__(self, b'%s@%s: %s' % (index, shown, message))

    def __bytes__(self):
        return RevlogError.__bytes__(self)

    def __str__(self):
        return RevlogError.__str__(self)
83 83
84 84
85 85 class AmbiguousPrefixLookupError(LookupError):
86 86 pass
87 87
88 88
89 89 class FilteredLookupError(LookupError):
90 90 pass
91 91
92 92
93 93 class ManifestLookupError(LookupError):
94 94 pass
95 95
96 96
97 97 class CommandError(Exception):
98 98 """Exception raised on errors in parsing the command line."""
99 99
100 100 __bytes__ = _tobytes
101 101
102 102
103 103 class InterventionRequired(Hint, Exception):
104 104 """Exception raised when a command requires human intervention."""
105 105
106 106 __bytes__ = _tobytes
107 107
108 108
class ConflictResolutionRequired(InterventionRequired):
    """Raised when a continuable command stops on unresolved merge conflicts.

    `opname` is the command name used in the normalized error message
    (e.g. 'rebase' -> "... then 'hg rebase --continue'").
    """

    def __init__(self, opname):
        from .i18n import _

        self.opname = opname
        message = (
            _(
                b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
            )
            % opname
        )
        InterventionRequired.__init__(self, message)
123
124
class Abort(Hint, Exception):
    """Raised when a command must print an error message and exit."""

    __bytes__ = _tobytes

    if pycompat.ispy3:

        def __str__(self):
            # The message is bytes (and possibly translated), so render it
            # through __bytes__; encoding.strfromlocal() is not used here
            # because it may itself raise while we are reporting an error.
            return pycompat.sysstr(self.__bytes__())
121 137
122 138
123 139 class HookLoadError(Abort):
124 140 """raised when loading a hook fails, aborting an operation
125 141
126 142 Exists to allow more specialized catching."""
127 143
128 144
129 145 class HookAbort(Abort):
130 146 """raised when a validation hook fails, aborting an operation
131 147
132 148 Exists to allow more specialized catching."""
133 149
134 150
135 151 class ConfigError(Abort):
136 152 """Exception raised when parsing config files"""
137 153
138 154
139 155 class UpdateAbort(Abort):
140 156 """Raised when an update is aborted for destination issue"""
141 157
142 158
143 159 class MergeDestAbort(Abort):
144 160 """Raised when an update is aborted for destination issues"""
145 161
146 162
147 163 class NoMergeDestAbort(MergeDestAbort):
148 164 """Raised when an update is aborted because there is nothing to merge"""
149 165
150 166
151 167 class ManyMergeDestAbort(MergeDestAbort):
152 168 """Raised when an update is aborted because destination is ambiguous"""
153 169
154 170
155 171 class ResponseExpected(Abort):
156 172 """Raised when an EOF is received for a prompt"""
157 173
158 174 def __init__(self):
159 175 from .i18n import _
160 176
161 177 Abort.__init__(self, _(b'response expected'))
162 178
163 179
164 180 class OutOfBandError(Hint, Exception):
165 181 """Exception raised when a remote repo reports failure"""
166 182
167 183 __bytes__ = _tobytes
168 184
169 185
170 186 class ParseError(Hint, Exception):
171 187 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
172 188
173 189 __bytes__ = _tobytes
174 190
175 191
176 192 class PatchError(Exception):
177 193 __bytes__ = _tobytes
178 194
179 195
class UnknownIdentifier(ParseError):
    """Raised when a {rev,file}set references an unknown identifier."""

    def __init__(self, function, symbols):
        from .i18n import _

        self.function = function
        self.symbols = symbols
        ParseError.__init__(self, _(b"unknown identifier: %s") % function)
189 205
190 206
191 207 class RepoError(Hint, Exception):
192 208 __bytes__ = _tobytes
193 209
194 210
195 211 class RepoLookupError(RepoError):
196 212 pass
197 213
198 214
199 215 class FilteredRepoLookupError(RepoLookupError):
200 216 pass
201 217
202 218
203 219 class CapabilityError(RepoError):
204 220 pass
205 221
206 222
207 223 class RequirementError(RepoError):
208 224 """Exception raised if .hg/requires has an unknown entry."""
209 225
210 226
class StdioError(IOError):
    """Raised when I/O to stdout or stderr fails."""

    def __init__(self, err):
        super(StdioError, self).__init__(err.errno, err.strerror)

    # no __bytes__() because error message is derived from the standard IOError
218 234
219 235
class UnsupportedMergeRecords(Abort):
    """Raised when the merge state contains record types we cannot handle."""

    def __init__(self, recordtypes):
        from .i18n import _

        self.recordtypes = sorted(recordtypes)
        joined = b' '.join(self.recordtypes)
        hint = _(
            b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
            b'more information'
        )
        Abort.__init__(
            self,
            _(b'unsupported merge state records: %s') % joined,
            hint=hint,
        )
234 250
235 251
class UnknownVersion(Abort):
    """Generic exception for aborting on an encounter with an unknown version.

    The offending version is kept on the exception so callers can react
    to it.
    """

    def __init__(self, msg, hint=None, version=None):
        self.version = version
        super(UnknownVersion, self).__init__(msg, hint=hint)
243 259
244 260
class LockError(IOError):
    """Base for errors while acquiring a repository lock."""

    def __init__(self, errno, strerror, filename, desc):
        super(LockError, self).__init__(errno, strerror, filename)
        self.desc = desc

    # no __bytes__() because error message is derived from the standard IOError
251 267
252 268
class LockHeld(LockError):
    """Raised when the lock is already held by another locker."""

    def __init__(self, errno, filename, desc, locker):
        LockError.__init__(self, errno, b'Lock held', filename, desc)
        self.locker = locker
257 273
258 274
259 275 class LockUnavailable(LockError):
260 276 pass
261 277
262 278
263 279 # LockError is for errors while acquiring the lock -- this is unrelated
264 280 class LockInheritanceContractViolation(RuntimeError):
265 281 __bytes__ = _tobytes
266 282
267 283
268 284 class ResponseError(Exception):
269 285 """Raised to print an error with part of output and exit."""
270 286
271 287 __bytes__ = _tobytes
272 288
273 289
274 290 class UnknownCommand(Exception):
275 291 """Exception raised if command is not in the command table."""
276 292
277 293 __bytes__ = _tobytes
278 294
279 295
280 296 class AmbiguousCommand(Exception):
281 297 """Exception raised if command shortcut matches more than one command."""
282 298
283 299 __bytes__ = _tobytes
284 300
285 301
286 302 # derived from KeyboardInterrupt to simplify some breakout code
287 303 class SignalInterrupt(KeyboardInterrupt):
288 304 """Exception raised on SIGTERM and SIGHUP."""
289 305
290 306
291 307 class SignatureError(Exception):
292 308 __bytes__ = _tobytes
293 309
294 310
295 311 class PushRaced(RuntimeError):
296 312 """An exception raised during unbundling that indicate a push race"""
297 313
298 314 __bytes__ = _tobytes
299 315
300 316
class ProgrammingError(Hint, RuntimeError):
    """Raised for mistakes made by Mercurial (core or extension) developers."""

    def __init__(self, msg, *args, **kwargs):
        # This is an internal-only error that surfaces in stack traces, so
        # on Python 3 keep the message a native str rather than bytes.
        msg = pycompat.sysstr(msg)
        super(ProgrammingError, self).__init__(msg, *args, **kwargs)

    __bytes__ = _tobytes
312 328
313 329
314 330 class WdirUnsupported(Exception):
315 331 """An exception which is raised when 'wdir()' is not supported"""
316 332
317 333 __bytes__ = _tobytes
318 334
319 335
320 336 # bundle2 related errors
321 337 class BundleValueError(ValueError):
322 338 """error raised when bundle2 cannot be processed"""
323 339
324 340 __bytes__ = _tobytes
325 341
326 342
class BundleUnknownFeatureError(BundleValueError):
    """Raised when a bundle2 part or stream uses an unknown feature."""

    def __init__(self, parttype=None, params=(), values=()):
        self.parttype = parttype
        self.params = params
        self.values = values
        if self.parttype is None:
            msg = b'Stream Parameter'
        else:
            msg = parttype
        entries = self.params
        if self.params and self.values:
            assert len(self.params) == len(self.values)
            entries = []
            for par, val in zip(self.params, self.values):
                if val is None:
                    # NOTE(review): a None entry would break the join below;
                    # presumably values are never None in practice — confirm
                    entries.append(val)
                else:
                    entries.append(
                        b"%s=%r" % (par, pycompat.maybebytestr(val))
                    )
        if entries:
            msg = b'%s - %s' % (msg, b', '.join(entries))
        ValueError.__init__(self, msg)
349 365
350 366
351 367 class ReadOnlyPartError(RuntimeError):
352 368 """error raised when code tries to alter a part being generated"""
353 369
354 370 __bytes__ = _tobytes
355 371
356 372
class PushkeyFailed(Abort):
    """Raised when a pushkey part fails to update a value."""

    def __init__(
        self, partid, namespace=None, key=None, new=None, old=None, ret=None
    ):
        self.partid = partid
        self.namespace = namespace
        self.key = key
        self.new = new
        self.old = old
        self.ret = ret
        # no i18n expected to be processed into a better message
        Abort.__init__(
            self, b'failed to update value for "%s/%s"' % (namespace, key)
        )
373 389
374 390
class CensoredNodeError(StorageError):
    """Raised when content verification fails on a censored node.

    Also carries the tombstone data substituted for the uncensored data.
    """

    def __init__(self, filename, node, tombstone):
        from .node import short

        StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
        self.tombstone = tombstone
386 402
387 403
388 404 class CensoredBaseError(StorageError):
389 405 """error raised when a delta is rejected because its base is censored
390 406
391 407 A delta based on a censored revision must be formed as single patch
392 408 operation which replaces the entire base with new content. This ensures
393 409 the delta may be applied by clones which have not censored the base.
394 410 """
395 411
396 412
397 413 class InvalidBundleSpecification(Exception):
398 414 """error raised when a bundle specification is invalid.
399 415
400 416 This is used for syntax errors as opposed to support errors.
401 417 """
402 418
403 419 __bytes__ = _tobytes
404 420
405 421
406 422 class UnsupportedBundleSpecification(Exception):
407 423 """error raised when a bundle specification is not supported."""
408 424
409 425 __bytes__ = _tobytes
410 426
411 427
412 428 class CorruptedState(Exception):
413 429 """error raised when a command is not able to read its state from file"""
414 430
415 431 __bytes__ = _tobytes
416 432
417 433
418 434 class PeerTransportError(Abort):
419 435 """Transport-level I/O error when communicating with a peer repo."""
420 436
421 437
422 438 class InMemoryMergeConflictsError(Exception):
423 439 """Exception raised when merge conflicts arose during an in-memory merge."""
424 440
425 441 __bytes__ = _tobytes
426 442
427 443
class WireprotoCommandError(Exception):
    """An error during execution of a wire protocol command.

    Should only be thrown by wire protocol version 2 commands.

    The error is a formatter string plus an optional iterable of arguments.
    """

    def __init__(self, message, args=None):
        # base Exception.__init__ is not invoked here; the formatter string
        # and its arguments are carried as plain attributes
        self.messageargs = args
        self.message = message
@@ -1,1179 +1,1174
1 1 # shelve.py - save/restore working directory state
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """save and restore changes to the working directory
9 9
10 10 The "hg shelve" command saves changes made to the working directory
11 11 and reverts those changes, resetting the working directory to a clean
12 12 state.
13 13
14 14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 15 shelve". Changes can be restored even after updating to a different
16 16 parent, in which case Mercurial's merge machinery will resolve any
17 17 conflicts if necessary.
18 18
19 19 You can have more than one shelved change outstanding at a time; each
20 20 shelved change has a distinct name. For details, see the help for "hg
21 21 shelve".
22 22 """
23 23 from __future__ import absolute_import
24 24
25 25 import collections
26 26 import errno
27 27 import itertools
28 28 import stat
29 29
30 30 from .i18n import _
31 31 from .pycompat import open
32 32 from . import (
33 33 bookmarks,
34 34 bundle2,
35 35 bundlerepo,
36 36 changegroup,
37 37 cmdutil,
38 38 discovery,
39 39 error,
40 40 exchange,
41 41 hg,
42 42 lock as lockmod,
43 43 mdiff,
44 44 merge,
45 45 mergestate as mergestatemod,
46 46 node as nodemod,
47 47 patch,
48 48 phases,
49 49 pycompat,
50 50 repair,
51 51 scmutil,
52 52 templatefilters,
53 53 util,
54 54 vfs as vfsmod,
55 55 )
56 56 from .utils import (
57 57 dateutil,
58 58 stringutil,
59 59 )
60 60
# directory (under .hg) holding backups of deleted shelves
backupdir = b'shelve-backup'
# directory (under .hg) holding live shelves
shelvedir = b'shelved'
# every file extension a shelve may consist of
shelvefileextensions = [b'hg', b'patch', b'shelve']
# universal extension is present in all types of shelves
patchextension = b'patch'

# we never need the user, so we use a
# generic user for all shelve operations
shelveuser = b'shelve@localhost'
70 70
71 71
class shelvedfile(object):
    """Helper for the file storing a single shelve

    Handles common functions on shelve files (.hg/.patch) using
    the vfs layer"""

    def __init__(self, repo, name, filetype=None):
        # ``filetype`` selects one of shelvefileextensions; without it the
        # object addresses the bare (extension-less) name.
        self.repo = repo
        self.name = name
        self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
        self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
        self.ui = self.repo.ui
        if filetype:
            self.fname = name + b'.' + filetype
        else:
            self.fname = name

    def exists(self):
        """Return True if the shelve file exists on disk."""
        return self.vfs.exists(self.fname)

    def filename(self):
        """Return the absolute path of the shelve file."""
        return self.vfs.join(self.fname)

    def backupfilename(self):
        """Return the first unused backup path for this shelve file."""

        def gennames(base):
            # try the plain name first, then "<base>-1.<ext>", "-2", ...
            yield base
            base, ext = base.rsplit(b'.', 1)
            for i in itertools.count(1):
                yield b'%s-%d.%s' % (base, i, ext)

        name = self.backupvfs.join(self.fname)
        for n in gennames(name):
            if not self.backupvfs.exists(n):
                return n

    def movetobackup(self):
        """Move this shelve file into the backup directory."""
        if not self.backupvfs.isdir():
            self.backupvfs.makedir()
        util.rename(self.filename(), self.backupfilename())

    def stat(self):
        """Return os.stat() information for the shelve file."""
        return self.vfs.stat(self.fname)

    def opener(self, mode=b'rb'):
        """Open the shelve file; abort with a user-facing message if missing."""
        try:
            return self.vfs(self.fname, mode)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_(b"shelved change '%s' not found") % self.name)

    def applybundle(self, tr):
        """Apply the stored bundle to the repo; return the shelved context.

        Uses the internal phase when supported, secret otherwise.  If tip
        did not move (the changeset already existed), the duplicate
        revision recorded by the transaction is used instead.
        """
        fp = self.opener()
        try:
            targetphase = phases.internal
            if not phases.supportinternal(self.repo):
                targetphase = phases.secret
            gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
            pretip = self.repo[b'tip']
            bundle2.applybundle(
                self.repo,
                gen,
                tr,
                source=b'unshelve',
                url=b'bundle:' + self.vfs.join(self.fname),
                targetphase=targetphase,
            )
            shelvectx = self.repo[b'tip']
            if pretip == shelvectx:
                # bundle added nothing new: look up the duplicated revision
                shelverev = tr.changes[b'revduplicates'][-1]
                shelvectx = self.repo[shelverev]
            return shelvectx
        finally:
            fp.close()

    def bundlerepo(self):
        """Return a bundle repository backed by this shelve's .hg file."""
        path = self.vfs.join(self.fname)
        return bundlerepo.instance(
            self.repo.baseui, b'bundle://%s+%s' % (self.repo.root, path), False
        )

    def writebundle(self, bases, node):
        """Write a changegroup bundle containing bases..node to this file."""
        cgversion = changegroup.safeversion(self.repo)
        if cgversion == b'01':
            # legacy repos: uncompressed-changegroup HG10BZ container
            btype = b'HG10BZ'
            compression = None
        else:
            btype = b'HG20'
            compression = b'BZ'

        repo = self.repo.unfiltered()

        outgoing = discovery.outgoing(
            repo, missingroots=bases, ancestorsof=[node]
        )
        cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve')

        bundle2.writebundle(
            self.ui, cg, self.fname, btype, self.vfs, compression=compression
        )

    def writeinfo(self, info):
        """Persist a small key/value metadata dict to this file."""
        scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)

    def readinfo(self):
        """Read back the key/value metadata dict from this file."""
        return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
178 178
179 179
class shelvedstate(object):
    """Handle persistence during unshelving operations.

    Handles saving and restoring a shelved state. Ensures that different
    versions of a shelved state are possible and handles them appropriately.
    """

    _version = 2
    _filename = b'shelvedstate'
    _keep = b'keep'
    _nokeep = b'nokeep'
    # colon is essential to differentiate from a real bookmark name
    _noactivebook = b':no-active-bookmark'
    _interactive = b'interactive'

    @classmethod
    def _verifyandtransform(cls, d):
        """Some basic shelvestate syntactic verification and transformation

        Converts hex node fields to binary in place; raises CorruptedState
        on any malformed or missing field.
        """
        try:
            d[b'originalwctx'] = nodemod.bin(d[b'originalwctx'])
            d[b'pendingctx'] = nodemod.bin(d[b'pendingctx'])
            d[b'parents'] = [nodemod.bin(h) for h in d[b'parents'].split(b' ')]
            d[b'nodestoremove'] = [
                nodemod.bin(h) for h in d[b'nodestoremove'].split(b' ')
            ]
        except (ValueError, TypeError, KeyError) as err:
            raise error.CorruptedState(pycompat.bytestr(err))

    @classmethod
    def _getversion(cls, repo):
        """Read version information from shelvestate file"""
        fp = repo.vfs(cls._filename)
        try:
            version = int(fp.readline().strip())
        except ValueError as err:
            # non-integer first line means the file is corrupt
            raise error.CorruptedState(pycompat.bytestr(err))
        finally:
            fp.close()
        return version

    @classmethod
    def _readold(cls, repo):
        """Read the old position-based version of a shelvestate file"""
        # Order is important, because the old shelvestate file uses it
        # to determine values of fields (e.g. name is on the second line,
        # originalwctx is on the third and so forth). Please do not change.
        keys = [
            b'version',
            b'name',
            b'originalwctx',
            b'pendingctx',
            b'parents',
            b'nodestoremove',
            b'branchtorestore',
            b'keep',
            b'activebook',
        ]
        # this is executed only seldomly, so it is not a big deal
        # that we open this file twice
        fp = repo.vfs(cls._filename)
        d = {}
        try:
            for key in keys:
                d[key] = fp.readline().strip()
        finally:
            fp.close()
        return d

    @classmethod
    def load(cls, repo):
        """Load persisted unshelve state; raise Abort on a future version."""
        version = cls._getversion(repo)
        if version < cls._version:
            d = cls._readold(repo)
        elif version == cls._version:
            d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename).read(
                firstlinenonkeyval=True
            )
        else:
            raise error.Abort(
                _(
                    b'this version of shelve is incompatible '
                    b'with the version used in this repo'
                )
            )

        cls._verifyandtransform(d)
        try:
            obj = cls()
            obj.name = d[b'name']
            obj.wctx = repo[d[b'originalwctx']]
            obj.pendingctx = repo[d[b'pendingctx']]
            obj.parents = d[b'parents']
            obj.nodestoremove = d[b'nodestoremove']
            obj.branchtorestore = d.get(b'branchtorestore', b'')
            obj.keep = d.get(b'keep') == cls._keep
            obj.activebookmark = b''
            if d.get(b'activebook', b'') != cls._noactivebook:
                obj.activebookmark = d.get(b'activebook', b'')
            obj.interactive = d.get(b'interactive') == cls._interactive
        except (error.RepoLookupError, KeyError) as err:
            # unknown nodes or missing keys mean the state file is unusable
            raise error.CorruptedState(pycompat.bytestr(err))

        return obj

    @classmethod
    def save(
        cls,
        repo,
        name,
        originalwctx,
        pendingctx,
        nodestoremove,
        branchtorestore,
        keep=False,
        activebook=b'',
        interactive=False,
    ):
        """Write the current unshelve state to disk (version 2 format)."""
        info = {
            b"name": name,
            b"originalwctx": nodemod.hex(originalwctx.node()),
            b"pendingctx": nodemod.hex(pendingctx.node()),
            b"parents": b' '.join(
                [nodemod.hex(p) for p in repo.dirstate.parents()]
            ),
            b"nodestoremove": b' '.join(
                [nodemod.hex(n) for n in nodestoremove]
            ),
            b"branchtorestore": branchtorestore,
            b"keep": cls._keep if keep else cls._nokeep,
            b"activebook": activebook or cls._noactivebook,
        }
        if interactive:
            info[b'interactive'] = cls._interactive
        scmutil.simplekeyvaluefile(repo.vfs, cls._filename).write(
            info, firstline=(b"%d" % cls._version)
        )

    @classmethod
    def clear(cls, repo):
        """Remove the persisted state file, ignoring a missing file."""
        repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
320 320
321 321
def cleanupoldbackups(repo):
    """Trim the shelve backup directory to ``shelve.maxbackups`` entries.

    The oldest backups (by mtime) beyond the configured limit are deleted,
    except that backups sharing the cutoff timestamp are kept, since the
    timestamp alone cannot order them reliably.
    """
    vfs = vfsmod.vfs(repo.vfs.join(backupdir))
    maxbackups = repo.ui.configint(b'shelve', b'maxbackups')
    backups = [
        (vfs.stat(f)[stat.ST_MTIME], f)
        for f in vfs.listdir()
        if f.endswith(b'.' + patchextension)
    ]
    backups.sort()
    if 0 < maxbackups < len(backups):
        # the mtime of the oldest backup we intend to keep
        bordermtime = backups[-maxbackups][0]
    else:
        bordermtime = None
    for mtime, f in backups[: len(backups) - maxbackups]:
        if mtime == bordermtime:
            # keep it, because timestamp can't decide exact order of backups
            continue
        base = f[: -(1 + len(patchextension))]
        for ext in shelvefileextensions:
            vfs.tryunlink(base + b'.' + ext)
338 338
339 339
340 340 def _backupactivebookmark(repo):
341 341 activebookmark = repo._activebookmark
342 342 if activebookmark:
343 343 bookmarks.deactivate(repo)
344 344 return activebookmark
345 345
346 346
347 347 def _restoreactivebookmark(repo, mark):
348 348 if mark:
349 349 bookmarks.activate(repo, mark)
350 350
351 351
352 352 def _aborttransaction(repo, tr):
353 353 '''Abort current transaction for shelve/unshelve, but keep dirstate
354 354 '''
355 355 dirstatebackupname = b'dirstate.shelve'
356 356 repo.dirstate.savebackup(tr, dirstatebackupname)
357 357 tr.abort()
358 358 repo.dirstate.restorebackup(None, dirstatebackupname)
359 359
360 360
def getshelvename(repo, parent, opts):
    """Decide on the name this shelve is going to have.

    An explicit --name is validated (must be new, no slashes, no leading
    dot).  Otherwise a name is derived from the active bookmark, the parent
    branch, or 'default', with a numeric suffix to avoid collisions.
    """
    name = opts.get(b'name')
    label = repo._activebookmark or parent.branch() or b'default'
    # slashes aren't allowed in filenames, therefore we rename it
    label = label.replace(b'/', b'_')
    label = label.replace(b'\\', b'_')
    # filenames must not start with '.' as it should not be hidden
    if label.startswith(b'.'):
        label = label.replace(b'.', b'_', 1)

    if name:
        if shelvedfile(repo, name, patchextension).exists():
            raise error.Abort(
                _(b"a shelved change named '%s' already exists") % name
            )

        # ensure we are not creating a subdirectory or a hidden file
        if b'/' in name or b'\\' in name:
            raise error.Abort(
                _(b'shelved change names can not contain slashes')
            )
        if name.startswith(b'.'):
            raise error.Abort(_(b"shelved change names can not start with '.'"))
        return name

    # no explicit name: probe "<label>", then "<label>-01", "-02", ...
    candidate = label
    seq = 0
    while shelvedfile(repo, candidate, patchextension).exists():
        seq += 1
        candidate = b'%s-%02d' % (label, seq)
    return candidate
398 398
399 399
def mutableancestors(ctx):
    """return all mutable ancestors for ctx (included)

    Much faster than the revset ancestors(ctx) & draft()"""
    # breadth-first walk over parent links, pruning at public changesets
    visited = {nodemod.nullrev}
    queue = collections.deque([ctx])
    while queue:
        current = queue.popleft()
        yield current.node()
        for parent in current.parents():
            prev = parent.rev()
            if prev in visited:
                continue
            visited.add(prev)
            if parent.mutable():
                queue.append(parent)
416 416
417 417
def getcommitfunc(extra, interactive, editor=False):
    """Return a commit callback suitable for cmdutil.commit/dorecord.

    ``extra`` is merged into the commit's extras; ``interactive`` selects
    the **kwargs-style wrapper dorecord expects; ``editor`` enables the
    shelve commit editor.
    """

    def commitfunc(ui, repo, message, match, opts):
        # temporarily disable mq's "patches applied" guard so shelving
        # works inside an mq patch queue
        hasmq = util.safehasattr(repo, b'mq')
        if hasmq:
            saved, repo.mq.checkapplied = repo.mq.checkapplied, False

        # shelve commits are internal-phase when supported, secret otherwise
        targetphase = phases.internal
        if not phases.supportinternal(repo):
            targetphase = phases.secret
        overrides = {(b'phases', b'new-commit'): targetphase}
        try:
            editor_ = False
            if editor:
                editor_ = cmdutil.getcommiteditor(
                    editform=b'shelve.shelve', **pycompat.strkwargs(opts)
                )
            with repo.ui.configoverride(overrides):
                return repo.commit(
                    message,
                    shelveuser,
                    opts.get(b'date'),
                    match,
                    editor=editor_,
                    extra=extra,
                )
        finally:
            # always restore mq's guard, even if the commit failed
            if hasmq:
                repo.mq.checkapplied = saved

    def interactivecommitfunc(ui, repo, *pats, **opts):
        # adapter: dorecord passes pats/str-keyed opts instead of match/opts
        opts = pycompat.byteskwargs(opts)
        match = scmutil.match(repo[b'.'], pats, {})
        message = opts[b'message']
        return commitfunc(ui, repo, message, match, opts)

    return interactivecommitfunc if interactive else commitfunc
454 454
455 455
def _nothingtoshelvemessaging(ui, repo, pats, opts):
    """Explain to the user why no shelve was created.

    Mentions missing (deleted-but-tracked) files when that is the likely
    cause; otherwise reports a plain "nothing changed".
    """
    wcstatus = repo.status(match=scmutil.match(repo[None], pats, opts))
    if not wcstatus.deleted:
        ui.status(_(b"nothing changed\n"))
    else:
        ui.status(
            _(b"nothing changed (%d missing files, see 'hg status')\n")
            % len(wcstatus.deleted)
        )
465 465
466 466
def _shelvecreatedcommit(repo, node, name, match):
    """Persist the freshly created shelve commit to disk.

    Writes the three sibling files for ``name``: the .shelve metadata
    (commit node), the .hg bundle holding the commit and its mutable
    ancestors, and the .patch export used for display.
    """
    shelvedfile(repo, name, b'shelve').writeinfo(
        {b'node': nodemod.hex(node)}
    )
    bundlebases = list(mutableancestors(repo[node]))
    shelvedfile(repo, name, b'hg').writebundle(bundlebases, node)
    with shelvedfile(repo, name, patchextension).opener(b'wb') as fp:
        cmdutil.exportfile(
            repo, [node], fp, opts=mdiff.diffopts(git=True), match=match
        )
476 476
477 477
def _includeunknownfiles(repo, pats, opts, extra):
    """Stage unknown (untracked) files so they become part of the shelve.

    Their names are recorded in ``extra`` so unshelve can forget them
    again later.
    """
    wcstatus = repo.status(
        match=scmutil.match(repo[None], pats, opts), unknown=True
    )
    if wcstatus.unknown:
        extra[b'shelve_unknown'] = b'\0'.join(wcstatus.unknown)
        repo[None].add(wcstatus.unknown)
483 483
484 484
def _finishshelve(repo, tr):
    """Finalize the shelve transaction.

    With internal-phase support the shelve commits are kept and the
    transaction committed; otherwise the transaction is aborted (dirstate
    preserved) so the temporary commits vanish from the changelog.
    """
    if not phases.supportinternal(repo):
        _aborttransaction(repo, tr)
    else:
        tr.close()
490 490
491 491
def createcmd(ui, repo, pats, opts):
    """subcommand that creates a new shelve

    Takes the wlock and refuses to run while another multi-step operation
    (rebase, graft, ...) is unfinished, then delegates to _docreatecmd.
    """
    with repo.wlock():
        cmdutil.checkunfinished(repo)
        return _docreatecmd(ui, repo, pats, opts)
497 497
498 498
def _docreatecmd(ui, repo, pats, opts):
    """Create the shelve: commit pending changes, store them, clean the wdir.

    Returns 1 when there is nothing to shelve.  Caller holds the wlock.
    """
    wctx = repo[None]
    parents = wctx.parents()
    parent = parents[0]
    origbranch = wctx.branch()

    # default shelve description, derived from the parent changeset
    if parent.node() != nodemod.nullid:
        desc = b"changes to: %s" % parent.description().split(b'\n', 1)[0]
    else:
        desc = b'(changes in empty repository)'

    if not opts.get(b'message'):
        opts[b'message'] = desc

    lock = tr = activebookmark = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction(b'shelve', report=lambda x: None)

        interactive = opts.get(b'interactive', False)
        # --addremove already stages unknowns, so --unknown would be redundant
        includeunknown = opts.get(b'unknown', False) and not opts.get(
            b'addremove', False
        )

        name = getshelvename(repo, parent, opts)
        activebookmark = _backupactivebookmark(repo)
        extra = {b'internal': b'shelve'}
        if includeunknown:
            _includeunknownfiles(repo, pats, opts, extra)

        if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
            # In non-bare shelve we don't store newly created branch
            # at bundled commit
            repo.dirstate.setbranch(repo[b'.'].branch())

        commitfunc = getcommitfunc(extra, interactive, editor=True)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(
                ui,
                repo,
                commitfunc,
                None,
                False,
                cmdutil.recordfilter,
                *pats,
                **pycompat.strkwargs(opts)
            )
        if not node:
            # nothing was committed, hence nothing to shelve
            _nothingtoshelvemessaging(ui, repo, pats, opts)
            return 1

        # Create a matcher so that prefetch doesn't attempt to fetch
        # the entire repository pointlessly, and as an optimisation
        # for movedirstate, if needed.
        match = scmutil.matchfiles(repo, repo[node].files())
        _shelvecreatedcommit(repo, node, name, match)

        ui.status(_(b'shelved as %s\n') % name)
        if opts[b'keep']:
            # keep the changes in the working copy; only fix up the dirstate
            with repo.dirstate.parentchange():
                scmutil.movedirstate(repo, parent, match)
        else:
            hg.update(repo, parent.node())
        if origbranch != repo[b'.'].branch() and not _isbareshelve(pats, opts):
            repo.dirstate.setbranch(origbranch)

        _finishshelve(repo, tr)
    finally:
        _restoreactivebookmark(repo, activebookmark)
        lockmod.release(tr, lock)
574 574
575 575
576 576 def _isbareshelve(pats, opts):
577 577 return (
578 578 not pats
579 579 and not opts.get(b'interactive', False)
580 580 and not opts.get(b'include', False)
581 581 and not opts.get(b'exclude', False)
582 582 )
583 583
584 584
585 585 def _iswctxonnewbranch(repo):
586 586 return repo[None].branch() != repo[b'.'].branch()
587 587
588 588
def cleanupcmd(ui, repo):
    """subcommand that deletes all shelves"""

    with repo.wlock():
        for entry, _kind in repo.vfs.readdir(shelvedir):
            # only touch files whose extension marks them as shelve data
            if entry.rsplit(b'.', 1)[-1] in shelvefileextensions:
                shelvedfile(repo, entry).movetobackup()
        cleanupoldbackups(repo)
598 598
599 599
def deletecmd(ui, repo, pats):
    """subcommand that deletes a specific shelve"""
    if not pats:
        raise error.Abort(_(b'no shelved changes specified!'))
    with repo.wlock():
        for shelfname in pats:
            try:
                for ext in shelvefileextensions:
                    shfile = shelvedfile(repo, shelfname, ext)
                    # The patch file must be present for any kind of
                    # shelve, so move it unconditionally (a missing one
                    # surfaces as ENOENT -> "not found" below); the .hg
                    # bundle and .shelve metadata are optional and only
                    # moved when they exist.
                    if shfile.exists() or ext == patchextension:
                        shfile.movetobackup()
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
                raise error.Abort(
                    _(b"shelved change '%s' not found") % shelfname
                )
        cleanupoldbackups(repo)
621 621
622 622
def listshelves(repo):
    """return all shelves in repo as list of (time, filename)"""
    try:
        entries = repo.vfs.readdir(shelvedir)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        # no shelve directory yet means no shelves
        return []
    shelves = []
    for entry, _kind in entries:
        base, ext = entry.rsplit(b'.', 1)
        # only .patch files define a shelve; skip anything else
        if not base or ext != patchextension:
            continue
        mtime = shelvedfile(repo, entry).stat()[stat.ST_MTIME]
        shelves.append((mtime, shelvedfile(repo, base).filename()))
    return sorted(shelves, reverse=True)
639 639
640 640
def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves

    ``pats`` optionally restricts output to the named shelves.  With
    --patch or --stat, the stored patch is rendered under each entry.
    """
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    # the newest shelve gets a distinct label; the rest share one
    namelabel = b'shelve.newest'
    ui.pager(b'shelve')
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        namelabel = b'shelve.name'
        if ui.quiet:
            ui.write(b'\n')
            continue
        # pad name to a 16-column field, age to a 12-column field
        ui.write(b' ' * (16 - len(sname)))
        used = 16
        date = dateutil.makedate(mtime)
        age = b'(%s)' % templatefilters.age(date, abbrev=True)
        ui.write(age, label=b'shelve.age')
        ui.write(b' ' * (12 - len(age)))
        used += 12
        with open(name + b'.' + patchextension, b'rb') as fp:
            # first non-comment line of the patch is the description
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith(b'#'):
                    desc = line.rstrip()
                    if ui.formatted():
                        desc = stringutil.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write(b'\n')
            if not (opts[b'patch'] or opts[b'stat']):
                continue
            # remaining lines are the diff body
            difflines = fp.readlines()
            if opts[b'patch']:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.write(chunk, label=label)
            if opts[b'stat']:
                for chunk, label in patch.diffstatui(difflines, width=width):
                    ui.write(chunk, label=label)
686 686
687 687
def patchcmds(ui, repo, pats, opts):
    """subcommand that displays shelves

    Without arguments, shows the most recent shelve; otherwise validates
    that each requested shelf exists before delegating to listcmd.
    """
    if not pats:
        shelves = listshelves(repo)
        if not shelves:
            raise error.Abort(_(b"there are no shelves to show"))
        mtime, name = shelves[0]
        pats = [util.split(name)[1]]

    for shelfname in pats:
        if not shelvedfile(repo, shelfname, patchextension).exists():
            raise error.Abort(_(b"cannot find shelf %s") % shelfname)

    listcmd(ui, repo, pats, opts)
703 703
704 704
def checkparents(repo, state):
    """check parent while resuming an unshelve

    Aborts when the working directory parents no longer match what the
    persisted unshelve state recorded.
    """
    if repo.dirstate.parents() != state.parents:
        raise error.Abort(
            _(b'working directory parents do not match unshelve state')
        )
711 711
712 712
def _loadshelvedstate(ui, repo, opts):
    """Load the persisted unshelve state, translating failures for the user.

    A missing state file becomes a "wrong tool to continue" hint; a corrupt
    one aborts with guidance that depends on whether --continue or --abort
    was requested.
    """
    try:
        state = shelvedstate.load(repo)
        if opts.get(b'keep') is None:
            # no explicit --keep/--no-keep: honor what the shelve recorded
            opts[b'keep'] = state.keep
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        cmdutil.wrongtooltocontinue(repo, _(b'unshelve'))
    except error.CorruptedState as err:
        ui.debug(pycompat.bytestr(err) + b'\n')
        if opts.get(b'continue'):
            msg = _(b'corrupted shelved state file')
            hint = _(
                b'please run hg unshelve --abort to abort unshelve '
                b'operation'
            )
            raise error.Abort(msg, hint=hint)
        elif opts.get(b'abort'):
            # best effort: drop the unreadable state so the user can recover
            shelvedstate.clear(repo)
            raise error.Abort(
                _(
                    b'could not read shelved state file, your '
                    b'working copy may be in an unexpected state\n'
                    b'please update to some commit\n'
                )
            )
    return state
741 741
742 742
def unshelveabort(ui, repo, state):
    """subcommand that abort an in-progress unshelve

    Restores the pre-unshelve working copy, reactivates the saved
    bookmark, and strips the temporary commits when the repo lacks
    internal-phase support.
    """
    with repo.lock():
        try:
            checkparents(repo, state)

            merge.clean_update(state.pendingctx)
            if state.activebookmark and state.activebookmark in repo._bookmarks:
                bookmarks.activate(repo, state.activebookmark)
            mergefiles(ui, repo, state.wctx, state.pendingctx)
            if not phases.supportinternal(repo):
                repair.strip(
                    ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
                )
        finally:
            # the state file is cleared even on failure so 'hg unshelve'
            # is not left permanently wedged
            shelvedstate.clear(repo)
            ui.warn(_(b"unshelve of '%s' aborted\n") % state.name)
760 760
761 761
def hgabortunshelve(ui, repo):
    """logic to abort unshelve using 'hg abort'"""
    with repo.wlock():
        state = _loadshelvedstate(ui, repo, {b'abort': True})
        return unshelveabort(ui, repo, state)
767 767
768 768
def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate."""
    with ui.configoverride({(b'ui', b'quiet'): True}):
        hg.update(repo, wctx.node())
        # revert output is noise here; swallow it via a throwaway buffer
        ui.pushbuffer(True)
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents())
        ui.popbuffer()
777 777
778 778
def restorebranch(ui, repo, branchtorestore):
    """Switch the dirstate back to ``branchtorestore``.

    No-op when no branch was recorded or the working directory is already
    on that branch.
    """
    if not branchtorestore:
        return
    if branchtorestore == repo.dirstate.branch():
        return
    repo.dirstate.setbranch(branchtorestore)
    ui.status(
        _(b'marked working directory as branch %s\n') % branchtorestore
    )
785 785
786 786
def unshelvecleanup(ui, repo, name, opts):
    """remove related files after an unshelve

    Skipped entirely when --keep was requested; otherwise every existing
    file belonging to the shelve is moved to the backup directory.
    """
    if opts.get(b'keep'):
        return
    for filetype in shelvefileextensions:
        shfile = shelvedfile(repo, name, filetype)
        if shfile.exists():
            shfile.movetobackup()
    cleanupoldbackups(repo)
795 795
796 796
def unshelvecontinue(ui, repo, state, opts):
    """subcommand to continue an in-progress unshelve

    Requires all merge conflicts to be resolved; recreates the unshelve
    commit, merges it into the working copy, and cleans up temporary
    commits and state.
    """
    # We're finishing off a merge. First parent is our original
    # parent, second is the temporary "fake" commit we're unshelving.
    interactive = state.interactive
    basename = state.name
    with repo.lock():
        checkparents(repo, state)
        ms = mergestatemod.mergestate.read(repo)
        if list(ms.unresolved()):
            raise error.Abort(
                _(b"unresolved conflicts, can't continue"),
                hint=_(b"see 'hg resolve', then 'hg unshelve --continue'"),
            )

        shelvectx = repo[state.parents[1]]
        pendingctx = state.pendingctx

        # drop the temporary merge second-parent before re-committing
        with repo.dirstate.parentchange():
            repo.setparents(state.pendingctx.node(), nodemod.nullid)
            repo.dirstate.write(repo.currenttransaction())

        targetphase = phases.internal
        if not phases.supportinternal(repo):
            targetphase = phases.secret
        overrides = {(b'phases', b'new-commit'): targetphase}
        with repo.ui.configoverride(overrides, b'unshelve'):
            with repo.dirstate.parentchange():
                repo.setparents(state.parents[0], nodemod.nullid)
                newnode, ispartialunshelve = _createunshelvectx(
                    ui, repo, shelvectx, basename, interactive, opts
                )

        if newnode is None:
            # commit produced nothing: the changes already existed
            shelvectx = state.pendingctx
            msg = _(
                b'note: unshelved changes already existed '
                b'in the working copy\n'
            )
            ui.status(msg)
        else:
            # only strip the shelvectx if we produced one
            state.nodestoremove.append(newnode)
            shelvectx = repo[newnode]

        hg.updaterepo(repo, pendingctx.node(), overwrite=False)
        mergefiles(ui, repo, state.wctx, shelvectx)
        restorebranch(ui, repo, state.branchtorestore)

        if not phases.supportinternal(repo):
            repair.strip(
                ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
            )
        shelvedstate.clear(repo)
        if not ispartialunshelve:
            unshelvecleanup(ui, repo, state.name, opts)
        _restoreactivebookmark(repo, state.activebookmark)
        ui.status(_(b"unshelve of '%s' complete\n") % state.name)
855 855
856 856
def hgcontinueunshelve(ui, repo):
    """logic to resume unshelve using 'hg continue'"""
    with repo.wlock():
        state = _loadshelvedstate(ui, repo, {b'continue': True})
        # --keep reflects what the original unshelve recorded
        return unshelvecontinue(ui, repo, state, {b'keep': state.keep})
862 862
863 863
def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
    """Temporarily commit working copy changes before moving unshelve commit"""
    # Store pending changes in a commit and remember added in case a shelve
    # contains unknown files that are part of the pending change
    wcstatus = repo.status()
    addedbefore = frozenset(wcstatus.added)
    if not (wcstatus.modified or wcstatus.added or wcstatus.removed):
        # clean working copy: nothing to park, keep the given context
        return tmpwctx, addedbefore
    ui.status(
        _(
            b"temporarily committing pending changes "
            b"(restore with 'hg unshelve --abort')\n"
        )
    )
    extra = {b'internal': b'shelve'}
    commitfunc = getcommitfunc(extra=extra, interactive=False, editor=False)
    tempopts = {
        b'message': b"pending changes temporary commit",
        b'date': opts.get(b'date'),
    }
    with ui.configoverride({(b'ui', b'quiet'): True}):
        node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
    return repo[node], addedbefore
887 887
888 888
def _unshelverestorecommit(ui, repo, tr, basename):
    """Recreate commit in the repository during the unshelve

    Reuses the node recorded in the .shelve metadata when it is still
    present in the repo; otherwise re-applies the stored bundle.
    Returns (unfiltered repo, shelved changectx).
    """
    repo = repo.unfiltered()
    node = None
    if shelvedfile(repo, basename, b'shelve').exists():
        node = shelvedfile(repo, basename, b'shelve').readinfo()[b'node']
    if node is None or node not in repo:
        with ui.configoverride({(b'ui', b'quiet'): True}):
            shelvectx = shelvedfile(repo, basename, b'hg').applybundle(tr)
        # We might not strip the unbundled changeset, so we should keep track of
        # the unshelve node in case we need to reuse it (eg: unshelve --keep)
        if node is None:
            info = {b'node': nodemod.hex(shelvectx.node())}
            shelvedfile(repo, basename, b'shelve').writeinfo(info)
    else:
        shelvectx = repo[node]

    return repo, shelvectx
907 907
908 908
def _createunshelvectx(ui, repo, shelvectx, basename, interactive, opts):
    """Handles the creation of unshelve commit and updates the shelve if it
    was partially unshelved.

    If interactive is:

        * False: Commits all the changes in the working directory.
        * True: Prompts the user to select changes to unshelve and commit them.
                Update the shelve with remaining changes.

    Returns the node of the new commit formed and a bool indicating whether
    the shelve was partially unshelved.Creates a commit ctx to unshelve
    interactively or non-interactively.

    The user might want to unshelve certain changes only from the stored
    shelve in interactive. So, we would create two commits. One with requested
    changes to unshelve at that time and the latter is shelved for future.

    Here, we return both the newnode which is created interactively and a
    bool to know whether the shelve is partly done or completely done.
    """
    opts[b'message'] = shelvectx.description()
    opts[b'interactive-unshelve'] = True
    pats = []
    if not interactive:
        # non-interactive: commit everything, nothing is left shelved
        newnode = repo.commit(
            text=shelvectx.description(),
            extra=shelvectx.extra(),
            user=shelvectx.user(),
            date=shelvectx.date(),
        )
        return newnode, False

    # interactive: let the user pick hunks to unshelve...
    commitfunc = getcommitfunc(shelvectx.extra(), interactive=True, editor=True)
    newnode = cmdutil.dorecord(
        ui,
        repo,
        commitfunc,
        None,
        False,
        cmdutil.recordfilter,
        *pats,
        **pycompat.strkwargs(opts)
    )
    # ...then commit whatever remains and store it back as the shelve
    snode = repo.commit(
        text=shelvectx.description(),
        extra=shelvectx.extra(),
        user=shelvectx.user(),
    )
    if snode:
        m = scmutil.matchfiles(repo, repo[snode].files())
        _shelvecreatedcommit(repo, snode, basename, m)

    # a leftover commit (snode) means the unshelve was only partial
    return newnode, bool(snode)
963 963
964 964
def _rebaserestoredcommit(
    ui,
    repo,
    opts,
    tr,
    oldtiprev,
    basename,
    pctx,
    tmpwctx,
    shelvectx,
    branchtorestore,
    activebookmark,
):
    """Rebase restored commit from its original location to a destination

    Returns a ``(shelvectx, ispartialunshelve)`` pair: the context of the
    rebased (or reused) commit, and whether part of the shelve remains
    stored for a later unshelve.
    """
    # If the shelve is not immediately on top of the commit
    # we'll be merging with, rebase it to be on top.
    interactive = opts.get(b'interactive')
    if tmpwctx.node() == shelvectx.p1().node() and not interactive:
        # We won't skip on interactive mode because, the user might want to
        # unshelve certain changes only.
        return shelvectx, False

    overrides = {
        (b'ui', b'forcemerge'): opts.get(b'tool', b''),
        # Temporary commits created below must not escape as drafts.
        (b'phases', b'new-commit'): phases.secret,
    }
    with repo.ui.configoverride(overrides, b'unshelve'):
        ui.status(_(b'rebasing shelved changes\n'))
        # Graft the shelved change onto tmpwctx; keepconflictparent keeps
        # shelvectx reachable so conflicts can be resolved against it.
        stats = merge.graft(
            repo,
            shelvectx,
            labels=[b'working-copy', b'shelve'],
            keepconflictparent=True,
        )
        if stats.unresolvedcount:
            # Conflicts: commit the transaction so the state written below
            # survives this process, persist everything needed for
            # 'hg unshelve --continue' / '--abort', and hand control back
            # to the user.
            tr.close()

            # All revisions created since unshelve started are temporary
            # and must be stripped on --abort.
            nodestoremove = [
                repo.changelog.node(rev)
                for rev in pycompat.xrange(oldtiprev, len(repo))
            ]
            shelvedstate.save(
                repo,
                basename,
                pctx,
                tmpwctx,
                nodestoremove,
                branchtorestore,
                opts.get(b'keep'),
                activebookmark,
                interactive,
            )
            raise error.ConflictResolutionRequired(b'unshelve')

        with repo.dirstate.parentchange():
            # Reparent the working copy onto tmpwctx before committing the
            # unshelved changes so the new commit lands in the right spot.
            repo.setparents(tmpwctx.node(), nodemod.nullid)
            newnode, ispartialunshelve = _createunshelvectx(
                ui, repo, shelvectx, basename, interactive, opts
            )

        if newnode is None:
            # Nothing was committed: the shelved changes were already
            # present in the working copy.
            shelvectx = tmpwctx
            msg = _(
                b'note: unshelved changes already existed '
                b'in the working copy\n'
            )
            ui.status(msg)
        else:
            shelvectx = repo[newnode]
            hg.updaterepo(repo, tmpwctx.node(), False)

    return shelvectx, ispartialunshelve
1042 1037
1043 1038
def _forgetunknownfiles(repo, shelvectx, addedbefore):
    """Drop 'added' status from files that the shelve recorded as unknown
    and that were also unknown before unshelve started.

    The shelve commit stores the then-unknown files in its
    ``shelve_unknown`` extra (NUL-separated); files in that set which are
    now 'added' — but were not added before the unshelve began — revert
    to untracked.
    """
    raw = shelvectx.extra().get(b'shelve_unknown')
    if not raw:
        return
    unknownatshelve = frozenset(raw.split(b'\0'))
    addednow = frozenset(repo.status().added)
    repo[None].forget((addednow & unknownatshelve) - addedbefore)
1054 1049
1055 1050
def _finishunshelve(repo, oldtiprev, tr, activebookmark):
    """Finish a successful unshelve: restore the active bookmark and throw
    away the temporary commits created during the operation."""
    _restoreactivebookmark(repo, activebookmark)
    # Aborting the transaction strips the temporary commits on disk, but
    # it does not refresh the in-memory structures, so addchangegroup
    # hooks would still fire and try to operate on the missing commits.
    # Strip the in-memory changelog first to keep the two in sync.
    unfi = repo.unfiltered()
    unfi.changelog.strip(oldtiprev, tr)
    _aborttransaction(repo, tr)
1064 1059
1065 1060
def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
    """Check potential problems which may result from working
    copy having untracked changes.

    Aborts when the shelved change touches files currently reported as
    missing (deleted from disk but still tracked) in the working copy.
    """
    missing = set(repo.status().deleted) & set(shelvectx.files())
    if missing:
        raise error.Abort(
            _(b"shelved change touches missing files"),
            hint=_(b"run hg status to see which files are missing"),
        )
1076 1071
1077 1072
def unshelvecmd(ui, repo, *shelved, **opts):
    """Entry point for 'hg unshelve': validate the option combination and
    dispatch to abort, continue, or a fresh unshelve of one shelved change.
    """
    opts = pycompat.byteskwargs(opts)
    aborting = opts.get(b'abort')
    continuing = opts.get(b'continue')
    interactive = opts.get(b'interactive')
    if not aborting and not continuing:
        # A fresh unshelve must not start while another multi-step
        # operation is in progress.
        cmdutil.checkunfinished(repo)
    names = list(shelved)
    if opts.get(b"name"):
        names.append(opts[b"name"])

    if interactive and opts.get(b'keep'):
        raise error.Abort(_(b'--keep on --interactive is not yet supported'))
    if aborting or continuing:
        if aborting and continuing:
            raise error.Abort(_(b'cannot use both abort and continue'))
        if names:
            raise error.Abort(
                _(
                    b'cannot combine abort/continue with '
                    b'naming a shelved change'
                )
            )
        if aborting and opts.get(b'tool', False):
            ui.warn(_(b'tool option will be ignored\n'))

        state = _loadshelvedstate(ui, repo, opts)
        if aborting:
            return unshelveabort(ui, repo, state)
        # Not aborting, so we are continuing here.
        if interactive:
            raise error.Abort(_(b'cannot use both continue and interactive'))
        return unshelvecontinue(ui, repo, state, opts)
    if len(names) > 1:
        raise error.Abort(_(b'can only unshelve one change at a time'))
    if not names:
        # No name given: default to the most recent shelve.
        stored = listshelves(repo)
        if not stored:
            raise error.Abort(_(b'no shelved changes to apply!'))
        basename = util.split(stored[0][1])[1]
        ui.status(_(b"unshelving change '%s'\n") % basename)
    else:
        basename = names[0]

    if not shelvedfile(repo, basename, patchextension).exists():
        raise error.Abort(_(b"shelved change '%s' not found") % basename)

    return _dounshelve(ui, repo, basename, opts)
1126 1121
1127 1122
def _dounshelve(ui, repo, basename, opts):
    """Apply the shelve named ``basename`` on top of the working copy.

    Takes the repository lock, runs the whole unshelve inside a single
    transaction, and releases both in all cases.
    """
    # Temporary commits may need to be stripped later; work on the
    # unfiltered repo so those revisions stay addressable.
    repo = repo.unfiltered()
    lock = tr = None
    try:
        lock = repo.lock()
        # Reporting is silenced: the transaction is deliberately aborted
        # in _finishunshelve to discard the temporary commits.
        tr = repo.transaction(b'unshelve', report=lambda x: None)
        # Everything at or above this revision number is temporary.
        oldtiprev = len(repo)

        pctx = repo[b'.']
        tmpwctx = pctx
        # The goal is to have a commit structure like so:
        # ...-> pctx -> tmpwctx -> shelvectx
        # where tmpwctx is an optional commit with the user's pending changes
        # and shelvectx is the unshelved changes. Then we merge it all down
        # to the original pctx.

        activebookmark = _backupactivebookmark(repo)
        tmpwctx, addedbefore = _commitworkingcopychanges(
            ui, repo, opts, tmpwctx
        )
        repo, shelvectx = _unshelverestorecommit(ui, repo, tr, basename)
        _checkunshelveuntrackedproblems(ui, repo, shelvectx)
        branchtorestore = b''
        if shelvectx.branch() != shelvectx.p1().branch():
            # The shelve was made on a different named branch; remember it
            # so the working copy can be switched back afterwards.
            branchtorestore = shelvectx.branch()

        shelvectx, ispartialunshelve = _rebaserestoredcommit(
            ui,
            repo,
            opts,
            tr,
            oldtiprev,
            basename,
            pctx,
            tmpwctx,
            shelvectx,
            branchtorestore,
            activebookmark,
        )
        overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
        with ui.configoverride(overrides, b'unshelve'):
            # Merge the rebased shelved changes back down onto pctx.
            mergefiles(ui, repo, pctx, shelvectx)
        restorebranch(ui, repo, branchtorestore)
        shelvedstate.clear(repo)
        _finishunshelve(repo, oldtiprev, tr, activebookmark)
        _forgetunknownfiles(repo, shelvectx, addedbefore)
        if not ispartialunshelve:
            # Fully applied: delete the shelve files themselves.
            unshelvecleanup(ui, repo, basename, opts)
    finally:
        if tr:
            tr.release()
        lockmod.release(lock)
General Comments 0
You need to be logged in to leave comments. Login now