rebase: move bookmarks with --keep (issue5682)...
Jun Wu
r34364:2f427b57 4.3.3 stable
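The patch below makes 'hg rebase --keep' move bookmarks onto the rebased copies instead of leaving them behind on the kept originals. As a rough orientation before the full diff, here is a minimal, self-contained sketch of the bookkeeping the patch introduces in clearrebased(): the old-to-new mapping used for bookmark moves is now computed unconditionally, while the replacements handed to scmutil.cleanupnodes() stay empty when --keep is set. The helper name and the integer revision numbers below are illustrative only and are not part of the patch.

# Illustrative sketch (not Mercurial API): split the rebase result into
# "replacements" (what gets stripped/obsoleted) and "moves" (where
# bookmarks should end up), mirroring the logic of the new clearrebased().
def split_replacements_and_moves(state, skipped, collapsedas=None, keepf=False):
    replacements = {}  # old -> tuple of successors; drives strip/obsolescence
    moves = {}         # old -> new; drives bookmark movement
    for rev, newrev in sorted(state.items()):
        if newrev >= 0 and newrev != rev:
            newnode = collapsedas or newrev
            moves[rev] = newnode
            if not keepf:
                # without --keep, a skipped original has no successor
                # (plain prune); otherwise its successor is the new node
                replacements[rev] = () if rev in skipped else (newnode,)
    return replacements, moves

# With --keep: nothing is replaced, but bookmarks still follow the copies.
assert split_replacements_and_moves({10: 15, 11: 16}, set(), keepf=True) == \
    ({}, {10: 15, 11: 16})

In the actual change, both structures are then passed to scmutil.cleanupnodes(repo, replacements, 'rebase', moves), so bookmark movement no longer depends on the original nodes being replaced.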
@@ -1,1540 +1,1538 b''
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 https://mercurial-scm.org/wiki/RebaseExtension
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 import errno
20 20 import os
21 21
22 22 from mercurial.i18n import _
23 23 from mercurial.node import (
24 24 hex,
25 25 nullid,
26 26 nullrev,
27 27 short,
28 28 )
29 29 from mercurial import (
30 30 bookmarks,
31 31 cmdutil,
32 32 commands,
33 33 copies,
34 34 destutil,
35 35 dirstateguard,
36 36 error,
37 37 extensions,
38 38 hg,
39 39 lock,
40 40 merge as mergemod,
41 41 mergeutil,
42 42 obsolete,
43 43 obsutil,
44 44 patch,
45 45 phases,
46 46 registrar,
47 47 repair,
48 48 repoview,
49 49 revset,
50 50 scmutil,
51 51 smartset,
52 52 util,
53 53 )
54 54
55 55 release = lock.release
56 56 templateopts = cmdutil.templateopts
57 57
58 58 # The following constants are used throughout the rebase module. The ordering of
59 59 # their values must be maintained.
60 60
61 61 # Indicates that a revision needs to be rebased
62 62 revtodo = -1
63 63 nullmerge = -2
64 64 revignored = -3
65 65 # successor in rebase destination
66 66 revprecursor = -4
67 67 # plain prune (no successor)
68 68 revpruned = -5
69 69 revskipped = (revignored, revprecursor, revpruned)
70 70
71 71 cmdtable = {}
72 72 command = registrar.command(cmdtable)
73 73 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
74 74 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
75 75 # be specifying the version(s) of Mercurial they are tested with, or
76 76 # leave the attribute unspecified.
77 77 testedwith = 'ships-with-hg-core'
78 78
79 79 def _nothingtorebase():
80 80 return 1
81 81
82 82 def _savegraft(ctx, extra):
83 83 s = ctx.extra().get('source', None)
84 84 if s is not None:
85 85 extra['source'] = s
86 86 s = ctx.extra().get('intermediate-source', None)
87 87 if s is not None:
88 88 extra['intermediate-source'] = s
89 89
90 90 def _savebranch(ctx, extra):
91 91 extra['branch'] = ctx.branch()
92 92
93 93 def _makeextrafn(copiers):
94 94 """make an extrafn out of the given copy-functions.
95 95
96 96 A copy function takes a context and an extra dict, and mutates the
97 97 extra dict as needed based on the given context.
98 98 """
99 99 def extrafn(ctx, extra):
100 100 for c in copiers:
101 101 c(ctx, extra)
102 102 return extrafn
103 103
104 104 def _destrebase(repo, sourceset, destspace=None):
105 105 """small wrapper around destmerge to pass the right extra args
106 106
107 107 Please wrap destutil.destmerge instead."""
108 108 return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
109 109 onheadcheck=False, destspace=destspace)
110 110
111 111 revsetpredicate = registrar.revsetpredicate()
112 112
113 113 @revsetpredicate('_destrebase')
114 114 def _revsetdestrebase(repo, subset, x):
115 115 # ``_rebasedefaultdest()``
116 116
117 117 # default destination for rebase.
118 118 # # XXX: Currently private because I expect the signature to change.
119 119 # # XXX: - bailing out in case of ambiguity vs returning all data.
120 120 # i18n: "_rebasedefaultdest" is a keyword
121 121 sourceset = None
122 122 if x is not None:
123 123 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
124 124 return subset & smartset.baseset([_destrebase(repo, sourceset)])
125 125
126 126 class rebaseruntime(object):
127 127 """This class is a container for rebase runtime state"""
128 128 def __init__(self, repo, ui, opts=None):
129 129 if opts is None:
130 130 opts = {}
131 131
132 132 self.repo = repo
133 133 self.ui = ui
134 134 self.opts = opts
135 135 self.originalwd = None
136 136 self.external = nullrev
137 137 # Mapping between the old revision id and either the new rebased
138 138 # revision or what needs to be done with the old revision. The state
139 139 # dict holds most of the rebase progress state.
140 140 self.state = {}
141 141 self.activebookmark = None
142 142 self.dest = None
143 143 self.skipped = set()
144 144 self.destancestors = set()
145 145
146 146 self.collapsef = opts.get('collapse', False)
147 147 self.collapsemsg = cmdutil.logmessage(ui, opts)
148 148 self.date = opts.get('date', None)
149 149
150 150 e = opts.get('extrafn') # internal, used by e.g. hgsubversion
151 151 self.extrafns = [_savegraft]
152 152 if e:
153 153 self.extrafns = [e]
154 154
155 155 self.keepf = opts.get('keep', False)
156 156 self.keepbranchesf = opts.get('keepbranches', False)
157 157 # keepopen is not meant for use on the command line, but by
158 158 # other extensions
159 159 self.keepopen = opts.get('keepopen', False)
160 160 self.obsoletenotrebased = {}
161 161
162 162 def storestatus(self, tr=None):
163 163 """Store the current status to allow recovery"""
164 164 if tr:
165 165 tr.addfilegenerator('rebasestate', ('rebasestate',),
166 166 self._writestatus, location='plain')
167 167 else:
168 168 with self.repo.vfs("rebasestate", "w") as f:
169 169 self._writestatus(f)
170 170
171 171 def _writestatus(self, f):
172 172 repo = self.repo.unfiltered()
173 173 f.write(repo[self.originalwd].hex() + '\n')
174 174 f.write(repo[self.dest].hex() + '\n')
175 175 f.write(repo[self.external].hex() + '\n')
176 176 f.write('%d\n' % int(self.collapsef))
177 177 f.write('%d\n' % int(self.keepf))
178 178 f.write('%d\n' % int(self.keepbranchesf))
179 179 f.write('%s\n' % (self.activebookmark or ''))
180 180 for d, v in self.state.iteritems():
181 181 oldrev = repo[d].hex()
182 182 if v >= 0:
183 183 newrev = repo[v].hex()
184 184 elif v == revtodo:
185 185 # To maintain format compatibility, we have to use nullid.
186 186 # Please do remove this special case when upgrading the format.
187 187 newrev = hex(nullid)
188 188 else:
189 189 newrev = v
190 190 f.write("%s:%s\n" % (oldrev, newrev))
191 191 repo.ui.debug('rebase status stored\n')
192 192
193 193 def restorestatus(self):
194 194 """Restore a previously stored status"""
195 195 repo = self.repo
196 196 keepbranches = None
197 197 dest = None
198 198 collapse = False
199 199 external = nullrev
200 200 activebookmark = None
201 201 state = {}
202 202
203 203 try:
204 204 f = repo.vfs("rebasestate")
205 205 for i, l in enumerate(f.read().splitlines()):
206 206 if i == 0:
207 207 originalwd = repo[l].rev()
208 208 elif i == 1:
209 209 dest = repo[l].rev()
210 210 elif i == 2:
211 211 external = repo[l].rev()
212 212 elif i == 3:
213 213 collapse = bool(int(l))
214 214 elif i == 4:
215 215 keep = bool(int(l))
216 216 elif i == 5:
217 217 keepbranches = bool(int(l))
218 218 elif i == 6 and not (len(l) == 81 and ':' in l):
219 219 # line 6 is a recent addition, so for backwards
220 220 # compatibility check that the line doesn't look like the
221 221 # oldrev:newrev lines
222 222 activebookmark = l
223 223 else:
224 224 oldrev, newrev = l.split(':')
225 225 if newrev in (str(nullmerge), str(revignored),
226 226 str(revprecursor), str(revpruned)):
227 227 state[repo[oldrev].rev()] = int(newrev)
228 228 elif newrev == nullid:
229 229 state[repo[oldrev].rev()] = revtodo
230 230 # Legacy compat special case
231 231 else:
232 232 state[repo[oldrev].rev()] = repo[newrev].rev()
233 233
234 234 except IOError as err:
235 235 if err.errno != errno.ENOENT:
236 236 raise
237 237 cmdutil.wrongtooltocontinue(repo, _('rebase'))
238 238
239 239 if keepbranches is None:
240 240 raise error.Abort(_('.hg/rebasestate is incomplete'))
241 241
242 242 skipped = set()
243 243 # recompute the set of skipped revs
244 244 if not collapse:
245 245 seen = {dest}
246 246 for old, new in sorted(state.items()):
247 247 if new != revtodo and new in seen:
248 248 skipped.add(old)
249 249 seen.add(new)
250 250 repo.ui.debug('computed skipped revs: %s\n' %
251 251 (' '.join(str(r) for r in sorted(skipped)) or None))
252 252 repo.ui.debug('rebase status resumed\n')
253 253 _setrebasesetvisibility(repo, set(state.keys()) | {originalwd})
254 254
255 255 self.originalwd = originalwd
256 256 self.dest = dest
257 257 self.state = state
258 258 self.skipped = skipped
259 259 self.collapsef = collapse
260 260 self.keepf = keep
261 261 self.keepbranchesf = keepbranches
262 262 self.external = external
263 263 self.activebookmark = activebookmark
264 264
265 265 def _handleskippingobsolete(self, rebaserevs, obsoleterevs, dest):
266 266 """Compute structures necessary for skipping obsolete revisions
267 267
268 268 rebaserevs: iterable of all revisions that are to be rebased
269 269 obsoleterevs: iterable of all obsolete revisions in rebaseset
270 270 dest: a destination revision for the rebase operation
271 271 """
272 272 self.obsoletenotrebased = {}
273 273 if not self.ui.configbool('experimental', 'rebaseskipobsolete',
274 274 default=True):
275 275 return
276 276 rebaseset = set(rebaserevs)
277 277 obsoleteset = set(obsoleterevs)
278 278 self.obsoletenotrebased = _computeobsoletenotrebased(self.repo,
279 279 obsoleteset, dest)
280 280 skippedset = set(self.obsoletenotrebased)
281 281 _checkobsrebase(self.repo, self.ui, obsoleteset, rebaseset, skippedset)
282 282
283 283 def _prepareabortorcontinue(self, isabort):
284 284 try:
285 285 self.restorestatus()
286 286 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
287 287 except error.RepoLookupError:
288 288 if isabort:
289 289 clearstatus(self.repo)
290 290 clearcollapsemsg(self.repo)
291 291 self.repo.ui.warn(_('rebase aborted (no revision is removed,'
292 292 ' only broken state is cleared)\n'))
293 293 return 0
294 294 else:
295 295 msg = _('cannot continue inconsistent rebase')
296 296 hint = _('use "hg rebase --abort" to clear broken state')
297 297 raise error.Abort(msg, hint=hint)
298 298 if isabort:
299 299 return abort(self.repo, self.originalwd, self.dest,
300 300 self.state, activebookmark=self.activebookmark)
301 301
302 302 obsrevs = (r for r, st in self.state.items() if st == revprecursor)
303 303 self._handleskippingobsolete(self.state.keys(), obsrevs, self.dest)
304 304
305 305 def _preparenewrebase(self, dest, rebaseset):
306 306 if dest is None:
307 307 return _nothingtorebase()
308 308
309 309 allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
310 310 if (not (self.keepf or allowunstable)
311 311 and self.repo.revs('first(children(%ld) - %ld)',
312 312 rebaseset, rebaseset)):
313 313 raise error.Abort(
314 314 _("can't remove original changesets with"
315 315 " unrebased descendants"),
316 316 hint=_('use --keep to keep original changesets'))
317 317
318 318 obsrevs = _filterobsoleterevs(self.repo, set(rebaseset))
319 319 self._handleskippingobsolete(rebaseset, obsrevs, dest.rev())
320 320
321 321 result = buildstate(self.repo, dest, rebaseset, self.collapsef,
322 322 self.obsoletenotrebased)
323 323
324 324 if not result:
325 325 # Empty state built, nothing to rebase
326 326 self.ui.status(_('nothing to rebase\n'))
327 327 return _nothingtorebase()
328 328
329 329 for root in self.repo.set('roots(%ld)', rebaseset):
330 330 if not self.keepf and not root.mutable():
331 331 raise error.Abort(_("can't rebase public changeset %s")
332 332 % root,
333 333 hint=_("see 'hg help phases' for details"))
334 334
335 335 (self.originalwd, self.dest, self.state) = result
336 336 if self.collapsef:
337 337 self.destancestors = self.repo.changelog.ancestors(
338 338 [self.dest],
339 339 inclusive=True)
340 340 self.external = externalparent(self.repo, self.state,
341 341 self.destancestors)
342 342
343 343 if dest.closesbranch() and not self.keepbranchesf:
344 344 self.ui.status(_('reopening closed branch head %s\n') % dest)
345 345
346 346 def _performrebase(self, tr):
347 347 repo, ui, opts = self.repo, self.ui, self.opts
348 348 if self.keepbranchesf:
349 349 # insert _savebranch at the start of extrafns so if
350 350 # there's a user-provided extrafn it can clobber branch if
351 351 # desired
352 352 self.extrafns.insert(0, _savebranch)
353 353 if self.collapsef:
354 354 branches = set()
355 355 for rev in self.state:
356 356 branches.add(repo[rev].branch())
357 357 if len(branches) > 1:
358 358 raise error.Abort(_('cannot collapse multiple named '
359 359 'branches'))
360 360
361 361 # Rebase
362 362 if not self.destancestors:
363 363 self.destancestors = repo.changelog.ancestors([self.dest],
364 364 inclusive=True)
365 365
366 366 # Keep track of the active bookmarks in order to reset them later
367 367 self.activebookmark = self.activebookmark or repo._activebookmark
368 368 if self.activebookmark:
369 369 bookmarks.deactivate(repo)
370 370
371 371 # Store the state before we begin so users can run 'hg rebase --abort'
372 372 # if we fail before the transaction closes.
373 373 self.storestatus()
374 374
375 375 sortedrevs = repo.revs('sort(%ld, -topo)', self.state)
376 376 cands = [k for k, v in self.state.iteritems() if v == revtodo]
377 377 total = len(cands)
378 378 pos = 0
379 379 for rev in sortedrevs:
380 380 ctx = repo[rev]
381 381 desc = '%d:%s "%s"' % (ctx.rev(), ctx,
382 382 ctx.description().split('\n', 1)[0])
383 383 names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
384 384 if names:
385 385 desc += ' (%s)' % ' '.join(names)
386 386 if self.state[rev] == rev:
387 387 ui.status(_('already rebased %s\n') % desc)
388 388 elif self.state[rev] == revtodo:
389 389 pos += 1
390 390 ui.status(_('rebasing %s\n') % desc)
391 391 ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
392 392 _('changesets'), total)
393 393 p1, p2, base = defineparents(repo, rev, self.dest,
394 394 self.state,
395 395 self.destancestors,
396 396 self.obsoletenotrebased)
397 397 self.storestatus(tr=tr)
398 398 storecollapsemsg(repo, self.collapsemsg)
399 399 if len(repo[None].parents()) == 2:
400 400 repo.ui.debug('resuming interrupted rebase\n')
401 401 else:
402 402 try:
403 403 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
404 404 'rebase')
405 405 stats = rebasenode(repo, rev, p1, base, self.state,
406 406 self.collapsef, self.dest)
407 407 if stats and stats[3] > 0:
408 408 raise error.InterventionRequired(
409 409 _('unresolved conflicts (see hg '
410 410 'resolve, then hg rebase --continue)'))
411 411 finally:
412 412 ui.setconfig('ui', 'forcemerge', '', 'rebase')
413 413 if not self.collapsef:
414 414 merging = p2 != nullrev
415 415 editform = cmdutil.mergeeditform(merging, 'rebase')
416 416 editor = cmdutil.getcommiteditor(editform=editform, **opts)
417 417 newnode = concludenode(repo, rev, p1, p2,
418 418 extrafn=_makeextrafn(self.extrafns),
419 419 editor=editor,
420 420 keepbranches=self.keepbranchesf,
421 421 date=self.date)
422 422 if newnode is None:
423 423 # If it ended up being a no-op commit, then the normal
424 424 # merge state clean-up path doesn't happen, so do it
425 425 # here. Fix issue5494
426 426 mergemod.mergestate.clean(repo)
427 427 else:
428 428 # Skip commit if we are collapsing
429 429 repo.setparents(repo[p1].node())
430 430 newnode = None
431 431 # Update the state
432 432 if newnode is not None:
433 433 self.state[rev] = repo[newnode].rev()
434 434 ui.debug('rebased as %s\n' % short(newnode))
435 435 else:
436 436 if not self.collapsef:
437 437 ui.warn(_('note: rebase of %d:%s created no changes '
438 438 'to commit\n') % (rev, ctx))
439 439 self.skipped.add(rev)
440 440 self.state[rev] = p1
441 441 ui.debug('next revision set to %s\n' % p1)
442 442 elif self.state[rev] == nullmerge:
443 443 ui.debug('ignoring null merge rebase of %s\n' % rev)
444 444 elif self.state[rev] == revignored:
445 445 ui.status(_('not rebasing ignored %s\n') % desc)
446 446 elif self.state[rev] == revprecursor:
447 447 destctx = repo[self.obsoletenotrebased[rev]]
448 448 descdest = '%d:%s "%s"' % (destctx.rev(), destctx,
449 449 destctx.description().split('\n', 1)[0])
450 450 msg = _('note: not rebasing %s, already in destination as %s\n')
451 451 ui.status(msg % (desc, descdest))
452 452 elif self.state[rev] == revpruned:
453 453 msg = _('note: not rebasing %s, it has no successor\n')
454 454 ui.status(msg % desc)
455 455 else:
456 456 ui.status(_('already rebased %s as %s\n') %
457 457 (desc, repo[self.state[rev]]))
458 458
459 459 ui.progress(_('rebasing'), None)
460 460 ui.note(_('rebase merging completed\n'))
461 461
462 462 def _finishrebase(self):
463 463 repo, ui, opts = self.repo, self.ui, self.opts
464 464 if self.collapsef and not self.keepopen:
465 465 p1, p2, _base = defineparents(repo, min(self.state),
466 466 self.dest, self.state,
467 467 self.destancestors,
468 468 self.obsoletenotrebased)
469 469 editopt = opts.get('edit')
470 470 editform = 'rebase.collapse'
471 471 if self.collapsemsg:
472 472 commitmsg = self.collapsemsg
473 473 else:
474 474 commitmsg = 'Collapsed revision'
475 475 for rebased in sorted(self.state):
476 476 if rebased not in self.skipped and\
477 477 self.state[rebased] > nullmerge:
478 478 commitmsg += '\n* %s' % repo[rebased].description()
479 479 editopt = True
480 480 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
481 481 revtoreuse = max(self.state)
482 482 newnode = concludenode(repo, revtoreuse, p1, self.external,
483 483 commitmsg=commitmsg,
484 484 extrafn=_makeextrafn(self.extrafns),
485 485 editor=editor,
486 486 keepbranches=self.keepbranchesf,
487 487 date=self.date)
488 488 if newnode is None:
489 489 newrev = self.dest
490 490 else:
491 491 newrev = repo[newnode].rev()
492 492 for oldrev in self.state.iterkeys():
493 493 if self.state[oldrev] > nullmerge:
494 494 self.state[oldrev] = newrev
495 495
496 496 if 'qtip' in repo.tags():
497 497 updatemq(repo, self.state, self.skipped, **opts)
498 498
499 499 # restore original working directory
500 500 # (we do this before stripping)
501 501 newwd = self.state.get(self.originalwd, self.originalwd)
502 502 if newwd == revprecursor:
503 503 newwd = self.obsoletenotrebased[self.originalwd]
504 504 elif newwd < 0:
505 505 # original directory is a parent of rebase set root or ignored
506 506 newwd = self.originalwd
507 507 if newwd not in [c.rev() for c in repo[None].parents()]:
508 508 ui.note(_("update back to initial working directory parent\n"))
509 509 hg.updaterepo(repo, newwd, False)
510 510
511 collapsedas = None
511 512 if not self.keepf:
512 collapsedas = None
513 513 if self.collapsef:
514 514 collapsedas = newnode
515 515 clearrebased(ui, repo, self.dest, self.state, self.skipped,
516 collapsedas)
516 collapsedas, self.keepf)
517 517
518 518 clearstatus(repo)
519 519 clearcollapsemsg(repo)
520 520
521 521 ui.note(_("rebase completed\n"))
522 522 util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
523 523 if self.skipped:
524 524 skippedlen = len(self.skipped)
525 525 ui.note(_("%d revisions have been skipped\n") % skippedlen)
526 526
527 527 if (self.activebookmark and self.activebookmark in repo._bookmarks and
528 528 repo['.'].node() == repo._bookmarks[self.activebookmark]):
529 529 bookmarks.activate(repo, self.activebookmark)
530 530
531 531 @command('rebase',
532 532 [('s', 'source', '',
533 533 _('rebase the specified changeset and descendants'), _('REV')),
534 534 ('b', 'base', '',
535 535 _('rebase everything from branching point of specified changeset'),
536 536 _('REV')),
537 537 ('r', 'rev', [],
538 538 _('rebase these revisions'),
539 539 _('REV')),
540 540 ('d', 'dest', '',
541 541 _('rebase onto the specified changeset'), _('REV')),
542 542 ('', 'collapse', False, _('collapse the rebased changesets')),
543 543 ('m', 'message', '',
544 544 _('use text as collapse commit message'), _('TEXT')),
545 545 ('e', 'edit', False, _('invoke editor on commit messages')),
546 546 ('l', 'logfile', '',
547 547 _('read collapse commit message from file'), _('FILE')),
548 548 ('k', 'keep', False, _('keep original changesets')),
549 549 ('', 'keepbranches', False, _('keep original branch names')),
550 550 ('D', 'detach', False, _('(DEPRECATED)')),
551 551 ('i', 'interactive', False, _('(DEPRECATED)')),
552 552 ('t', 'tool', '', _('specify merge tool')),
553 553 ('c', 'continue', False, _('continue an interrupted rebase')),
554 554 ('a', 'abort', False, _('abort an interrupted rebase'))] +
555 555 templateopts,
556 556 _('[-s REV | -b REV] [-d REV] [OPTION]'))
557 557 def rebase(ui, repo, **opts):
558 558 """move changeset (and descendants) to a different branch
559 559
560 560 Rebase uses repeated merging to graft changesets from one part of
561 561 history (the source) onto another (the destination). This can be
562 562 useful for linearizing *local* changes relative to a master
563 563 development tree.
564 564
565 565 Published commits cannot be rebased (see :hg:`help phases`).
566 566 To copy commits, see :hg:`help graft`.
567 567
568 568 If you don't specify a destination changeset (``-d/--dest``), rebase
569 569 will use the same logic as :hg:`merge` to pick a destination. If
570 570 the current branch contains exactly one other head, the other head
571 571 is merged with by default. Otherwise, an explicit revision with
572 572 which to merge must be provided. (The destination changeset is not
573 573 modified by rebasing, but new changesets are added as its
574 574 descendants.)
575 575
576 576 Here are the ways to select changesets:
577 577
578 578 1. Explicitly select them using ``--rev``.
579 579
580 580 2. Use ``--source`` to select a root changeset and include all of its
581 581 descendants.
582 582
583 583 3. Use ``--base`` to select a changeset; rebase will find ancestors
584 584 and their descendants which are not also ancestors of the destination.
585 585
586 586 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
587 587 rebase will use ``--base .`` as above.
588 588
589 589 Rebase will destroy original changesets unless you use ``--keep``.
590 590 It will also move your bookmarks (even if you do).
591 591
592 592 Some changesets may be dropped if they do not contribute changes
593 593 (e.g. merges from the destination branch).
594 594
595 595 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
596 596 a named branch with two heads. You will need to explicitly specify source
597 597 and/or destination.
598 598
599 599 If you need to use a tool to automate merge/conflict decisions, you
600 600 can specify one with ``--tool``, see :hg:`help merge-tools`.
601 601 As a caveat: the tool will not be used to mediate when a file was
602 602 deleted; there is no hook presently available for this.
603 603
604 604 If a rebase is interrupted to manually resolve a conflict, it can be
605 605 continued with --continue/-c or aborted with --abort/-a.
606 606
607 607 .. container:: verbose
608 608
609 609 Examples:
610 610
611 611 - move "local changes" (current commit back to branching point)
612 612 to the current branch tip after a pull::
613 613
614 614 hg rebase
615 615
616 616 - move a single changeset to the stable branch::
617 617
618 618 hg rebase -r 5f493448 -d stable
619 619
620 620 - splice a commit and all its descendants onto another part of history::
621 621
622 622 hg rebase --source c0c3 --dest 4cf9
623 623
624 624 - rebase everything on a branch marked by a bookmark onto the
625 625 default branch::
626 626
627 627 hg rebase --base myfeature --dest default
628 628
629 629 - collapse a sequence of changes into a single commit::
630 630
631 631 hg rebase --collapse -r 1520:1525 -d .
632 632
633 633 - move a named branch while preserving its name::
634 634
635 635 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
636 636
637 637 Configuration Options:
638 638
639 639 You can make rebase require a destination if you set the following config
640 640 option::
641 641
642 642 [commands]
643 643 rebase.requiredest = True
644 644
645 645 By default, rebase will close the transaction after each commit. For
646 646 performance purposes, you can configure rebase to use a single transaction
647 647 across the entire rebase. WARNING: This setting introduces a significant
648 648 risk of losing the work you've done in a rebase if the rebase aborts
649 649 unexpectedly::
650 650
651 651 [rebase]
652 652 singletransaction = True
653 653
654 654 Return Values:
655 655
656 656 Returns 0 on success, 1 if nothing to rebase or there are
657 657 unresolved conflicts.
658 658
659 659 """
660 660 rbsrt = rebaseruntime(repo, ui, opts)
661 661
662 662 with repo.wlock(), repo.lock():
663 663 # Validate input and define rebasing points
664 664 destf = opts.get('dest', None)
665 665 srcf = opts.get('source', None)
666 666 basef = opts.get('base', None)
667 667 revf = opts.get('rev', [])
668 668 # search default destination in this space
669 669 # used in the 'hg pull --rebase' case, see issue 5214.
670 670 destspace = opts.get('_destspace')
671 671 contf = opts.get('continue')
672 672 abortf = opts.get('abort')
673 673 if opts.get('interactive'):
674 674 try:
675 675 if extensions.find('histedit'):
676 676 enablehistedit = ''
677 677 except KeyError:
678 678 enablehistedit = " --config extensions.histedit="
679 679 help = "hg%s help -e histedit" % enablehistedit
680 680 msg = _("interactive history editing is supported by the "
681 681 "'histedit' extension (see \"%s\")") % help
682 682 raise error.Abort(msg)
683 683
684 684 if rbsrt.collapsemsg and not rbsrt.collapsef:
685 685 raise error.Abort(
686 686 _('message can only be specified with collapse'))
687 687
688 688 if contf or abortf:
689 689 if contf and abortf:
690 690 raise error.Abort(_('cannot use both abort and continue'))
691 691 if rbsrt.collapsef:
692 692 raise error.Abort(
693 693 _('cannot use collapse with continue or abort'))
694 694 if srcf or basef or destf:
695 695 raise error.Abort(
696 696 _('abort and continue do not allow specifying revisions'))
697 697 if abortf and opts.get('tool', False):
698 698 ui.warn(_('tool option will be ignored\n'))
699 699 if contf:
700 700 ms = mergemod.mergestate.read(repo)
701 701 mergeutil.checkunresolved(ms)
702 702
703 703 retcode = rbsrt._prepareabortorcontinue(abortf)
704 704 if retcode is not None:
705 705 return retcode
706 706 else:
707 707 dest, rebaseset = _definesets(ui, repo, destf, srcf, basef, revf,
708 708 destspace=destspace)
709 709 retcode = rbsrt._preparenewrebase(dest, rebaseset)
710 710 if retcode is not None:
711 711 return retcode
712 712
713 713 tr = None
714 714 if ui.configbool('rebase', 'singletransaction'):
715 715 tr = repo.transaction('rebase')
716 716 with util.acceptintervention(tr):
717 717 rbsrt._performrebase(tr)
718 718
719 719 rbsrt._finishrebase()
720 720
721 721 def _definesets(ui, repo, destf=None, srcf=None, basef=None, revf=None,
722 722 destspace=None):
723 723 """use revisions argument to define destination and rebase set
724 724 """
725 725 if revf is None:
726 726 revf = []
727 727
728 728 # destspace is here to work around issues with `hg pull --rebase`; see
729 729 # issue5214 for details
730 730 if srcf and basef:
731 731 raise error.Abort(_('cannot specify both a source and a base'))
732 732 if revf and basef:
733 733 raise error.Abort(_('cannot specify both a revision and a base'))
734 734 if revf and srcf:
735 735 raise error.Abort(_('cannot specify both a revision and a source'))
736 736
737 737 cmdutil.checkunfinished(repo)
738 738 cmdutil.bailifchanged(repo)
739 739
740 740 if ui.configbool('commands', 'rebase.requiredest') and not destf:
741 741 raise error.Abort(_('you must specify a destination'),
742 742 hint=_('use: hg rebase -d REV'))
743 743
744 744 if destf:
745 745 dest = scmutil.revsingle(repo, destf)
746 746
747 747 if revf:
748 748 rebaseset = scmutil.revrange(repo, revf)
749 749 if not rebaseset:
750 750 ui.status(_('empty "rev" revision set - nothing to rebase\n'))
751 751 return None, None
752 752 elif srcf:
753 753 src = scmutil.revrange(repo, [srcf])
754 754 if not src:
755 755 ui.status(_('empty "source" revision set - nothing to rebase\n'))
756 756 return None, None
757 757 rebaseset = repo.revs('(%ld)::', src)
758 758 assert rebaseset
759 759 else:
760 760 base = scmutil.revrange(repo, [basef or '.'])
761 761 if not base:
762 762 ui.status(_('empty "base" revision set - '
763 763 "can't compute rebase set\n"))
764 764 return None, None
765 765 if not destf:
766 766 dest = repo[_destrebase(repo, base, destspace=destspace)]
767 767 destf = str(dest)
768 768
769 769 roots = [] # selected children of branching points
770 770 bpbase = {} # {branchingpoint: [origbase]}
771 771 for b in base: # group bases by branching points
772 772 bp = repo.revs('ancestor(%d, %d)', b, dest).first()
773 773 bpbase[bp] = bpbase.get(bp, []) + [b]
774 774 if None in bpbase:
775 775 # emulate the old behavior, showing "nothing to rebase" (a better
776 776 # behavior may be to abort with a "cannot find branching point" error)
777 777 bpbase.clear()
778 778 for bp, bs in bpbase.iteritems(): # calculate roots
779 779 roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))
780 780
781 781 rebaseset = repo.revs('%ld::', roots)
782 782
783 783 if not rebaseset:
784 784 # transform to list because smartsets are not comparable to
785 785 # lists. This should be improved to honor laziness of
786 786 # smartset.
787 787 if list(base) == [dest.rev()]:
788 788 if basef:
789 789 ui.status(_('nothing to rebase - %s is both "base"'
790 790 ' and destination\n') % dest)
791 791 else:
792 792 ui.status(_('nothing to rebase - working directory '
793 793 'parent is also destination\n'))
794 794 elif not repo.revs('%ld - ::%d', base, dest):
795 795 if basef:
796 796 ui.status(_('nothing to rebase - "base" %s is '
797 797 'already an ancestor of destination '
798 798 '%s\n') %
799 799 ('+'.join(str(repo[r]) for r in base),
800 800 dest))
801 801 else:
802 802 ui.status(_('nothing to rebase - working '
803 803 'directory parent is already an '
804 804 'ancestor of destination %s\n') % dest)
805 805 else: # can it happen?
806 806 ui.status(_('nothing to rebase from %s to %s\n') %
807 807 ('+'.join(str(repo[r]) for r in base), dest))
808 808 return None, None
809 809
810 810 if not destf:
811 811 dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
812 812 destf = str(dest)
813 813
814 814 return dest, rebaseset
815 815
816 816 def externalparent(repo, state, destancestors):
817 817 """Return the revision that should be used as the second parent
818 818 when the revisions in state are collapsed on top of destancestors.
819 819 Abort if there is more than one parent.
820 820 """
821 821 parents = set()
822 822 source = min(state)
823 823 for rev in state:
824 824 if rev == source:
825 825 continue
826 826 for p in repo[rev].parents():
827 827 if (p.rev() not in state
828 828 and p.rev() not in destancestors):
829 829 parents.add(p.rev())
830 830 if not parents:
831 831 return nullrev
832 832 if len(parents) == 1:
833 833 return parents.pop()
834 834 raise error.Abort(_('unable to collapse on top of %s, there is more '
835 835 'than one external parent: %s') %
836 836 (max(destancestors),
837 837 ', '.join(str(p) for p in sorted(parents))))
838 838
839 839 def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None,
840 840 keepbranches=False, date=None):
841 841 '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev
842 842 but also store useful information in extra.
843 843 Return node of committed revision.'''
844 844 dsguard = dirstateguard.dirstateguard(repo, 'rebase')
845 845 try:
846 846 repo.setparents(repo[p1].node(), repo[p2].node())
847 847 ctx = repo[rev]
848 848 if commitmsg is None:
849 849 commitmsg = ctx.description()
850 850 keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
851 851 extra = {'rebase_source': ctx.hex()}
852 852 if extrafn:
853 853 extrafn(ctx, extra)
854 854
855 855 destphase = max(ctx.phase(), phases.draft)
856 856 overrides = {('phases', 'new-commit'): destphase}
857 857 with repo.ui.configoverride(overrides, 'rebase'):
858 858 if keepbranch:
859 859 repo.ui.setconfig('ui', 'allowemptycommit', True)
860 860 # Commit might fail if unresolved files exist
861 861 if date is None:
862 862 date = ctx.date()
863 863 newnode = repo.commit(text=commitmsg, user=ctx.user(),
864 864 date=date, extra=extra, editor=editor)
865 865
866 866 repo.dirstate.setbranch(repo[newnode].branch())
867 867 dsguard.close()
868 868 return newnode
869 869 finally:
870 870 release(dsguard)
871 871
872 872 def rebasenode(repo, rev, p1, base, state, collapse, dest):
873 873 'Rebase a single revision rev on top of p1 using base as merge ancestor'
874 874 # Merge phase
875 875 # Update to destination and merge it with local
876 876 if repo['.'].rev() != p1:
877 877 repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
878 878 mergemod.update(repo, p1, False, True)
879 879 else:
880 880 repo.ui.debug(" already in destination\n")
881 881 repo.dirstate.write(repo.currenttransaction())
882 882 repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
883 883 if base is not None:
884 884 repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
885 885 # When collapsing in-place, the parent is the common ancestor, so we
886 886 # have to allow merging with it.
887 887 stats = mergemod.update(repo, rev, True, True, base, collapse,
888 888 labels=['dest', 'source'])
889 889 if collapse:
890 890 copies.duplicatecopies(repo, rev, dest)
891 891 else:
892 892 # If we're not using --collapse, we need to
893 893 # duplicate copies between the revision we're
894 894 # rebasing and its first parent, but *not*
895 895 # duplicate any copies that have already been
896 896 # performed in the destination.
897 897 p1rev = repo[rev].p1().rev()
898 898 copies.duplicatecopies(repo, rev, p1rev, skiprev=dest)
899 899 return stats
900 900
901 901 def adjustdest(repo, rev, dest, state):
902 902 """adjust rebase destination given the current rebase state
903 903
904 904 rev is what is being rebased. Return a list of two revs, which are the
905 905 adjusted destinations for rev's p1 and p2, respectively. If a parent is
906 906 nullrev, return dest without adjustment for it.
907 907
908 908 For example, when doing rebase -r B+E -d F, rebase will first move B to B1,
909 909 and E's destination will be adjusted from F to B1.
910 910
911 911 B1 <- written during rebasing B
912 912 |
913 913 F <- original destination of B, E
914 914 |
915 915 | E <- rev, which is being rebased
916 916 | |
917 917 | D <- prev, one parent of rev being checked
918 918 | |
919 919 | x <- skipped, ex. no successor or successor in (::dest)
920 920 | |
921 921 | C
922 922 | |
923 923 | B <- rebased as B1
924 924 |/
925 925 A
926 926
927 927 Another example involves a merge changeset: with rebase -r C+G+H -d K, rebase
928 928 will first move C to C1 and G to G1, and when it's checking H, the adjusted
929 929 destinations will be [C1, G1].
930 930
931 931 H C1 G1
932 932 /| | /
933 933 F G |/
934 934 K | | -> K
935 935 | C D |
936 936 | |/ |
937 937 | B | ...
938 938 |/ |/
939 939 A A
940 940 """
941 941 result = []
942 942 for prev in repo.changelog.parentrevs(rev):
943 943 adjusted = dest
944 944 if prev != nullrev:
945 945 # pick already rebased revs from state
946 946 source = [s for s, d in state.items() if d > 0]
947 947 candidate = repo.revs('max(%ld and (::%d))', source, prev).first()
948 948 if candidate is not None:
949 949 adjusted = state[candidate]
950 950 result.append(adjusted)
951 951 return result
952 952
953 953 def nearestrebased(repo, rev, state):
954 954 """return the nearest ancestors of rev in the rebase result"""
955 955 rebased = [r for r in state if state[r] > nullmerge]
956 956 candidates = repo.revs('max(%ld and (::%d))', rebased, rev)
957 957 if candidates:
958 958 return state[candidates.first()]
959 959 else:
960 960 return None
961 961
962 962 def _checkobsrebase(repo, ui, rebaseobsrevs, rebasesetrevs, rebaseobsskipped):
963 963 """
964 964 Abort if rebase will create divergence or rebase is noop because of markers
965 965
966 966 `rebaseobsrevs`: set of obsolete revisions in source
967 967 `rebasesetrevs`: set of revisions to be rebased from source
968 968 `rebaseobsskipped`: set of revisions from source skipped because they have
969 969 successors in destination
970 970 """
971 971 # Obsolete node with successors not in dest leads to divergence
972 972 divergenceok = ui.configbool('experimental',
973 973 'allowdivergence')
974 974 divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
975 975
976 976 if divergencebasecandidates and not divergenceok:
977 977 divhashes = (str(repo[r])
978 978 for r in divergencebasecandidates)
979 979 msg = _("this rebase will cause "
980 980 "divergences from: %s")
981 981 h = _("to force the rebase please set "
982 982 "experimental.allowdivergence=True")
983 983 raise error.Abort(msg % (",".join(divhashes),), hint=h)
984 984
985 985 def defineparents(repo, rev, dest, state, destancestors,
986 986 obsoletenotrebased):
987 987 'Return the new parent relationship of the revision that will be rebased'
988 988 parents = repo[rev].parents()
989 989 p1 = p2 = nullrev
990 990 rp1 = None
991 991
992 992 p1n = parents[0].rev()
993 993 if p1n in destancestors:
994 994 p1 = dest
995 995 elif p1n in state:
996 996 if state[p1n] == nullmerge:
997 997 p1 = dest
998 998 elif state[p1n] in revskipped:
999 999 p1 = nearestrebased(repo, p1n, state)
1000 1000 if p1 is None:
1001 1001 p1 = dest
1002 1002 else:
1003 1003 p1 = state[p1n]
1004 1004 else: # p1n external
1005 1005 p1 = dest
1006 1006 p2 = p1n
1007 1007
1008 1008 if len(parents) == 2 and parents[1].rev() not in destancestors:
1009 1009 p2n = parents[1].rev()
1010 1010 # interesting second parent
1011 1011 if p2n in state:
1012 1012 if p1 == dest: # p1n in destancestors or external
1013 1013 p1 = state[p2n]
1014 1014 if p1 == revprecursor:
1015 1015 rp1 = obsoletenotrebased[p2n]
1016 1016 elif state[p2n] in revskipped:
1017 1017 p2 = nearestrebased(repo, p2n, state)
1018 1018 if p2 is None:
1019 1019 # no ancestors rebased yet, detach
1020 1020 p2 = dest
1021 1021 else:
1022 1022 p2 = state[p2n]
1023 1023 else: # p2n external
1024 1024 if p2 != nullrev: # p1n external too => rev is a merged revision
1025 1025 raise error.Abort(_('cannot use revision %d as base, result '
1026 1026 'would have 3 parents') % rev)
1027 1027 p2 = p2n
1028 1028 repo.ui.debug(" future parents are %d and %d\n" %
1029 1029 (repo[rp1 or p1].rev(), repo[p2].rev()))
1030 1030
1031 1031 if not any(p.rev() in state for p in parents):
1032 1032 # Case (1) root changeset of a non-detaching rebase set.
1033 1033 # Let the merge mechanism find the base itself.
1034 1034 base = None
1035 1035 elif not repo[rev].p2():
1036 1036 # Case (2) detaching the node with a single parent, use this parent
1037 1037 base = repo[rev].p1().rev()
1038 1038 else:
1039 1039 # Assuming there is a p1, this is the case where there also is a p2.
1040 1040 # We are thus rebasing a merge and need to pick the right merge base.
1041 1041 #
1042 1042 # Imagine we have:
1043 1043 # - M: current rebase revision in this step
1044 1044 # - A: one parent of M
1045 1045 # - B: other parent of M
1046 1046 # - D: destination of this merge step (p1 var)
1047 1047 #
1048 1048 # Consider the case where D is a descendant of A or B and the other is
1049 1049 # 'outside'. In this case, the right merge base is the D ancestor.
1050 1050 #
1051 1051 # An informal proof, assuming A is 'outside' and B is the D ancestor:
1052 1052 #
1053 1053 # If we pick B as the base, the merge involves:
1054 1054 # - changes from B to M (actual changeset payload)
1055 1055 # - changes from B to D (induced by rebase, as D is a rebased
1056 1056 # version of B)
1057 1057 # Which exactly represents the rebase operation.
1058 1058 #
1059 1059 # If we pick A as the base, the merge involves:
1060 1060 # - changes from A to M (actual changeset payload)
1061 1061 # - changes from A to D (which include changes between unrelated A and B
1062 1062 # plus changes induced by rebase)
1063 1063 # Which does not represent anything sensible and creates a lot of
1064 1064 # conflicts. A is thus not the right choice - B is.
1065 1065 #
1066 1066 # Note: The base found in this 'proof' is only correct in the specified
1067 1067 # case. This base does not make sense if D is not a descendant of A or B
1068 1068 # or if the other parent is not 'outside' (especially not if the other
1069 1069 # parent has been rebased). The current implementation does not
1070 1070 # make it feasible to consider different cases separately. In these
1071 1071 # other cases we currently just leave it to the user to correctly
1072 1072 # resolve an impossible merge using a wrong ancestor.
1073 1073 #
1074 1074 # xx, p1 could be -4, and both parents could probably be -4...
1075 1075 for p in repo[rev].parents():
1076 1076 if state.get(p.rev()) == p1:
1077 1077 base = p.rev()
1078 1078 break
1079 1079 else: # fallback when base not found
1080 1080 base = None
1081 1081
1082 1082 # Raise because this function is called wrong (see issue 4106)
1083 1083 raise AssertionError('no base found to rebase on '
1084 1084 '(defineparents called wrong)')
1085 1085 return rp1 or p1, p2, base
1086 1086
1087 1087 def isagitpatch(repo, patchname):
1088 1088 'Return true if the given patch is in git format'
1089 1089 mqpatch = os.path.join(repo.mq.path, patchname)
1090 1090 for line in patch.linereader(file(mqpatch, 'rb')):
1091 1091 if line.startswith('diff --git'):
1092 1092 return True
1093 1093 return False
1094 1094
1095 1095 def updatemq(repo, state, skipped, **opts):
1096 1096 'Update rebased mq patches - finalize and then import them'
1097 1097 mqrebase = {}
1098 1098 mq = repo.mq
1099 1099 original_series = mq.fullseries[:]
1100 1100 skippedpatches = set()
1101 1101
1102 1102 for p in mq.applied:
1103 1103 rev = repo[p.node].rev()
1104 1104 if rev in state:
1105 1105 repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
1106 1106 (rev, p.name))
1107 1107 mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
1108 1108 else:
1109 1109 # Applied but not rebased, not sure this should happen
1110 1110 skippedpatches.add(p.name)
1111 1111
1112 1112 if mqrebase:
1113 1113 mq.finish(repo, mqrebase.keys())
1114 1114
1115 1115 # We must start import from the newest revision
1116 1116 for rev in sorted(mqrebase, reverse=True):
1117 1117 if rev not in skipped:
1118 1118 name, isgit = mqrebase[rev]
1119 1119 repo.ui.note(_('updating mq patch %s to %s:%s\n') %
1120 1120 (name, state[rev], repo[state[rev]]))
1121 1121 mq.qimport(repo, (), patchname=name, git=isgit,
1122 1122 rev=[str(state[rev])])
1123 1123 else:
1124 1124 # Rebased and skipped
1125 1125 skippedpatches.add(mqrebase[rev][0])
1126 1126
1127 1127 # Patches were either applied and rebased and imported in
1128 1128 # order, applied and removed or unapplied. Discard the removed
1129 1129 # ones while preserving the original series order and guards.
1130 1130 newseries = [s for s in original_series
1131 1131 if mq.guard_re.split(s, 1)[0] not in skippedpatches]
1132 1132 mq.fullseries[:] = newseries
1133 1133 mq.seriesdirty = True
1134 1134 mq.savedirty()
1135 1135
1136 1136 def storecollapsemsg(repo, collapsemsg):
1137 1137 'Store the collapse message to allow recovery'
1138 1138 collapsemsg = collapsemsg or ''
1139 1139 f = repo.vfs("last-message.txt", "w")
1140 1140 f.write("%s\n" % collapsemsg)
1141 1141 f.close()
1142 1142
1143 1143 def clearcollapsemsg(repo):
1144 1144 'Remove collapse message file'
1145 1145 repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)
1146 1146
1147 1147 def restorecollapsemsg(repo, isabort):
1148 1148 'Restore previously stored collapse message'
1149 1149 try:
1150 1150 f = repo.vfs("last-message.txt")
1151 1151 collapsemsg = f.readline().strip()
1152 1152 f.close()
1153 1153 except IOError as err:
1154 1154 if err.errno != errno.ENOENT:
1155 1155 raise
1156 1156 if isabort:
1157 1157 # Oh well, just abort like normal
1158 1158 collapsemsg = ''
1159 1159 else:
1160 1160 raise error.Abort(_('missing .hg/last-message.txt for rebase'))
1161 1161 return collapsemsg
1162 1162
1163 1163 def clearstatus(repo):
1164 1164 'Remove the status files'
1165 1165 _clearrebasesetvisibiliy(repo)
1166 1166 # Make sure the active transaction won't write the state file
1167 1167 tr = repo.currenttransaction()
1168 1168 if tr:
1169 1169 tr.removefilegenerator('rebasestate')
1170 1170 repo.vfs.unlinkpath("rebasestate", ignoremissing=True)
1171 1171
1172 1172 def needupdate(repo, state):
1173 1173 '''check whether we should `update --clean` away from a merge, or if
1174 1174 somehow the working dir got forcibly updated, e.g. by older hg'''
1175 1175 parents = [p.rev() for p in repo[None].parents()]
1176 1176
1177 1177 # Are we in a merge state at all?
1178 1178 if len(parents) < 2:
1179 1179 return False
1180 1180
1181 1181 # We should be standing on the first as-of-yet unrebased commit.
1182 1182 firstunrebased = min([old for old, new in state.iteritems()
1183 1183 if new == nullrev])
1184 1184 if firstunrebased in parents:
1185 1185 return True
1186 1186
1187 1187 return False
1188 1188
1189 1189 def abort(repo, originalwd, dest, state, activebookmark=None):
1190 1190 '''Restore the repository to its original state. Additional args:
1191 1191
1192 1192 activebookmark: the name of the bookmark that should be active after the
1193 1193 restore'''
1194 1194
1195 1195 try:
1196 1196 # If the first commits in the rebased set get skipped during the rebase,
1197 1197 # their values within the state mapping will be the dest rev id. The
1198 1198 # dstates list must must not contain the dest rev (issue4896)
1199 1199 # dstates list must not contain the dest rev (issue4896)
1200 1200 immutable = [d for d in dstates if not repo[d].mutable()]
1201 1201 cleanup = True
1202 1202 if immutable:
1203 1203 repo.ui.warn(_("warning: can't clean up public changesets %s\n")
1204 1204 % ', '.join(str(repo[r]) for r in immutable),
1205 1205 hint=_("see 'hg help phases' for details"))
1206 1206 cleanup = False
1207 1207
1208 1208 descendants = set()
1209 1209 if dstates:
1210 1210 descendants = set(repo.changelog.descendants(dstates))
1211 1211 if descendants - set(dstates):
1212 1212 repo.ui.warn(_("warning: new changesets detected on destination "
1213 1213 "branch, can't strip\n"))
1214 1214 cleanup = False
1215 1215
1216 1216 if cleanup:
1217 1217 shouldupdate = False
1218 1218 rebased = filter(lambda x: x >= 0 and x != dest, state.values())
1219 1219 if rebased:
1220 1220 strippoints = [
1221 1221 c.node() for c in repo.set('roots(%ld)', rebased)]
1222 1222
1223 1223 updateifonnodes = set(rebased)
1224 1224 updateifonnodes.add(dest)
1225 1225 updateifonnodes.add(originalwd)
1226 1226 shouldupdate = repo['.'].rev() in updateifonnodes
1227 1227
1228 1228 # Update away from the rebase if necessary
1229 1229 if shouldupdate or needupdate(repo, state):
1230 1230 mergemod.update(repo, originalwd, False, True)
1231 1231
1232 1232 # Strip from the first rebased revision
1233 1233 if rebased:
1234 1234 # no backup of rebased cset versions needed
1235 1235 repair.strip(repo.ui, repo, strippoints)
1236 1236
1237 1237 if activebookmark and activebookmark in repo._bookmarks:
1238 1238 bookmarks.activate(repo, activebookmark)
1239 1239
1240 1240 finally:
1241 1241 clearstatus(repo)
1242 1242 clearcollapsemsg(repo)
1243 1243 repo.ui.warn(_('rebase aborted\n'))
1244 1244 return 0
1245 1245
1246 1246 def buildstate(repo, dest, rebaseset, collapse, obsoletenotrebased):
1247 1247 '''Define which revisions are going to be rebased and where
1248 1248
1249 1249 repo: repo
1250 1250 dest: context
1251 1251 rebaseset: set of rev
1252 1252 '''
1253 1253 originalwd = repo['.'].rev()
1254 1254 _setrebasesetvisibility(repo, set(rebaseset) | {originalwd})
1255 1255
1256 1256 # This check isn't strictly necessary, since mq detects commits over an
1257 1257 # applied patch. But it prevents messing up the working directory when
1258 1258 # a partially completed rebase is blocked by mq.
1259 1259 if 'qtip' in repo.tags() and (dest.node() in
1260 1260 [s.node for s in repo.mq.applied]):
1261 1261 raise error.Abort(_('cannot rebase onto an applied mq patch'))
1262 1262
1263 1263 roots = list(repo.set('roots(%ld)', rebaseset))
1264 1264 if not roots:
1265 1265 raise error.Abort(_('no matching revisions'))
1266 1266 roots.sort()
1267 1267 state = dict.fromkeys(rebaseset, revtodo)
1268 1268 detachset = set()
1269 1269 emptyrebase = True
1270 1270 for root in roots:
1271 1271 commonbase = root.ancestor(dest)
1272 1272 if commonbase == root:
1273 1273 raise error.Abort(_('source is ancestor of destination'))
1274 1274 if commonbase == dest:
1275 1275 wctx = repo[None]
1276 1276 if dest == wctx.p1():
1277 1277 # when rebasing to '.', it will use the current wd branch name
1278 1278 samebranch = root.branch() == wctx.branch()
1279 1279 else:
1280 1280 samebranch = root.branch() == dest.branch()
1281 1281 if not collapse and samebranch and dest in root.parents():
1282 1282 # mark the revision as done by setting its new revision
1283 1283 # equal to its old (current) revisions
1284 1284 state[root.rev()] = root.rev()
1285 1285 repo.ui.debug('source is a child of destination\n')
1286 1286 continue
1287 1287
1288 1288 emptyrebase = False
1289 1289 repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
1290 1290 # Rebase tries to turn <dest> into a parent of <root> while
1291 1291 # preserving the number of parents of rebased changesets:
1292 1292 #
1293 1293 # - A changeset with a single parent will always be rebased as a
1294 1294 # changeset with a single parent.
1295 1295 #
1296 1296 # - A merge will be rebased as merge unless its parents are both
1297 1297 # ancestors of <dest> or are themselves in the rebased set and
1298 1298 # pruned while rebased.
1299 1299 #
1300 1300 # If one parent of <root> is an ancestor of <dest>, the rebased
1301 1301 # version of this parent will be <dest>. This is always true with
1302 1302 # --base option.
1303 1303 #
1304 1304 # Otherwise, we need to *replace* the original parents with
1305 1305 # <dest>. This "detaches" the rebased set from its former location
1306 1306 # and rebases it onto <dest>. Changes introduced by ancestors of
1307 1307 # <root> not common with <dest> (the detachset, marked as
1308 1308 # nullmerge) are "removed" from the rebased changesets.
1309 1309 #
1310 1310 # - If <root> has a single parent, set it to <dest>.
1311 1311 #
1312 1312 # - If <root> is a merge, we cannot decide which parent to
1313 1313 # replace, the rebase operation is not clearly defined.
1314 1314 #
1315 1315 # The table below sums up this behavior:
1316 1316 #
1317 1317 # +------------------+----------------------+-------------------------+
1318 1318 # | | one parent | merge |
1319 1319 # +------------------+----------------------+-------------------------+
1320 1320 # | parent in | new parent is <dest> | parents in ::<dest> are |
1321 1321 # | ::<dest> | | remapped to <dest> |
1322 1322 # +------------------+----------------------+-------------------------+
1323 1323 # | unrelated source | new parent is <dest> | ambiguous, abort |
1324 1324 # +------------------+----------------------+-------------------------+
1325 1325 #
1326 1326 # The actual abort is handled by `defineparents`
1327 1327 if len(root.parents()) <= 1:
1328 1328 # ancestors of <root> not ancestors of <dest>
1329 1329 detachset.update(repo.changelog.findmissingrevs([commonbase.rev()],
1330 1330 [root.rev()]))
1331 1331 if emptyrebase:
1332 1332 return None
1333 1333 for rev in sorted(state):
1334 1334 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
1335 1335 # if all parents of this revision are done, then so is this revision
1336 1336 if parents and all((state.get(p) == p for p in parents)):
1337 1337 state[rev] = rev
1338 1338 for r in detachset:
1339 1339 if r not in state:
1340 1340 state[r] = nullmerge
1341 1341 if len(roots) > 1:
1342 1342 # If we have multiple roots, we may have "holes" in the rebase set.
1343 1343 # Rebase roots that descend from those "holes" should not be detached as
1344 1344 # other roots are. We use the special `revignored` to inform rebase that
1345 1345 # the revision should be ignored but that `defineparents` should search
1346 1346 # for a rebase destination that makes sense regarding the rebased topology.
1347 1347 rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset))
1348 1348 for ignored in set(rebasedomain) - set(rebaseset):
1349 1349 state[ignored] = revignored
1350 1350 for r in obsoletenotrebased:
1351 1351 if obsoletenotrebased[r] is None:
1352 1352 state[r] = revpruned
1353 1353 else:
1354 1354 state[r] = revprecursor
1355 1355 return originalwd, dest.rev(), state
1356 1356
1357 def clearrebased(ui, repo, dest, state, skipped, collapsedas=None):
1357 def clearrebased(ui, repo, dest, state, skipped, collapsedas=None, keepf=False):
1358 1358 """dispose of rebased revision at the end of the rebase
1359 1359
1360 1360 If `collapsedas` is not None, the rebase was a collapse whose result is the
1361 `collapsedas` node."""
1361 `collapsedas` node.
1362
1363 If `keepf` is True, the rebase has --keep set and no nodes should be
1364 removed (but bookmarks still need to be moved).
1365 """
1362 1366 tonode = repo.changelog.node
1363 # Move bookmark of skipped nodes to destination. This cannot be handled
1364 # by scmutil.cleanupnodes since it will treat rev as removed (no successor)
1365 # and move bookmark backwards.
1366 bmchanges = [(name, tonode(max(adjustdest(repo, rev, dest, state))))
1367 for rev in skipped
1368 for name in repo.nodebookmarks(tonode(rev))]
1369 if bmchanges:
1370 with repo.transaction('rebase') as tr:
1371 repo._bookmarks.applychanges(repo, tr, bmchanges)
1372 mapping = {}
1367 replacements = {}
1368 moves = {}
1373 1369 for rev, newrev in sorted(state.items()):
1374 1370 if newrev >= 0 and newrev != rev:
1371 oldnode = tonode(rev)
1372 newnode = collapsedas or tonode(newrev)
1373 moves[oldnode] = newnode
1374 if not keepf:
1375 1375 if rev in skipped:
1376 1376 succs = ()
1377 elif collapsedas is not None:
1378 succs = (collapsedas,)
1379 1377 else:
1380 succs = (tonode(newrev),)
1381 mapping[tonode(rev)] = succs
1382 scmutil.cleanupnodes(repo, mapping, 'rebase')
1378 succs = (newnode,)
1379 replacements[oldnode] = succs
1380 scmutil.cleanupnodes(repo, replacements, 'rebase', moves)
1383 1381
1384 1382 def pullrebase(orig, ui, repo, *args, **opts):
1385 1383 'Call rebase after pull if the latter has been invoked with --rebase'
1386 1384 ret = None
1387 1385 if opts.get('rebase'):
1388 1386 if ui.configbool('commands', 'rebase.requiredest'):
1389 1387 msg = _('rebase destination required by configuration')
1390 1388 hint = _('use hg pull followed by hg rebase -d DEST')
1391 1389 raise error.Abort(msg, hint=hint)
1392 1390
1393 1391 with repo.wlock(), repo.lock():
1394 1392 if opts.get('update'):
1395 1393 del opts['update']
1396 1394 ui.debug('--update and --rebase are not compatible, ignoring '
1397 1395 'the update flag\n')
1398 1396
1399 1397 cmdutil.checkunfinished(repo)
1400 1398 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1401 1399 'please commit or shelve your changes first'))
1402 1400
1403 1401 revsprepull = len(repo)
1404 1402 origpostincoming = commands.postincoming
1405 1403 def _dummy(*args, **kwargs):
1406 1404 pass
1407 1405 commands.postincoming = _dummy
1408 1406 try:
1409 1407 ret = orig(ui, repo, *args, **opts)
1410 1408 finally:
1411 1409 commands.postincoming = origpostincoming
1412 1410 revspostpull = len(repo)
1413 1411 if revspostpull > revsprepull:
1414 1412 # the --rev option from pull conflicts with rebase's own --rev,
1415 1413 # so drop it
1416 1414 if 'rev' in opts:
1417 1415 del opts['rev']
1418 1416 # positional argument from pull conflicts with rebase's own
1419 1417 # --source.
1420 1418 if 'source' in opts:
1421 1419 del opts['source']
1422 1420 # revsprepull is the len of the repo, not revnum of tip.
1423 1421 destspace = list(repo.changelog.revs(start=revsprepull))
1424 1422 opts['_destspace'] = destspace
1425 1423 try:
1426 1424 rebase(ui, repo, **opts)
1427 1425 except error.NoMergeDestAbort:
1428 1426 # we can maybe update instead
1429 1427 rev, _a, _b = destutil.destupdate(repo)
1430 1428 if rev == repo['.'].rev():
1431 1429 ui.status(_('nothing to rebase\n'))
1432 1430 else:
1433 1431 ui.status(_('nothing to rebase - updating instead\n'))
1434 1432 # not passing argument to get the bare update behavior
1435 1433 # with warning and trumpets
1436 1434 commands.update(ui, repo)
1437 1435 else:
1438 1436 if opts.get('tool'):
1439 1437 raise error.Abort(_('--tool can only be used with --rebase'))
1440 1438 ret = orig(ui, repo, *args, **opts)
1441 1439
1442 1440 return ret
1443 1441
1444 1442 def _setrebasesetvisibility(repo, revs):
1445 1443 """store the currently rebased set on the repo object
1446 1444
1447 1445 This is used by another function to prevent rebased revisions from becoming
1448 1446 hidden (see issue4504)"""
1449 1447 repo = repo.unfiltered()
1450 1448 repo._rebaseset = revs
1451 1449 # invalidate cache if visibility changes
1452 1450 hiddens = repo.filteredrevcache.get('visible', set())
1453 1451 if revs & hiddens:
1454 1452 repo.invalidatevolatilesets()
1455 1453
1456 1454 def _clearrebasesetvisibiliy(repo):
1457 1455 """remove rebaseset data from the repo"""
1458 1456 repo = repo.unfiltered()
1459 1457 if '_rebaseset' in vars(repo):
1460 1458 del repo._rebaseset
1461 1459
1462 1460 def _rebasedvisible(orig, repo):
1463 1461 """ensure rebased revs stay visible (see issue4504)"""
1464 1462 blockers = orig(repo)
1465 1463 blockers.update(getattr(repo, '_rebaseset', ()))
1466 1464 return blockers
1467 1465
1468 1466 def _filterobsoleterevs(repo, revs):
1469 1467 """returns a set of the obsolete revisions in revs"""
1470 1468 return set(r for r in revs if repo[r].obsolete())
1471 1469
1472 1470 def _computeobsoletenotrebased(repo, rebaseobsrevs, dest):
1473 1471 """return a mapping obsolete => successor for all obsolete nodes to be
1474 1472 rebased that have a successor in the destination
1475 1473
1476 1474 obsolete => None entries in the mapping indicate nodes with no successor"""
1477 1475 obsoletenotrebased = {}
1478 1476
1479 1477 # Build a mapping successor => obsolete nodes for the obsolete
1480 1478 # nodes to be rebased
1481 1479 allsuccessors = {}
1482 1480 cl = repo.changelog
1483 1481 for r in rebaseobsrevs:
1484 1482 node = cl.node(r)
1485 1483 for s in obsutil.allsuccessors(repo.obsstore, [node]):
1486 1484 try:
1487 1485 allsuccessors[cl.rev(s)] = cl.rev(node)
1488 1486 except LookupError:
1489 1487 pass
1490 1488
1491 1489 if allsuccessors:
1492 1490 # Look for successors of obsolete nodes to be rebased among
1493 1491 # the ancestors of dest
1494 1492 ancs = cl.ancestors([dest],
1495 1493 stoprev=min(allsuccessors),
1496 1494 inclusive=True)
1497 1495 for s in allsuccessors:
1498 1496 if s in ancs:
1499 1497 obsoletenotrebased[allsuccessors[s]] = s
1500 1498 elif (s == allsuccessors[s] and
1501 1499 allsuccessors.values().count(s) == 1):
1502 1500 # plain prune
1503 1501 obsoletenotrebased[s] = None
1504 1502
1505 1503 return obsoletenotrebased
1506 1504
1507 1505 def summaryhook(ui, repo):
1508 1506 if not repo.vfs.exists('rebasestate'):
1509 1507 return
1510 1508 try:
1511 1509 rbsrt = rebaseruntime(repo, ui, {})
1512 1510 rbsrt.restorestatus()
1513 1511 state = rbsrt.state
1514 1512 except error.RepoLookupError:
1515 1513 # i18n: column positioning for "hg summary"
1516 1514 msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
1517 1515 ui.write(msg)
1518 1516 return
1519 1517 numrebased = len([i for i in state.itervalues() if i >= 0])
1520 1518 # i18n: column positioning for "hg summary"
1521 1519 ui.write(_('rebase: %s, %s (rebase --continue)\n') %
1522 1520 (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
1523 1521 ui.label(_('%d remaining'), 'rebase.remaining') %
1524 1522 (len(state) - numrebased)))
1525 1523
1526 1524 def uisetup(ui):
1527 1525 #Replace pull with a decorator to provide --rebase option
1528 1526 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
1529 1527 entry[1].append(('', 'rebase', None,
1530 1528 _("rebase working directory to branch head")))
1531 1529 entry[1].append(('t', 'tool', '',
1532 1530 _("specify merge tool for rebase")))
1533 1531 cmdutil.summaryhooks.add('rebase', summaryhook)
1534 1532 cmdutil.unfinishedstates.append(
1535 1533 ['rebasestate', False, False, _('rebase in progress'),
1536 1534 _("use 'hg rebase --continue' or 'hg rebase --abort'")])
1537 1535 cmdutil.afterresolvedstates.append(
1538 1536 ['rebasestate', _('hg rebase --continue')])
1539 1537 # ensure rebased rev are not hidden
1540 1538 extensions.wrapfunction(repoview, 'pinnedrevs', _rebasedvisible)
@@ -1,1110 +1,1124 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 wdirid,
23 23 wdirrev,
24 24 )
25 25
26 26 from . import (
27 27 encoding,
28 28 error,
29 29 match as matchmod,
30 30 obsolete,
31 31 obsutil,
32 32 pathutil,
33 33 phases,
34 34 pycompat,
35 35 revsetlang,
36 36 similar,
37 37 util,
38 38 )
39 39
40 40 if pycompat.osname == 'nt':
41 41 from . import scmwindows as scmplatform
42 42 else:
43 43 from . import scmposix as scmplatform
44 44
45 45 termsize = scmplatform.termsize
46 46
47 47 class status(tuple):
48 48 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
49 49 and 'ignored' properties are only relevant to the working copy.
50 50 '''
51 51
52 52 __slots__ = ()
53 53
54 54 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
55 55 clean):
56 56 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
57 57 ignored, clean))
58 58
59 59 @property
60 60 def modified(self):
61 61 '''files that have been modified'''
62 62 return self[0]
63 63
64 64 @property
65 65 def added(self):
66 66 '''files that have been added'''
67 67 return self[1]
68 68
69 69 @property
70 70 def removed(self):
71 71 '''files that have been removed'''
72 72 return self[2]
73 73
74 74 @property
75 75 def deleted(self):
76 76 '''files that are in the dirstate, but have been deleted from the
77 77 working copy (aka "missing")
78 78 '''
79 79 return self[3]
80 80
81 81 @property
82 82 def unknown(self):
83 83 '''files not in the dirstate that are not ignored'''
84 84 return self[4]
85 85
86 86 @property
87 87 def ignored(self):
88 88 '''files not in the dirstate that are ignored (by _dirignore())'''
89 89 return self[5]
90 90
91 91 @property
92 92 def clean(self):
93 93 '''files that have not been modified'''
94 94 return self[6]
95 95
96 96 def __repr__(self, *args, **kwargs):
97 97 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
98 98 'unknown=%r, ignored=%r, clean=%r>') % self)
99 99
100 100 def itersubrepos(ctx1, ctx2):
101 101 """find subrepos in ctx1 or ctx2"""
102 102 # Create a (subpath, ctx) mapping where we prefer subpaths from
103 103 # ctx1. The subpaths from ctx2 are important when the .hgsub file
104 104 # has been modified (in ctx2) but not yet committed (in ctx1).
105 105 subpaths = dict.fromkeys(ctx2.substate, ctx2)
106 106 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
107 107
108 108 missing = set()
109 109
110 110 for subpath in ctx2.substate:
111 111 if subpath not in ctx1.substate:
112 112 del subpaths[subpath]
113 113 missing.add(subpath)
114 114
115 115 for subpath, ctx in sorted(subpaths.iteritems()):
116 116 yield subpath, ctx.sub(subpath)
117 117
118 118 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
119 119 # status and diff will have an accurate result when it does
120 120 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
121 121 # against itself.
122 122 for subpath in missing:
123 123 yield subpath, ctx2.nullsub(subpath, ctx1)
124 124
125 125 def nochangesfound(ui, repo, excluded=None):
126 126 '''Report no changes for push/pull, excluded is None or a list of
127 127 nodes excluded from the push/pull.
128 128 '''
129 129 secretlist = []
130 130 if excluded:
131 131 for n in excluded:
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(_("no changes found (ignored %d secret changesets)\n")
138 138 % len(secretlist))
139 139 else:
140 140 ui.status(_("no changes found\n"))
141 141
142 142 def callcatch(ui, func):
143 143 """call func() with global exception handling
144 144
145 145 return func() if no exception happens. otherwise do some error handling
146 146 and return an exit code accordingly. does not handle all exceptions.
147 147 """
148 148 try:
149 149 try:
150 150 return func()
151 151 except: # re-raises
152 152 ui.traceback()
153 153 raise
154 154 # Global exception handling, alphabetically
155 155 # Mercurial-specific first, followed by built-in and library exceptions
156 156 except error.LockHeld as inst:
157 157 if inst.errno == errno.ETIMEDOUT:
158 158 reason = _('timed out waiting for lock held by %r') % inst.locker
159 159 else:
160 160 reason = _('lock held by %r') % inst.locker
161 161 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
162 162 if not inst.locker:
163 163 ui.warn(_("(lock might be very busy)\n"))
164 164 except error.LockUnavailable as inst:
165 165 ui.warn(_("abort: could not lock %s: %s\n") %
166 166 (inst.desc or inst.filename, inst.strerror))
167 167 except error.OutOfBandError as inst:
168 168 if inst.args:
169 169 msg = _("abort: remote error:\n")
170 170 else:
171 171 msg = _("abort: remote error\n")
172 172 ui.warn(msg)
173 173 if inst.args:
174 174 ui.warn(''.join(inst.args))
175 175 if inst.hint:
176 176 ui.warn('(%s)\n' % inst.hint)
177 177 except error.RepoError as inst:
178 178 ui.warn(_("abort: %s!\n") % inst)
179 179 if inst.hint:
180 180 ui.warn(_("(%s)\n") % inst.hint)
181 181 except error.ResponseError as inst:
182 182 ui.warn(_("abort: %s") % inst.args[0])
183 183 if not isinstance(inst.args[1], basestring):
184 184 ui.warn(" %r\n" % (inst.args[1],))
185 185 elif not inst.args[1]:
186 186 ui.warn(_(" empty string\n"))
187 187 else:
188 188 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
189 189 except error.CensoredNodeError as inst:
190 190 ui.warn(_("abort: file censored %s!\n") % inst)
191 191 except error.RevlogError as inst:
192 192 ui.warn(_("abort: %s!\n") % inst)
193 193 except error.InterventionRequired as inst:
194 194 ui.warn("%s\n" % inst)
195 195 if inst.hint:
196 196 ui.warn(_("(%s)\n") % inst.hint)
197 197 return 1
198 198 except error.WdirUnsupported:
199 199 ui.warn(_("abort: working directory revision cannot be specified\n"))
200 200 except error.Abort as inst:
201 201 ui.warn(_("abort: %s\n") % inst)
202 202 if inst.hint:
203 203 ui.warn(_("(%s)\n") % inst.hint)
204 204 except ImportError as inst:
205 205 ui.warn(_("abort: %s!\n") % inst)
206 206 m = str(inst).split()[-1]
207 207 if m in "mpatch bdiff".split():
208 208 ui.warn(_("(did you forget to compile extensions?)\n"))
209 209 elif m in "zlib".split():
210 210 ui.warn(_("(is your Python install correct?)\n"))
211 211 except IOError as inst:
212 212 if util.safehasattr(inst, "code"):
213 213 ui.warn(_("abort: %s\n") % inst)
214 214 elif util.safehasattr(inst, "reason"):
215 215 try: # usually it is in the form (errno, strerror)
216 216 reason = inst.reason.args[1]
217 217 except (AttributeError, IndexError):
218 218 # it might be anything, for example a string
219 219 reason = inst.reason
220 220 if isinstance(reason, unicode):
221 221 # SSLError of Python 2.7.9 contains a unicode
222 222 reason = encoding.unitolocal(reason)
223 223 ui.warn(_("abort: error: %s\n") % reason)
224 224 elif (util.safehasattr(inst, "args")
225 225 and inst.args and inst.args[0] == errno.EPIPE):
226 226 pass
227 227 elif getattr(inst, "strerror", None):
228 228 if getattr(inst, "filename", None):
229 229 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
230 230 else:
231 231 ui.warn(_("abort: %s\n") % inst.strerror)
232 232 else:
233 233 raise
234 234 except OSError as inst:
235 235 if getattr(inst, "filename", None) is not None:
236 236 ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
237 237 else:
238 238 ui.warn(_("abort: %s\n") % inst.strerror)
239 239 except MemoryError:
240 240 ui.warn(_("abort: out of memory\n"))
241 241 except SystemExit as inst:
242 242 # Commands shouldn't sys.exit directly, but give a return code.
243 243 # Just in case catch this and and pass exit code to caller.
244 244 return inst.code
245 245 except socket.error as inst:
246 246 ui.warn(_("abort: %s\n") % inst.args[-1])
247 247
248 248 return -1
249 249
250 250 def checknewlabel(repo, lbl, kind):
251 251 # Do not use the "kind" parameter in ui output.
252 252 # It makes strings difficult to translate.
253 253 if lbl in ['tip', '.', 'null']:
254 254 raise error.Abort(_("the name '%s' is reserved") % lbl)
255 255 for c in (':', '\0', '\n', '\r'):
256 256 if c in lbl:
257 257 raise error.Abort(_("%r cannot be used in a name") % c)
258 258 try:
259 259 int(lbl)
260 260 raise error.Abort(_("cannot use an integer as a name"))
261 261 except ValueError:
262 262 pass
263 263
264 264 def checkfilename(f):
265 265 '''Check that the filename f is an acceptable filename for a tracked file'''
266 266 if '\r' in f or '\n' in f:
267 267 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
268 268
269 269 def checkportable(ui, f):
270 270 '''Check if filename f is portable and warn or abort depending on config'''
271 271 checkfilename(f)
272 272 abort, warn = checkportabilityalert(ui)
273 273 if abort or warn:
274 274 msg = util.checkwinfilename(f)
275 275 if msg:
276 276 msg = "%s: %r" % (msg, f)
277 277 if abort:
278 278 raise error.Abort(msg)
279 279 ui.warn(_("warning: %s\n") % msg)
280 280
281 281 def checkportabilityalert(ui):
282 282 '''check if the user's config requests nothing, a warning, or abort for
283 283 non-portable filenames'''
284 284 val = ui.config('ui', 'portablefilenames')
285 285 lval = val.lower()
286 286 bval = util.parsebool(val)
287 287 abort = pycompat.osname == 'nt' or lval == 'abort'
288 288 warn = bval or lval == 'warn'
289 289 if bval is None and not (warn or abort or lval == 'ignore'):
290 290 raise error.ConfigError(
291 291 _("ui.portablefilenames value is invalid ('%s')") % val)
292 292 return abort, warn
293 293
294 294 class casecollisionauditor(object):
295 295 def __init__(self, ui, abort, dirstate):
296 296 self._ui = ui
297 297 self._abort = abort
298 298 allfiles = '\0'.join(dirstate._map)
299 299 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
300 300 self._dirstate = dirstate
301 301 # The purpose of _newfiles is so that we don't complain about
302 302 # case collisions if someone were to call this object with the
303 303 # same filename twice.
304 304 self._newfiles = set()
305 305
306 306 def __call__(self, f):
307 307 if f in self._newfiles:
308 308 return
309 309 fl = encoding.lower(f)
310 310 if fl in self._loweredfiles and f not in self._dirstate:
311 311 msg = _('possible case-folding collision for %s') % f
312 312 if self._abort:
313 313 raise error.Abort(msg)
314 314 self._ui.warn(_("warning: %s\n") % msg)
315 315 self._loweredfiles.add(fl)
316 316 self._newfiles.add(f)
317 317
318 318 def filteredhash(repo, maxrev):
319 319 """build hash of filtered revisions in the current repoview.
320 320
321 321 Multiple caches perform up-to-date validation by checking that the
322 322 tiprev and tipnode stored in the cache file match the current repository.
323 323 However, this is not sufficient for validating repoviews because the set
324 324 of revisions in the view may change without the repository tiprev and
325 325 tipnode changing.
326 326
327 327 This function hashes all the revs filtered from the view and returns
328 328 that SHA-1 digest.
329 329 """
330 330 cl = repo.changelog
331 331 if not cl.filteredrevs:
332 332 return None
333 333 key = None
334 334 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
335 335 if revs:
336 336 s = hashlib.sha1()
337 337 for rev in revs:
338 338 s.update('%d;' % rev)
339 339 key = s.digest()
340 340 return key
341 341
342 342 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
343 343 '''yield every hg repository under path, always recursively.
344 344 The recurse flag will only control recursion into repo working dirs'''
345 345 def errhandler(err):
346 346 if err.filename == path:
347 347 raise err
348 348 samestat = getattr(os.path, 'samestat', None)
349 349 if followsym and samestat is not None:
350 350 def adddir(dirlst, dirname):
351 351 match = False
352 352 dirstat = os.stat(dirname)
353 353 for lstdirstat in dirlst:
354 354 if samestat(dirstat, lstdirstat):
355 355 match = True
356 356 break
357 357 if not match:
358 358 dirlst.append(dirstat)
359 359 return not match
360 360 else:
361 361 followsym = False
362 362
363 363 if (seen_dirs is None) and followsym:
364 364 seen_dirs = []
365 365 adddir(seen_dirs, path)
366 366 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
367 367 dirs.sort()
368 368 if '.hg' in dirs:
369 369 yield root # found a repository
370 370 qroot = os.path.join(root, '.hg', 'patches')
371 371 if os.path.isdir(os.path.join(qroot, '.hg')):
372 372 yield qroot # we have a patch queue repo here
373 373 if recurse:
374 374 # avoid recursing inside the .hg directory
375 375 dirs.remove('.hg')
376 376 else:
377 377 dirs[:] = [] # don't descend further
378 378 elif followsym:
379 379 newdirs = []
380 380 for d in dirs:
381 381 fname = os.path.join(root, d)
382 382 if adddir(seen_dirs, fname):
383 383 if os.path.islink(fname):
384 384 for hgname in walkrepos(fname, True, seen_dirs):
385 385 yield hgname
386 386 else:
387 387 newdirs.append(d)
388 388 dirs[:] = newdirs
389 389
390 390 def binnode(ctx):
391 391 """Return binary node id for a given basectx"""
392 392 node = ctx.node()
393 393 if node is None:
394 394 return wdirid
395 395 return node
396 396
397 397 def intrev(ctx):
398 398 """Return integer for a given basectx that can be used in comparison or
399 399 arithmetic operation"""
400 400 rev = ctx.rev()
401 401 if rev is None:
402 402 return wdirrev
403 403 return rev
404 404
405 405 def revsingle(repo, revspec, default='.'):
406 406 if not revspec and revspec != 0:
407 407 return repo[default]
408 408
409 409 l = revrange(repo, [revspec])
410 410 if not l:
411 411 raise error.Abort(_('empty revision set'))
412 412 return repo[l.last()]
413 413
414 414 def _pairspec(revspec):
415 415 tree = revsetlang.parse(revspec)
416 416 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
417 417
418 418 def revpair(repo, revs):
419 419 if not revs:
420 420 return repo.dirstate.p1(), None
421 421
422 422 l = revrange(repo, revs)
423 423
424 424 if not l:
425 425 first = second = None
426 426 elif l.isascending():
427 427 first = l.min()
428 428 second = l.max()
429 429 elif l.isdescending():
430 430 first = l.max()
431 431 second = l.min()
432 432 else:
433 433 first = l.first()
434 434 second = l.last()
435 435
436 436 if first is None:
437 437 raise error.Abort(_('empty revision range'))
438 438 if (first == second and len(revs) >= 2
439 439 and not all(revrange(repo, [r]) for r in revs)):
440 440 raise error.Abort(_('empty revision on one side of range'))
441 441
442 442 # if top-level is range expression, the result must always be a pair
443 443 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
444 444 return repo.lookup(first), None
445 445
446 446 return repo.lookup(first), repo.lookup(second)
447 447
448 448 def revrange(repo, specs):
449 449 """Execute 1 to many revsets and return the union.
450 450
451 451 This is the preferred mechanism for executing revsets using user-specified
452 452 config options, such as revset aliases.
453 453
454 454 The revsets specified by ``specs`` will be executed via a chained ``OR``
455 455 expression. If ``specs`` is empty, an empty result is returned.
456 456
457 457 ``specs`` can contain integers, in which case they are assumed to be
458 458 revision numbers.
459 459
460 460 It is assumed the revsets are already formatted. If you have arguments
461 461 that need to be expanded in the revset, call ``revsetlang.formatspec()``
462 462 and pass the result as an element of ``specs``.
463 463
464 464 Specifying a single revset is allowed.
465 465
466 466 Returns a ``revset.abstractsmartset`` which is a list-like interface over
467 467 integer revisions.
468 468 """
469 469 allspecs = []
470 470 for spec in specs:
471 471 if isinstance(spec, int):
472 472 spec = revsetlang.formatspec('rev(%d)', spec)
473 473 allspecs.append(spec)
474 474 return repo.anyrevs(allspecs, user=True)
475 475
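The revrange docstring above amounts to a small API contract: pass pre-formatted revset strings (or bare integers) and get back a smartset of their union. A hedged usage sketch -- `repo`, `start` and `end` are assumed to already exist in the caller:

    from mercurial import revsetlang, scmutil

    # one formatted spec, one plain revset, and one literal revision number
    spec = revsetlang.formatspec('%d::%d', start, end)  # safe argument expansion
    revs = scmutil.revrange(repo, [spec, 'bookmark()', 0])
    for rev in revs:                  # smartset iterates integer revisions
        repo.ui.write('%d\n' % rev)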
476 476 def meaningfulparents(repo, ctx):
477 477 """Return list of meaningful (or all if debug) parentrevs for rev.
478 478
479 479 For merges (two non-nullrev revisions) both parents are meaningful.
480 480 Otherwise the first parent revision is considered meaningful if it
481 481 is not the preceding revision.
482 482 """
483 483 parents = ctx.parents()
484 484 if len(parents) > 1:
485 485 return parents
486 486 if repo.ui.debugflag:
487 487 return [parents[0], repo['null']]
488 488 if parents[0].rev() >= intrev(ctx) - 1:
489 489 return []
490 490 return parents
491 491
492 492 def expandpats(pats):
493 493 '''Expand bare globs when running on windows.
494 494 On posix we assume it has already been done by sh.'''
495 495 if not util.expandglobs:
496 496 return list(pats)
497 497 ret = []
498 498 for kindpat in pats:
499 499 kind, pat = matchmod._patsplit(kindpat, None)
500 500 if kind is None:
501 501 try:
502 502 globbed = glob.glob(pat)
503 503 except re.error:
504 504 globbed = [pat]
505 505 if globbed:
506 506 ret.extend(globbed)
507 507 continue
508 508 ret.append(kindpat)
509 509 return ret
510 510
511 511 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
512 512 badfn=None):
513 513 '''Return a matcher and the patterns that were used.
514 514 The matcher will warn about bad matches, unless an alternate badfn callback
515 515 is provided.'''
516 516 if pats == ("",):
517 517 pats = []
518 518 if opts is None:
519 519 opts = {}
520 520 if not globbed and default == 'relpath':
521 521 pats = expandpats(pats or [])
522 522
523 523 def bad(f, msg):
524 524 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
525 525
526 526 if badfn is None:
527 527 badfn = bad
528 528
529 529 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
530 530 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
531 531
532 532 if m.always():
533 533 pats = []
534 534 return m, pats
535 535
536 536 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
537 537 badfn=None):
538 538 '''Return a matcher that will warn about bad matches.'''
539 539 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
540 540
541 541 def matchall(repo):
542 542 '''Return a matcher that will efficiently match everything.'''
543 543 return matchmod.always(repo.root, repo.getcwd())
544 544
545 545 def matchfiles(repo, files, badfn=None):
546 546 '''Return a matcher that will efficiently match exactly these files.'''
547 547 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
548 548
549 549 def origpath(ui, repo, filepath):
550 550 '''customize where .orig files are created
551 551
552 552 Fetch user defined path from config file: [ui] origbackuppath = <path>
553 553 Fall back to default (filepath) if not specified
554 554 '''
555 555 origbackuppath = ui.config('ui', 'origbackuppath')
556 556 if origbackuppath is None:
557 557 return filepath + ".orig"
558 558
559 559 filepathfromroot = os.path.relpath(filepath, start=repo.root)
560 560 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
561 561
562 562 origbackupdir = repo.vfs.dirname(fullorigpath)
563 563 if not repo.vfs.exists(origbackupdir):
564 564 ui.note(_('creating directory: %s\n') % origbackupdir)
565 565 util.makedirs(origbackupdir)
566 566
567 567 return fullorigpath + ".orig"
568 568
569 569 class _containsnode(object):
570 570 """proxy __contains__(node) to container.__contains__ which accepts revs"""
571 571
572 572 def __init__(self, repo, revcontainer):
573 573 self._torev = repo.changelog.rev
574 574 self._revcontains = revcontainer.__contains__
575 575
576 576 def __contains__(self, node):
577 577 return self._revcontains(self._torev(node))
578 578
579 def cleanupnodes(repo, replacements, operation):
579 def cleanupnodes(repo, replacements, operation, moves=None):
580 580 """do common cleanups when old nodes are replaced by new nodes
581 581
582 582 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
583 583 (we might also want to move working directory parent in the future)
584 584
585 By default, bookmark moves are calculated automatically from 'replacements',
586 but 'moves' can be used to override that. Also, 'moves' may include
587 additional bookmark moves that should not have associated obsmarkers.
588
585 589 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
586 590 have replacements. operation is a string, like "rebase".
587 591 """
592 if not replacements and not moves:
593 return
594
595 # translate mapping's other forms
588 596 if not util.safehasattr(replacements, 'items'):
589 597 replacements = {n: () for n in replacements}
590 598
591 599 # Calculate bookmark movements
600 if moves is None:
592 601 moves = {}
593 602 # Unfiltered repo is needed since nodes in replacements might be hidden.
594 603 unfi = repo.unfiltered()
595 604 for oldnode, newnodes in replacements.items():
605 if oldnode in moves:
606 continue
596 607 if len(newnodes) > 1:
597 608 # usually a split, take the one with biggest rev number
598 609 newnode = next(unfi.set('max(%ln)', newnodes)).node()
599 610 elif len(newnodes) == 0:
600 611 # move bookmark backwards
601 612 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
602 613 list(replacements)))
603 614 if roots:
604 615 newnode = roots[0].node()
605 616 else:
606 617 newnode = nullid
607 618 else:
608 619 newnode = newnodes[0]
609 620 moves[oldnode] = newnode
610 621
611 622 with repo.transaction('cleanup') as tr:
612 623 # Move bookmarks
613 624 bmarks = repo._bookmarks
614 625 bmarkchanges = []
615 626 allnewnodes = [n for ns in replacements.values() for n in ns]
616 627 for oldnode, newnode in moves.items():
617 628 oldbmarks = repo.nodebookmarks(oldnode)
618 629 if not oldbmarks:
619 630 continue
620 631 from . import bookmarks # avoid import cycle
621 632 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
622 633 (oldbmarks, hex(oldnode), hex(newnode)))
623 634 # Delete divergent bookmarks being parents of related newnodes
624 635 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
625 636 allnewnodes, newnode, oldnode)
626 637 deletenodes = _containsnode(repo, deleterevs)
627 638 for name in oldbmarks:
628 639 bmarkchanges.append((name, newnode))
629 640 for b in bookmarks.divergent2delete(repo, deletenodes, name):
630 641 bmarkchanges.append((b, None))
631 642
632 643 if bmarkchanges:
633 644 bmarks.applychanges(repo, tr, bmarkchanges)
634 645
635 646 # Obsolete or strip nodes
636 647 if obsolete.isenabled(repo, obsolete.createmarkersopt):
637 648 # If a node is already obsoleted, and we want to obsolete it
638 649 # without a successor, skip that obssolete request since it's
639 650 # unnecessary. That's the "if s or not isobs(n)" check below.
640 651 # Also sort the node in topology order, that might be useful for
641 652 # some obsstore logic.
642 653 # NOTE: the filtering and sorting might belong to createmarkers.
643 654 isobs = unfi.obsstore.successors.__contains__
644 655 torev = unfi.changelog.rev
645 656 sortfunc = lambda ns: torev(ns[0])
646 657 rels = [(unfi[n], tuple(unfi[m] for m in s))
647 658 for n, s in sorted(replacements.items(), key=sortfunc)
648 659 if s or not isobs(n)]
660 if rels:
649 661 obsolete.createmarkers(repo, rels, operation=operation)
650 662 else:
651 663 from . import repair # avoid import cycle
652 repair.delayedstrip(repo.ui, repo, list(replacements), operation)
664 tostrip = list(replacements)
665 if tostrip:
666 repair.delayedstrip(repo.ui, repo, tostrip, operation)
653 667
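Per the updated docstring, the new optional `moves` argument lets a caller move bookmarks without recording obsmarkers for the corresponding nodes, and any entry in `moves` overrides the automatically computed move for that node. A hedged example -- `old`, `new` and `extraold` are hypothetical node ids and 'myoperation' a hypothetical operation name:

    # `old` is obsoleted/stripped in favour of `new` as usual; bookmarks on
    # `extraold` are also moved to `new`, but `extraold` itself is neither
    # obsoleted nor stripped because it is not listed in `replacements`.
    replacements = {old: (new,)}
    moves = {old: new, extraold: new}
    scmutil.cleanupnodes(repo, replacements, 'myoperation', moves)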
654 668 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
655 669 if opts is None:
656 670 opts = {}
657 671 m = matcher
658 672 if dry_run is None:
659 673 dry_run = opts.get('dry_run')
660 674 if similarity is None:
661 675 similarity = float(opts.get('similarity') or 0)
662 676
663 677 ret = 0
664 678 join = lambda f: os.path.join(prefix, f)
665 679
666 680 wctx = repo[None]
667 681 for subpath in sorted(wctx.substate):
668 682 submatch = matchmod.subdirmatcher(subpath, m)
669 683 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
670 684 sub = wctx.sub(subpath)
671 685 try:
672 686 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
673 687 ret = 1
674 688 except error.LookupError:
675 689 repo.ui.status(_("skipping missing subrepository: %s\n")
676 690 % join(subpath))
677 691
678 692 rejected = []
679 693 def badfn(f, msg):
680 694 if f in m.files():
681 695 m.bad(f, msg)
682 696 rejected.append(f)
683 697
684 698 badmatch = matchmod.badmatch(m, badfn)
685 699 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
686 700 badmatch)
687 701
688 702 unknownset = set(unknown + forgotten)
689 703 toprint = unknownset.copy()
690 704 toprint.update(deleted)
691 705 for abs in sorted(toprint):
692 706 if repo.ui.verbose or not m.exact(abs):
693 707 if abs in unknownset:
694 708 status = _('adding %s\n') % m.uipath(abs)
695 709 else:
696 710 status = _('removing %s\n') % m.uipath(abs)
697 711 repo.ui.status(status)
698 712
699 713 renames = _findrenames(repo, m, added + unknown, removed + deleted,
700 714 similarity)
701 715
702 716 if not dry_run:
703 717 _markchanges(repo, unknown + forgotten, deleted, renames)
704 718
705 719 for f in rejected:
706 720 if f in m.files():
707 721 return 1
708 722 return ret
709 723
710 724 def marktouched(repo, files, similarity=0.0):
711 725 '''Assert that files have somehow been operated upon. files are relative to
712 726 the repo root.'''
713 727 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
714 728 rejected = []
715 729
716 730 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
717 731
718 732 if repo.ui.verbose:
719 733 unknownset = set(unknown + forgotten)
720 734 toprint = unknownset.copy()
721 735 toprint.update(deleted)
722 736 for abs in sorted(toprint):
723 737 if abs in unknownset:
724 738 status = _('adding %s\n') % abs
725 739 else:
726 740 status = _('removing %s\n') % abs
727 741 repo.ui.status(status)
728 742
729 743 renames = _findrenames(repo, m, added + unknown, removed + deleted,
730 744 similarity)
731 745
732 746 _markchanges(repo, unknown + forgotten, deleted, renames)
733 747
734 748 for f in rejected:
735 749 if f in m.files():
736 750 return 1
737 751 return 0
738 752
739 753 def _interestingfiles(repo, matcher):
740 754 '''Walk dirstate with matcher, looking for files that addremove would care
741 755 about.
742 756
743 757 This is different from dirstate.status because it doesn't care about
744 758 whether files are modified or clean.'''
745 759 added, unknown, deleted, removed, forgotten = [], [], [], [], []
746 760 audit_path = pathutil.pathauditor(repo.root, cached=True)
747 761
748 762 ctx = repo[None]
749 763 dirstate = repo.dirstate
750 764 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
751 765 full=False)
752 766 for abs, st in walkresults.iteritems():
753 767 dstate = dirstate[abs]
754 768 if dstate == '?' and audit_path.check(abs):
755 769 unknown.append(abs)
756 770 elif dstate != 'r' and not st:
757 771 deleted.append(abs)
758 772 elif dstate == 'r' and st:
759 773 forgotten.append(abs)
760 774 # for finding renames
761 775 elif dstate == 'r' and not st:
762 776 removed.append(abs)
763 777 elif dstate == 'a':
764 778 added.append(abs)
765 779
766 780 return added, unknown, deleted, removed, forgotten
767 781
768 782 def _findrenames(repo, matcher, added, removed, similarity):
769 783 '''Find renames from removed files to added ones.'''
770 784 renames = {}
771 785 if similarity > 0:
772 786 for old, new, score in similar.findrenames(repo, added, removed,
773 787 similarity):
774 788 if (repo.ui.verbose or not matcher.exact(old)
775 789 or not matcher.exact(new)):
776 790 repo.ui.status(_('recording removal of %s as rename to %s '
777 791 '(%d%% similar)\n') %
778 792 (matcher.rel(old), matcher.rel(new),
779 793 score * 100))
780 794 renames[new] = old
781 795 return renames
782 796
783 797 def _markchanges(repo, unknown, deleted, renames):
784 798 '''Marks the files in unknown as added, the files in deleted as removed,
785 799 and the files in renames as copied.'''
786 800 wctx = repo[None]
787 801 with repo.wlock():
788 802 wctx.forget(deleted)
789 803 wctx.add(unknown)
790 804 for new, old in renames.iteritems():
791 805 wctx.copy(old, new)
792 806
793 807 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
794 808 """Update the dirstate to reflect the intent of copying src to dst. For
795 809 different reasons it might not end with dst being marked as copied from src.
796 810 """
797 811 origsrc = repo.dirstate.copied(src) or src
798 812 if dst == origsrc: # copying back a copy?
799 813 if repo.dirstate[dst] not in 'mn' and not dryrun:
800 814 repo.dirstate.normallookup(dst)
801 815 else:
802 816 if repo.dirstate[origsrc] == 'a' and origsrc == src:
803 817 if not ui.quiet:
804 818 ui.warn(_("%s has not been committed yet, so no copy "
805 819 "data will be stored for %s.\n")
806 820 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
807 821 if repo.dirstate[dst] in '?r' and not dryrun:
808 822 wctx.add([dst])
809 823 elif not dryrun:
810 824 wctx.copy(origsrc, dst)
811 825
812 826 def readrequires(opener, supported):
813 827 '''Reads and parses .hg/requires and checks if all entries found
814 828 are in the list of supported features.'''
815 829 requirements = set(opener.read("requires").splitlines())
816 830 missings = []
817 831 for r in requirements:
818 832 if r not in supported:
819 833 if not r or not r[0].isalnum():
820 834 raise error.RequirementError(_(".hg/requires file is corrupt"))
821 835 missings.append(r)
822 836 missings.sort()
823 837 if missings:
824 838 raise error.RequirementError(
825 839 _("repository requires features unknown to this Mercurial: %s")
826 840 % " ".join(missings),
827 841 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
828 842 " for more information"))
829 843 return requirements
830 844
831 845 def writerequires(opener, requirements):
832 846 with opener('requires', 'w') as fp:
833 847 for r in sorted(requirements):
834 848 fp.write("%s\n" % r)
835 849
836 850 class filecachesubentry(object):
837 851 def __init__(self, path, stat):
838 852 self.path = path
839 853 self.cachestat = None
840 854 self._cacheable = None
841 855
842 856 if stat:
843 857 self.cachestat = filecachesubentry.stat(self.path)
844 858
845 859 if self.cachestat:
846 860 self._cacheable = self.cachestat.cacheable()
847 861 else:
848 862 # None means we don't know yet
849 863 self._cacheable = None
850 864
851 865 def refresh(self):
852 866 if self.cacheable():
853 867 self.cachestat = filecachesubentry.stat(self.path)
854 868
855 869 def cacheable(self):
856 870 if self._cacheable is not None:
857 871 return self._cacheable
858 872
859 873 # we don't know yet, assume it is for now
860 874 return True
861 875
862 876 def changed(self):
863 877 # no point in going further if we can't cache it
864 878 if not self.cacheable():
865 879 return True
866 880
867 881 newstat = filecachesubentry.stat(self.path)
868 882
869 883 # we may not know if it's cacheable yet, check again now
870 884 if newstat and self._cacheable is None:
871 885 self._cacheable = newstat.cacheable()
872 886
873 887 # check again
874 888 if not self._cacheable:
875 889 return True
876 890
877 891 if self.cachestat != newstat:
878 892 self.cachestat = newstat
879 893 return True
880 894 else:
881 895 return False
882 896
883 897 @staticmethod
884 898 def stat(path):
885 899 try:
886 900 return util.cachestat(path)
887 901 except OSError as e:
888 902 if e.errno != errno.ENOENT:
889 903 raise
890 904
891 905 class filecacheentry(object):
892 906 def __init__(self, paths, stat=True):
893 907 self._entries = []
894 908 for path in paths:
895 909 self._entries.append(filecachesubentry(path, stat))
896 910
897 911 def changed(self):
898 912 '''true if any entry has changed'''
899 913 for entry in self._entries:
900 914 if entry.changed():
901 915 return True
902 916 return False
903 917
904 918 def refresh(self):
905 919 for entry in self._entries:
906 920 entry.refresh()
907 921
908 922 class filecache(object):
909 923 '''A property like decorator that tracks files under .hg/ for updates.
910 924
911 925 Records stat info when called in _filecache.
912 926
913 927 On subsequent calls, compares old stat info with new info, and recreates the
914 928 object when any of the files changes, updating the new stat info in
915 929 _filecache.
916 930
917 931 Mercurial either atomic renames or appends for files under .hg,
918 932 so to ensure the cache is reliable we need the filesystem to be able
919 933 to tell us if a file has been replaced. If it can't, we fallback to
920 934 recreating the object on every call (essentially the same behavior as
921 935 propertycache).
922 936
923 937 '''
924 938 def __init__(self, *paths):
925 939 self.paths = paths
926 940
927 941 def join(self, obj, fname):
928 942 """Used to compute the runtime path of a cached file.
929 943
930 944 Users should subclass filecache and provide their own version of this
931 945 function to call the appropriate join function on 'obj' (an instance
932 946 of the class that its member function was decorated).
933 947 """
934 948 raise NotImplementedError
935 949
936 950 def __call__(self, func):
937 951 self.func = func
938 952 self.name = func.__name__.encode('ascii')
939 953 return self
940 954
941 955 def __get__(self, obj, type=None):
942 956 # if accessed on the class, return the descriptor itself.
943 957 if obj is None:
944 958 return self
945 959 # do we need to check if the file changed?
946 960 if self.name in obj.__dict__:
947 961 assert self.name in obj._filecache, self.name
948 962 return obj.__dict__[self.name]
949 963
950 964 entry = obj._filecache.get(self.name)
951 965
952 966 if entry:
953 967 if entry.changed():
954 968 entry.obj = self.func(obj)
955 969 else:
956 970 paths = [self.join(obj, path) for path in self.paths]
957 971
958 972 # We stat -before- creating the object so our cache doesn't lie if
959 973 # a writer modified between the time we read and stat
960 974 entry = filecacheentry(paths, True)
961 975 entry.obj = self.func(obj)
962 976
963 977 obj._filecache[self.name] = entry
964 978
965 979 obj.__dict__[self.name] = entry.obj
966 980 return entry.obj
967 981
968 982 def __set__(self, obj, value):
969 983 if self.name not in obj._filecache:
970 984 # we add an entry for the missing value because X in __dict__
971 985 # implies X in _filecache
972 986 paths = [self.join(obj, path) for path in self.paths]
973 987 ce = filecacheentry(paths, False)
974 988 obj._filecache[self.name] = ce
975 989 else:
976 990 ce = obj._filecache[self.name]
977 991
978 992 ce.obj = value # update cached copy
979 993 obj.__dict__[self.name] = value # update copy returned by obj.x
980 994
981 995 def __delete__(self, obj):
982 996 try:
983 997 del obj.__dict__[self.name]
984 998 except KeyError:
985 999 raise AttributeError(self.name)
986 1000
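The filecache docstring above describes a descriptor protocol rather than a ready-to-use decorator: users subclass it, provide join(), and keep a `_filecache` dict on the decorated object. A minimal sketch under those assumptions (everything except the filecache class itself is hypothetical):

    class vfsfilecache(filecache):
        def join(self, obj, fname):
            return obj.vfs.join(fname)       # resolve the path under obj's vfs

    class bookmarkstore(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}             # required by the descriptor

        @vfsfilecache('bookmarks')
        def names(self):
            # recomputed only when the 'bookmarks' file changes on disk
            return self.vfs.tryread('bookmarks').splitlines()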
987 1001 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
988 1002 if lock is None:
989 1003 raise error.LockInheritanceContractViolation(
990 1004 'lock can only be inherited while held')
991 1005 if environ is None:
992 1006 environ = {}
993 1007 with lock.inherit() as locker:
994 1008 environ[envvar] = locker
995 1009 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
996 1010
997 1011 def wlocksub(repo, cmd, *args, **kwargs):
998 1012 """run cmd as a subprocess that allows inheriting repo's wlock
999 1013
1000 1014 This can only be called while the wlock is held. This takes all the
1001 1015 arguments that ui.system does, and returns the exit code of the
1002 1016 subprocess."""
1003 1017 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1004 1018 **kwargs)
1005 1019
1006 1020 def gdinitconfig(ui):
1007 1021 """helper function to know if a repo should be created as general delta
1008 1022 """
1009 1023 # experimental config: format.generaldelta
1010 1024 return (ui.configbool('format', 'generaldelta')
1011 1025 or ui.configbool('format', 'usegeneraldelta'))
1012 1026
1013 1027 def gddeltaconfig(ui):
1014 1028 """helper function to know if incoming delta should be optimised
1015 1029 """
1016 1030 # experimental config: format.generaldelta
1017 1031 return ui.configbool('format', 'generaldelta')
1018 1032
1019 1033 class simplekeyvaluefile(object):
1020 1034 """A simple file with key=value lines
1021 1035
1022 1036 Keys must be alphanumerics and start with a letter, values must not
1023 1037 contain '\n' characters"""
1024 1038 firstlinekey = '__firstline'
1025 1039
1026 1040 def __init__(self, vfs, path, keys=None):
1027 1041 self.vfs = vfs
1028 1042 self.path = path
1029 1043
1030 1044 def read(self, firstlinenonkeyval=False):
1031 1045 """Read the contents of a simple key-value file
1032 1046
1033 1047 'firstlinenonkeyval' indicates whether the first line of the file should
1034 1048 be treated as a key-value pair or returned fully under the
1035 1049 __firstline key."""
1036 1050 lines = self.vfs.readlines(self.path)
1037 1051 d = {}
1038 1052 if firstlinenonkeyval:
1039 1053 if not lines:
1040 1054 e = _("empty simplekeyvalue file")
1041 1055 raise error.CorruptedState(e)
1042 1056 # we don't want to include '\n' in the __firstline
1043 1057 d[self.firstlinekey] = lines[0][:-1]
1044 1058 del lines[0]
1045 1059
1046 1060 try:
1047 1061 # the 'if line.strip()' part prevents us from failing on empty
1048 1062 # lines which only contain '\n' therefore are not skipped
1049 1063 # by 'if line'
1050 1064 updatedict = dict(line[:-1].split('=', 1) for line in lines
1051 1065 if line.strip())
1052 1066 if self.firstlinekey in updatedict:
1053 1067 e = _("%r can't be used as a key")
1054 1068 raise error.CorruptedState(e % self.firstlinekey)
1055 1069 d.update(updatedict)
1056 1070 except ValueError as e:
1057 1071 raise error.CorruptedState(str(e))
1058 1072 return d
1059 1073
1060 1074 def write(self, data, firstline=None):
1061 1075 """Write key=>value mapping to a file
1062 1076 data is a dict. Keys must be alphanumerical and start with a letter.
1063 1077 Values must not contain newline characters.
1064 1078
1065 1079 If 'firstline' is not None, it is written to file before
1066 1080 everything else, as it is, not in a key=value form"""
1067 1081 lines = []
1068 1082 if firstline is not None:
1069 1083 lines.append('%s\n' % firstline)
1070 1084
1071 1085 for k, v in data.items():
1072 1086 if k == self.firstlinekey:
1073 1087 e = "key name '%s' is reserved" % self.firstlinekey
1074 1088 raise error.ProgrammingError(e)
1075 1089 if not k[0].isalpha():
1076 1090 e = "keys must start with a letter in a key-value file"
1077 1091 raise error.ProgrammingError(e)
1078 1092 if not k.isalnum():
1079 1093 e = "invalid key name in a simple key-value file"
1080 1094 raise error.ProgrammingError(e)
1081 1095 if '\n' in v:
1082 1096 e = "invalid value in a simple key-value file"
1083 1097 raise error.ProgrammingError(e)
1084 1098 lines.append("%s=%s\n" % (k, v))
1085 1099 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1086 1100 fp.write(''.join(lines))
1087 1101
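Putting read() and write() together, simplekeyvaluefile round-trips a small dict plus an optional free-form first line. A hedged example -- the repo vfs and the 'myext-state' file name are hypothetical:

    skv = simplekeyvaluefile(repo.vfs, 'myext-state')
    skv.write({'version': '1', 'step': 'apply'}, firstline='myext-v1')
    data = skv.read(firstlinenonkeyval=True)
    # data == {'__firstline': 'myext-v1', 'version': '1', 'step': 'apply'}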
1088 1102 _reportobsoletedsource = [
1089 1103 'debugobsolete',
1090 1104 'pull',
1091 1105 'push',
1092 1106 'serve',
1093 1107 'unbundle',
1094 1108 ]
1095 1109
1096 1110 def registersummarycallback(repo, otr, txnname=''):
1097 1111 """register a callback to issue a summary after the transaction is closed
1098 1112 """
1099 1113 for source in _reportobsoletedsource:
1100 1114 if txnname.startswith(source):
1101 1115 reporef = weakref.ref(repo)
1102 1116 def reportsummary(tr):
1103 1117 """the actual callback reporting the summary"""
1104 1118 repo = reporef()
1105 1119 obsoleted = obsutil.getobsoleted(repo, tr)
1106 1120 if obsoleted:
1107 1121 repo.ui.status(_('obsoleted %i changesets\n')
1108 1122 % len(obsoleted))
1109 1123 otr.addpostclose('00-txnreport', reportsummary)
1110 1124 break
@@ -1,212 +1,245 b''
1 1 $ cat >> $HGRCPATH <<EOF
2 2 > [extensions]
3 3 > rebase=
4 > drawdag=$TESTDIR/drawdag.py
4 5 >
5 6 > [phases]
6 7 > publish=False
7 8 >
8 9 > [alias]
9 10 > tglog = log -G --template "{rev}: '{desc}' bookmarks: {bookmarks}\n"
10 11 > EOF
11 12
12 13 Create a repo with several bookmarks
13 14 $ hg init a
14 15 $ cd a
15 16
16 17 $ echo a > a
17 18 $ hg ci -Am A
18 19 adding a
19 20
20 21 $ echo b > b
21 22 $ hg ci -Am B
22 23 adding b
23 24 $ hg book 'X'
24 25 $ hg book 'Y'
25 26
26 27 $ echo c > c
27 28 $ hg ci -Am C
28 29 adding c
29 30 $ hg book 'Z'
30 31
31 32 $ hg up -q 0
32 33
33 34 $ echo d > d
34 35 $ hg ci -Am D
35 36 adding d
36 37 created new head
37 38
38 39 $ hg book W
39 40
40 41 $ hg tglog
41 42 @ 3: 'D' bookmarks: W
42 43 |
43 44 | o 2: 'C' bookmarks: Y Z
44 45 | |
45 46 | o 1: 'B' bookmarks: X
46 47 |/
47 48 o 0: 'A' bookmarks:
48 49
49 50
50 51 Move only rebased bookmarks
51 52
52 53 $ cd ..
53 54 $ hg clone -q a a1
54 55
55 56 $ cd a1
56 57 $ hg up -q Z
57 58
58 59 Test deleting divergent bookmarks from dest (issue3685)
59 60
60 61 $ hg book -r 3 Z@diverge
61 62
62 63 ... and also test that bookmarks not on dest or not being moved aren't deleted
63 64
64 65 $ hg book -r 3 X@diverge
65 66 $ hg book -r 0 Y@diverge
66 67
67 68 $ hg tglog
68 69 o 3: 'D' bookmarks: W X@diverge Z@diverge
69 70 |
70 71 | @ 2: 'C' bookmarks: Y Z
71 72 | |
72 73 | o 1: 'B' bookmarks: X
73 74 |/
74 75 o 0: 'A' bookmarks: Y@diverge
75 76
76 77 $ hg rebase -s Y -d 3
77 78 rebasing 2:49cb3485fa0c "C" (Y Z)
78 79 saved backup bundle to $TESTTMP/a1/.hg/strip-backup/49cb3485fa0c-126f3e97-rebase.hg (glob)
79 80
80 81 $ hg tglog
81 82 @ 3: 'C' bookmarks: Y Z
82 83 |
83 84 o 2: 'D' bookmarks: W X@diverge
84 85 |
85 86 | o 1: 'B' bookmarks: X
86 87 |/
87 88 o 0: 'A' bookmarks: Y@diverge
88 89
89 90 Do not try to keep active but deleted divergent bookmark
90 91
91 92 $ cd ..
92 93 $ hg clone -q a a4
93 94
94 95 $ cd a4
95 96 $ hg up -q 2
96 97 $ hg book W@diverge
97 98
98 99 $ hg rebase -s W -d .
99 100 rebasing 3:41acb9dca9eb "D" (tip W)
100 101 saved backup bundle to $TESTTMP/a4/.hg/strip-backup/41acb9dca9eb-b35a6a63-rebase.hg (glob)
101 102
102 103 $ hg bookmarks
103 104 W 3:0d3554f74897
104 105 X 1:6c81ed0049f8
105 106 Y 2:49cb3485fa0c
106 107 Z 2:49cb3485fa0c
107 108
108 109 Keep bookmarks to the correct rebased changeset
109 110
110 111 $ cd ..
111 112 $ hg clone -q a a2
112 113
113 114 $ cd a2
114 115 $ hg up -q Z
115 116
116 117 $ hg rebase -s 1 -d 3
117 118 rebasing 1:6c81ed0049f8 "B" (X)
118 119 rebasing 2:49cb3485fa0c "C" (Y Z)
119 120 saved backup bundle to $TESTTMP/a2/.hg/strip-backup/6c81ed0049f8-a687065f-rebase.hg (glob)
120 121
121 122 $ hg tglog
122 123 @ 3: 'C' bookmarks: Y Z
123 124 |
124 125 o 2: 'B' bookmarks: X
125 126 |
126 127 o 1: 'D' bookmarks: W
127 128 |
128 129 o 0: 'A' bookmarks:
129 130
130 131
131 132 Keep active bookmark on the correct changeset
132 133
133 134 $ cd ..
134 135 $ hg clone -q a a3
135 136
136 137 $ cd a3
137 138 $ hg up -q X
138 139
139 140 $ hg rebase -d W
140 141 rebasing 1:6c81ed0049f8 "B" (X)
141 142 rebasing 2:49cb3485fa0c "C" (Y Z)
142 143 saved backup bundle to $TESTTMP/a3/.hg/strip-backup/6c81ed0049f8-a687065f-rebase.hg (glob)
143 144
144 145 $ hg tglog
145 146 o 3: 'C' bookmarks: Y Z
146 147 |
147 148 @ 2: 'B' bookmarks: X
148 149 |
149 150 o 1: 'D' bookmarks: W
150 151 |
151 152 o 0: 'A' bookmarks:
152 153
153 154 $ hg bookmarks
154 155 W 1:41acb9dca9eb
155 156 * X 2:e926fccfa8ec
156 157 Y 3:3d5fa227f4b5
157 158 Z 3:3d5fa227f4b5
158 159
159 160 rebase --continue with bookmarks present (issue3802)
160 161
161 162 $ hg up 2
162 163 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
163 164 (leaving bookmark X)
164 165 $ echo 'C' > c
165 166 $ hg add c
166 167 $ hg ci -m 'other C'
167 168 created new head
168 169 $ hg up 3
169 170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
170 171 $ hg rebase --dest 4
171 172 rebasing 3:3d5fa227f4b5 "C" (Y Z)
172 173 merging c
173 174 warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
174 175 unresolved conflicts (see hg resolve, then hg rebase --continue)
175 176 [1]
176 177 $ echo 'c' > c
177 178 $ hg resolve --mark c
178 179 (no more unresolved files)
179 180 continue: hg rebase --continue
180 181 $ hg rebase --continue
181 182 rebasing 3:3d5fa227f4b5 "C" (Y Z)
182 183 saved backup bundle to $TESTTMP/a3/.hg/strip-backup/3d5fa227f4b5-c6ea2371-rebase.hg (glob)
183 184 $ hg tglog
184 185 @ 4: 'C' bookmarks: Y Z
185 186 |
186 187 o 3: 'other C' bookmarks:
187 188 |
188 189 o 2: 'B' bookmarks: X
189 190 |
190 191 o 1: 'D' bookmarks: W
191 192 |
192 193 o 0: 'A' bookmarks:
193 194
194 195
195 196 ensure that bookmarks given the names of revset functions can be used
196 197 as --rev arguments (issue3950)
197 198
198 199 $ hg update -q 3
199 200 $ echo bimble > bimble
200 201 $ hg add bimble
201 202 $ hg commit -q -m 'bisect'
202 203 $ echo e >> bimble
203 204 $ hg ci -m bisect2
204 205 $ echo e >> bimble
205 206 $ hg ci -m bisect3
206 207 $ hg book bisect
207 208 $ hg update -q Y
208 209 $ hg rebase -r '"bisect"^^::"bisect"^' -r bisect -d Z
209 210 rebasing 5:345c90f326a4 "bisect"
210 211 rebasing 6:f677a2907404 "bisect2"
211 212 rebasing 7:325c16001345 "bisect3" (tip bisect)
212 213 saved backup bundle to $TESTTMP/a3/.hg/strip-backup/345c90f326a4-b4840586-rebase.hg (glob)
214
215 Bookmark and working parent get moved even if --keep is set (issue5682)
216
217 $ hg init $TESTTMP/book-keep
218 $ cd $TESTTMP/book-keep
219 $ hg debugdrawdag <<'EOS'
220 > B C
221 > |/
222 > A
223 > EOS
224 $ eval `hg tags -T 'hg bookmark -ir {node} {tag};\n' | grep -v tip`
225 $ rm .hg/localtags
226 $ hg up -q B
227 $ hg tglog
228 o 2: 'C' bookmarks: C
229 |
230 | @ 1: 'B' bookmarks: B
231 |/
232 o 0: 'A' bookmarks: A
233
234 $ hg rebase -r B -d C --keep
235 rebasing 1:112478962961 "B" (B)
236 $ hg tglog
237 @ 3: 'B' bookmarks: B
238 |
239 o 2: 'C' bookmarks: C
240 |
241 | o 1: 'B' bookmarks:
242 |/
243 o 0: 'A' bookmarks: A
244
245
@@ -1,202 +1,207 b''
1 1 $ cat >> $HGRCPATH<<EOF
2 2 > [extensions]
3 3 > rebase=
4 4 > drawdag=$TESTDIR/drawdag.py
5 5 > EOF
6 6
7 7 $ hg init non-merge
8 8 $ cd non-merge
9 9 $ hg debugdrawdag<<'EOS'
10 10 > F
11 11 > |
12 12 > E
13 13 > |
14 14 > D
15 15 > |
16 16 > B C
17 17 > |/
18 18 > A
19 19 > EOS
20 20
21 21 $ for i in C D E F; do
22 22 > hg bookmark -r $i -i BOOK-$i
23 23 > done
24 24
25 25 $ hg debugdrawdag<<'EOS'
26 26 > E
27 27 > |
28 28 > D
29 29 > |
30 30 > B
31 31 > EOS
32 32
33 33 $ hg log -G -T '{rev} {desc} {bookmarks}'
34 34 o 7 E
35 35 |
36 36 o 6 D
37 37 |
38 38 | o 5 F BOOK-F
39 39 | |
40 40 | o 4 E BOOK-E
41 41 | |
42 42 | o 3 D BOOK-D
43 43 | |
44 44 | o 2 C BOOK-C
45 45 | |
46 46 o | 1 B
47 47 |/
48 48 o 0 A
49 49
50 With --keep, bookmark should not move
50 With --keep, bookmark should move
51 51
52 52 $ hg rebase -r 3+4 -d E --keep
53 53 rebasing 3:e7b3f00ed42e "D" (BOOK-D)
54 54 note: rebase of 3:e7b3f00ed42e created no changes to commit
55 55 rebasing 4:69a34c08022a "E" (BOOK-E)
56 56 note: rebase of 4:69a34c08022a created no changes to commit
57 57 $ hg log -G -T '{rev} {desc} {bookmarks}'
58 o 7 E
58 o 7 E BOOK-D BOOK-E
59 59 |
60 60 o 6 D
61 61 |
62 62 | o 5 F BOOK-F
63 63 | |
64 | o 4 E BOOK-E
64 | o 4 E
65 65 | |
66 | o 3 D BOOK-D
66 | o 3 D
67 67 | |
68 68 | o 2 C BOOK-C
69 69 | |
70 70 o | 1 B
71 71 |/
72 72 o 0 A
73 73
74 Move D and E back for the next test
75
76 $ hg bookmark BOOK-D -fqir 3
77 $ hg bookmark BOOK-E -fqir 4
78
74 79 Bookmark is usually an indication of a head. For changes that are introduced by
75 80 an ancestor of bookmark B, after moving B to B-NEW, the changes are ideally
76 81 still introduced by an ancestor of changeset on B-NEW. In the below case,
77 82 "BOOK-D", and "BOOK-E" include changes introduced by "C".
78 83
79 84 $ hg rebase -s 2 -d E
80 85 rebasing 2:dc0947a82db8 "C" (C BOOK-C)
81 86 rebasing 3:e7b3f00ed42e "D" (BOOK-D)
82 87 note: rebase of 3:e7b3f00ed42e created no changes to commit
83 88 rebasing 4:69a34c08022a "E" (BOOK-E)
84 89 note: rebase of 4:69a34c08022a created no changes to commit
85 90 rebasing 5:6b2aeab91270 "F" (F BOOK-F)
86 91 saved backup bundle to $TESTTMP/non-merge/.hg/strip-backup/dc0947a82db8-52bb4973-rebase.hg (glob)
87 92 $ hg log -G -T '{rev} {desc} {bookmarks}'
88 93 o 5 F BOOK-F
89 94 |
90 95 o 4 C BOOK-C BOOK-D BOOK-E
91 96 |
92 97 o 3 E
93 98 |
94 99 o 2 D
95 100 |
96 101 o 1 B
97 102 |
98 103 o 0 A
99 104
100 105 Merge and its ancestors all become empty
101 106
102 107 $ hg init $TESTTMP/merge1
103 108 $ cd $TESTTMP/merge1
104 109
105 110 $ hg debugdrawdag<<'EOS'
106 111 > E
107 112 > /|
108 113 > B C D
109 114 > \|/
110 115 > A
111 116 > EOS
112 117
113 118 $ for i in C D E; do
114 119 > hg bookmark -r $i -i BOOK-$i
115 120 > done
116 121
117 122 $ hg debugdrawdag<<'EOS'
118 123 > H
119 124 > |
120 125 > D
121 126 > |
122 127 > C
123 128 > |
124 129 > B
125 130 > EOS
126 131
127 132 $ hg rebase -r '(A::)-(B::)-A' -d H
128 133 rebasing 2:dc0947a82db8 "C" (BOOK-C)
129 134 note: rebase of 2:dc0947a82db8 created no changes to commit
130 135 rebasing 3:b18e25de2cf5 "D" (BOOK-D)
131 136 note: rebase of 3:b18e25de2cf5 created no changes to commit
132 137 rebasing 4:86a1f6686812 "E" (E BOOK-E)
133 138 note: rebase of 4:86a1f6686812 created no changes to commit
134 139 saved backup bundle to $TESTTMP/merge1/.hg/strip-backup/b18e25de2cf5-1fd0a4ba-rebase.hg (glob)
135 140
136 141 $ hg log -G -T '{rev} {desc} {bookmarks}'
137 142 o 4 H BOOK-C BOOK-D BOOK-E
138 143 |
139 144 o 3 D
140 145 |
141 146 o 2 C
142 147 |
143 148 o 1 B
144 149 |
145 150 o 0 A
146 151
147 152 Part of ancestors of a merge become empty
148 153
149 154 $ hg init $TESTTMP/merge2
150 155 $ cd $TESTTMP/merge2
151 156
152 157 $ hg debugdrawdag<<'EOS'
153 158 > G
154 159 > /|
155 160 > E F
156 161 > | |
157 162 > B C D
158 163 > \|/
159 164 > A
160 165 > EOS
161 166
162 167 $ for i in C D E F G; do
163 168 > hg bookmark -r $i -i BOOK-$i
164 169 > done
165 170
166 171 $ hg debugdrawdag<<'EOS'
167 172 > H
168 173 > |
169 174 > F
170 175 > |
171 176 > C
172 177 > |
173 178 > B
174 179 > EOS
175 180
176 181 $ hg rebase -r '(A::)-(B::)-A' -d H
177 182 rebasing 2:dc0947a82db8 "C" (BOOK-C)
178 183 note: rebase of 2:dc0947a82db8 created no changes to commit
179 184 rebasing 3:b18e25de2cf5 "D" (D BOOK-D)
180 185 rebasing 4:03ca77807e91 "E" (E BOOK-E)
181 186 rebasing 5:ad6717a6a58e "F" (BOOK-F)
182 187 note: rebase of 5:ad6717a6a58e created no changes to commit
183 188 rebasing 6:c58e8bdac1f4 "G" (G BOOK-G)
184 189 saved backup bundle to $TESTTMP/merge2/.hg/strip-backup/b18e25de2cf5-2d487005-rebase.hg (glob)
185 190
186 191 $ hg log -G -T '{rev} {desc} {bookmarks}'
187 192 o 7 G BOOK-G
188 193 |\
189 194 | o 6 E BOOK-E
190 195 | |
191 196 o | 5 D BOOK-D BOOK-F
192 197 |/
193 198 o 4 H BOOK-C
194 199 |
195 200 o 3 F
196 201 |
197 202 o 2 C
198 203 |
199 204 o 1 B
200 205 |
201 206 o 0 A
202 207