##// END OF EJS Templates
merge: use merge.clean_update() when applicable...
Martin von Zweigbergk -
r46133:03726f5b default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,2282 +1,2282
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 https://mercurial-scm.org/wiki/RebaseExtension
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 import errno
20 20 import os
21 21
22 22 from mercurial.i18n import _
23 23 from mercurial.node import (
24 24 nullrev,
25 25 short,
26 26 )
27 27 from mercurial.pycompat import open
28 28 from mercurial import (
29 29 bookmarks,
30 30 cmdutil,
31 31 commands,
32 32 copies,
33 33 destutil,
34 34 dirstateguard,
35 35 error,
36 36 extensions,
37 37 hg,
38 38 merge as mergemod,
39 39 mergestate as mergestatemod,
40 40 mergeutil,
41 41 node as nodemod,
42 42 obsolete,
43 43 obsutil,
44 44 patch,
45 45 phases,
46 46 pycompat,
47 47 registrar,
48 48 repair,
49 49 revset,
50 50 revsetlang,
51 51 rewriteutil,
52 52 scmutil,
53 53 smartset,
54 54 state as statemod,
55 55 util,
56 56 )
57 57
58 58 # The following constants are used throughout the rebase module. The ordering of
59 59 # their values must be maintained.
60 60
61 61 # Indicates that a revision needs to be rebased
62 62 revtodo = -1
63 63 revtodostr = b'-1'
64 64
65 65 # legacy revstates no longer needed in current code
66 66 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
67 67 legacystates = {b'-2', b'-3', b'-4', b'-5'}
68 68
69 69 cmdtable = {}
70 70 command = registrar.command(cmdtable)
71 71 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
72 72 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
73 73 # be specifying the version(s) of Mercurial they are tested with, or
74 74 # leave the attribute unspecified.
75 75 testedwith = b'ships-with-hg-core'
76 76
77 77
78 78 def _nothingtorebase():
79 79 return 1
80 80
81 81
82 82 def _savegraft(ctx, extra):
83 83 s = ctx.extra().get(b'source', None)
84 84 if s is not None:
85 85 extra[b'source'] = s
86 86 s = ctx.extra().get(b'intermediate-source', None)
87 87 if s is not None:
88 88 extra[b'intermediate-source'] = s
89 89
90 90
91 91 def _savebranch(ctx, extra):
92 92 extra[b'branch'] = ctx.branch()
93 93
94 94
def _destrebase(repo, sourceset, destspace=None):
    """small wrapper around destmerge to pass the right extra args

    Please wrap destutil.destmerge instead."""
    return destutil.destmerge(
        repo,
        action=b'rebase',
        onheadcheck=False,
        sourceset=sourceset,
        destspace=destspace,
    )
106 106
107 107
108 108 revsetpredicate = registrar.revsetpredicate()
109 109
110 110
@revsetpredicate(b'_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    sourceset = (
        revset.getset(repo, smartset.fullreposet(repo), x)
        if x is not None
        else None
    )
    dest = _destrebase(repo, sourceset)
    return subset & smartset.baseset([dest])
123 123
124 124
@revsetpredicate(b'_destautoorphanrebase')
def _revsetdestautoorphanrebase(repo, subset, x):
    # ``_destautoorphanrebase()``

    # automatic rebase destination for a single orphan revision.
    unfiltered = repo.unfiltered()
    obsolete_revs = unfiltered.revs(b'obsolete()')

    srcrev = revset.getset(repo, subset, x).first()

    # Empty src or already obsoleted - Do not return a destination
    if not srcrev or srcrev in obsolete_revs:
        return smartset.baseset()
    candidates = destutil.orphanpossibledestination(repo, srcrev)
    if len(candidates) > 1:
        raise error.Abort(
            _(b"ambiguous automatic rebase: %r could end up on any of %r")
            % (srcrev, candidates)
        )
    # We have zero or one destination, so we can just return here.
    return smartset.baseset(candidates)
146 146
147 147
def _ctxdesc(ctx):
    """short description for a context"""
    firstline = ctx.description().split(b'\n', 1)[0]
    desc = b'%d:%s "%s"' % (ctx.rev(), ctx, firstline)
    repo = ctx.repo()
    labels = []
    for nsname, ns in pycompat.iteritems(repo.names):
        # branch names are already implied by the changeset itself
        if nsname != b'branches':
            labels.extend(ns.names(repo, ctx.node()))
    if labels:
        desc += b' (%s)' % b' '.join(labels)
    return desc
164 164
165 165
166 166 class rebaseruntime(object):
167 167 """This class is a container for rebase runtime state"""
168 168
    def __init__(self, repo, ui, inmemory=False, dryrun=False, opts=None):
        """Initialize rebase runtime state from command options.

        repo/ui: repository and ui objects
        inmemory: rebase using an in-memory (overlay) working context
        dryrun: simulate only; combined with inmemory by the caller
        opts: parsed command options dict (may be None)
        """
        if opts is None:
            opts = {}

        # prepared: whether we have rebasestate prepared or not. Currently it
        # decides whether "self.repo" is unfiltered or not.
        # The rebasestate has explicit hash to hash instructions not depending
        # on visibility. If rebasestate exists (in-memory or on-disk), use
        # unfiltered repo to avoid visibility issues.
        # Before knowing rebasestate (i.e. when starting a new rebase (not
        # --continue or --abort)), the original repo should be used so
        # visibility-dependent revsets are correct.
        self.prepared = False
        # set when resuming an interrupted rebase (--continue/--abort)
        self.resume = False
        self._repo = repo

        self.ui = ui
        self.opts = opts
        # rev of the working directory parent before the rebase started
        self.originalwd = None
        # rev of the external (non-rebased) parent, used with --collapse
        self.external = nullrev
        # Mapping between the old revision id and either what is the new rebased
        # revision or what needs to be done with the old revision. The state
        # dict will be what contains most of the rebase progress state.
        self.state = {}
        self.activebookmark = None
        # {srcrev: destrev} requested destination per rebased revision
        self.destmap = {}
        # revs recorded as skipped (not truly rebased)
        self.skipped = set()

        self.collapsef = opts.get(b'collapse', False)
        self.collapsemsg = cmdutil.logmessage(ui, opts)
        self.date = opts.get(b'date', None)

        e = opts.get(b'extrafn')  # internal, used by e.g. hgsubversion
        # callables invoked to populate commit extras; note a user-supplied
        # extrafn REPLACES the default _savegraft rather than extending it
        self.extrafns = [_savegraft]
        if e:
            self.extrafns = [e]

        self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
        self.keepf = opts.get(b'keep', False)
        self.keepbranchesf = opts.get(b'keepbranches', False)
        # whether to drop commits that become empty in the destination
        self.skipemptysuccessorf = rewriteutil.skip_empty_successor(
            repo.ui, b'rebase'
        )
        # {rev: successor-or-None} for obsolete revs not rebased; filled
        # later by _handleskippingobsolete()
        self.obsoletenotrebased = {}
        self.obsoletewithoutsuccessorindestination = set()
        self.inmemory = inmemory
        self.dryrun = dryrun
        self.stateobj = statemod.cmdstate(repo, b'rebasestate')
217 217
218 218 @property
219 219 def repo(self):
220 220 if self.prepared:
221 221 return self._repo.unfiltered()
222 222 else:
223 223 return self._repo
224 224
225 225 def storestatus(self, tr=None):
226 226 """Store the current status to allow recovery"""
227 227 if tr:
228 228 tr.addfilegenerator(
229 229 b'rebasestate',
230 230 (b'rebasestate',),
231 231 self._writestatus,
232 232 location=b'plain',
233 233 )
234 234 else:
235 235 with self.repo.vfs(b"rebasestate", b"w") as f:
236 236 self._writestatus(f)
237 237
    def _writestatus(self, f):
        """Serialize the rebase state to file object *f*.

        One item per line: originalwd hex, an empty line (legacy "dest"
        slot), external hex, collapse/keep/keepbranches as 0/1, the
        active bookmark name (possibly empty), then one
        oldrev:newrev:destnode hex triple per tracked revision. The line
        order is the wire format `_read` expects — do not reorder.
        """
        repo = self.repo
        # recorded hashes must resolve without filtering
        assert repo.filtername is None
        f.write(repo[self.originalwd].hex() + b'\n')
        # was "dest". we now write dest per src root below.
        f.write(b'\n')
        f.write(repo[self.external].hex() + b'\n')
        f.write(b'%d\n' % int(self.collapsef))
        f.write(b'%d\n' % int(self.keepf))
        f.write(b'%d\n' % int(self.keepbranchesf))
        f.write(b'%s\n' % (self.activebookmark or b''))
        destmap = self.destmap
        for d, v in pycompat.iteritems(self.state):
            oldrev = repo[d].hex()
            if v >= 0:
                # already rebased: record the new node's hash
                newrev = repo[v].hex()
            else:
                # sentinel value (e.g. revtodo) stored as decimal text
                newrev = b"%d" % v
            destnode = repo[destmap[d]].hex()
            f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
        repo.ui.debug(b'rebase status stored\n')
259 259
    def restorestatus(self):
        """Restore a previously stored status"""
        if not self.stateobj.exists():
            # no rebase in progress; tell the user which command to use
            cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))

        # _read() parses .hg/rebasestate and flips self.prepared
        data = self._read()
        self.repo.ui.debug(b'rebase status resumed\n')

        self.originalwd = data[b'originalwd']
        self.destmap = data[b'destmap']
        self.state = data[b'state']
        self.skipped = data[b'skipped']
        self.collapsef = data[b'collapse']
        self.keepf = data[b'keep']
        self.keepbranchesf = data[b'keepbranches']
        self.external = data[b'external']
        self.activebookmark = data[b'activebookmark']
277 277
278 278 def _read(self):
279 279 self.prepared = True
280 280 repo = self.repo
281 281 assert repo.filtername is None
282 282 data = {
283 283 b'keepbranches': None,
284 284 b'collapse': None,
285 285 b'activebookmark': None,
286 286 b'external': nullrev,
287 287 b'keep': None,
288 288 b'originalwd': None,
289 289 }
290 290 legacydest = None
291 291 state = {}
292 292 destmap = {}
293 293
294 294 if True:
295 295 f = repo.vfs(b"rebasestate")
296 296 for i, l in enumerate(f.read().splitlines()):
297 297 if i == 0:
298 298 data[b'originalwd'] = repo[l].rev()
299 299 elif i == 1:
300 300 # this line should be empty in newer version. but legacy
301 301 # clients may still use it
302 302 if l:
303 303 legacydest = repo[l].rev()
304 304 elif i == 2:
305 305 data[b'external'] = repo[l].rev()
306 306 elif i == 3:
307 307 data[b'collapse'] = bool(int(l))
308 308 elif i == 4:
309 309 data[b'keep'] = bool(int(l))
310 310 elif i == 5:
311 311 data[b'keepbranches'] = bool(int(l))
312 312 elif i == 6 and not (len(l) == 81 and b':' in l):
313 313 # line 6 is a recent addition, so for backwards
314 314 # compatibility check that the line doesn't look like the
315 315 # oldrev:newrev lines
316 316 data[b'activebookmark'] = l
317 317 else:
318 318 args = l.split(b':')
319 319 oldrev = repo[args[0]].rev()
320 320 newrev = args[1]
321 321 if newrev in legacystates:
322 322 continue
323 323 if len(args) > 2:
324 324 destrev = repo[args[2]].rev()
325 325 else:
326 326 destrev = legacydest
327 327 destmap[oldrev] = destrev
328 328 if newrev == revtodostr:
329 329 state[oldrev] = revtodo
330 330 # Legacy compat special case
331 331 else:
332 332 state[oldrev] = repo[newrev].rev()
333 333
334 334 if data[b'keepbranches'] is None:
335 335 raise error.Abort(_(b'.hg/rebasestate is incomplete'))
336 336
337 337 data[b'destmap'] = destmap
338 338 data[b'state'] = state
339 339 skipped = set()
340 340 # recompute the set of skipped revs
341 341 if not data[b'collapse']:
342 342 seen = set(destmap.values())
343 343 for old, new in sorted(state.items()):
344 344 if new != revtodo and new in seen:
345 345 skipped.add(old)
346 346 seen.add(new)
347 347 data[b'skipped'] = skipped
348 348 repo.ui.debug(
349 349 b'computed skipped revs: %s\n'
350 350 % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
351 351 )
352 352
353 353 return data
354 354
    def _handleskippingobsolete(self, obsoleterevs, destmap):
        """Compute structures necessary for skipping obsolete revisions

        obsoleterevs: iterable of all obsolete revisions in rebaseset
        destmap: {srcrev: destrev} destination revisions

        Populates self.obsoletenotrebased and
        self.obsoletewithoutsuccessorindestination as side effects.
        """
        self.obsoletenotrebased = {}
        # honor the experimental kill-switch for this optimization
        if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
            return
        obsoleteset = set(obsoleterevs)
        (
            self.obsoletenotrebased,
            self.obsoletewithoutsuccessorindestination,
            obsoleteextinctsuccessors,
        ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
        skippedset = set(self.obsoletenotrebased)
        skippedset.update(self.obsoletewithoutsuccessorindestination)
        skippedset.update(obsoleteextinctsuccessors)
        # may abort if skipping these revisions would be unsafe
        _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
374 374
    def _prepareabortorcontinue(
        self, isabort, backup=True, suppwarns=False, dryrun=False, confirm=False
    ):
        """Restore saved state for --abort/--continue.

        When isabort is true, delegates to _abort() and returns its exit
        code; otherwise returns None and leaves the runtime ready to
        continue. Returns 0 after clearing a broken (unreadable) state on
        abort; raises error.Abort when asked to continue a broken state.
        """
        self.resume = True
        try:
            self.restorestatus()
            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
        except error.RepoLookupError:
            # state references revisions that no longer exist
            if isabort:
                clearstatus(self.repo)
                clearcollapsemsg(self.repo)
                self.repo.ui.warn(
                    _(
                        b'rebase aborted (no revision is removed,'
                        b' only broken state is cleared)\n'
                    )
                )
                return 0
            else:
                msg = _(b'cannot continue inconsistent rebase')
                hint = _(b'use "hg rebase --abort" to clear broken state')
                raise error.Abort(msg, hint=hint)

        if isabort:
            # only honor the user's backup preference if backups are
            # enabled by configuration in the first place
            backup = backup and self.backupf
            return self._abort(
                backup=backup,
                suppwarns=suppwarns,
                dryrun=dryrun,
                confirm=confirm,
            )
406 406
    def _preparenewrebase(self, destmap):
        """Validate *destmap* and build the initial rebase state.

        destmap: {srcrev: destrev} of revisions to rebase. Returns
        _nothingtorebase() (i.e. 1) when there is nothing to do,
        otherwise returns None with self.originalwd/destmap/state set
        and self.prepared flipped on.
        """
        if not destmap:
            return _nothingtorebase()

        rebaseset = destmap.keys()
        if not self.keepf:
            try:
                # refuse to rewrite public/protected changesets
                rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
            except error.Abort as e:
                if e.hint is None:
                    e.hint = _(b'use --keep to keep original changesets')
                raise e

        result = buildstate(self.repo, destmap, self.collapsef)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_(b'nothing to rebase\n'))
            return _nothingtorebase()

        (self.originalwd, self.destmap, self.state) = result
        if self.collapsef:
            # --collapse requires a single destination for the whole set
            dests = set(self.destmap.values())
            if len(dests) != 1:
                raise error.Abort(
                    _(b'--collapse does not work with multiple destinations')
                )
            destrev = next(iter(dests))
            destancestors = self.repo.changelog.ancestors(
                [destrev], inclusive=True
            )
            self.external = externalparent(self.repo, self.state, destancestors)

        for destrev in sorted(set(destmap.values())):
            dest = self.repo[destrev]
            if dest.closesbranch() and not self.keepbranchesf:
                self.ui.status(_(b'reopening closed branch head %s\n') % dest)

        self.prepared = True
446 446
    def _assignworkingcopy(self):
        """Point self.wctx at an in-memory overlay or the on-disk wctx.

        Chooses based on self.inmemory; also logs which mode is used.
        """
        if self.inmemory:
            # imported lazily: only needed for in-memory rebases
            from mercurial.context import overlayworkingctx

            self.wctx = overlayworkingctx(self.repo)
            self.repo.ui.debug(b"rebasing in memory\n")
        else:
            self.wctx = self.repo[None]
            self.repo.ui.debug(b"rebasing on disk\n")
        self.repo.ui.log(
            b"rebase",
            b"using in-memory rebase: %r\n",
            self.inmemory,
            rebase_imm_used=self.inmemory,
        )
462 462
    def _performrebase(self, tr):
        """Rebase every pending revision, honoring obsolescence skips.

        tr: the single transaction when 'rebase.singletransaction' is
        set, else None. Stores state up-front so --abort works even if
        we fail before the transaction closes.
        """
        self._assignworkingcopy()
        repo, ui = self.repo, self.ui
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                    if len(branches) > 1:
                        raise error.Abort(
                            _(b'cannot collapse multiple named branches')
                        )

        # Calculate self.obsoletenotrebased
        obsrevs = _filterobsoleterevs(self.repo, self.state)
        self._handleskippingobsolete(obsrevs, self.destmap)

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()
        if tr:
            # When using single transaction, store state when transaction
            # commits.
            self.storestatus(tr)

        # progress total counts only revisions still marked "to do"
        cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
        p = repo.ui.makeprogress(
            _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
        )

        def progress(ctx):
            # advance the progress bar with a short rev identifier
            p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))

        allowdivergence = self.ui.configbool(
            b'experimental', b'evolution.allowdivergence'
        )
        for subset in sortsource(self.destmap):
            sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
            if not allowdivergence:
                # drop descendants of revs that cannot be rebased without
                # creating divergence (they are reported in _rebasenode)
                sortedrevs -= self.repo.revs(
                    b'descendants(%ld) and not %ld',
                    self.obsoletewithoutsuccessorindestination,
                    self.obsoletewithoutsuccessorindestination,
                )
            for rev in sortedrevs:
                self._rebasenode(tr, rev, allowdivergence, progress)
        p.complete()
        ui.note(_(b'rebase merging completed\n'))
520 520
    def _concludenode(self, rev, editor, commitmsg=None):
        '''Commit the wd changes with parents p1 and p2.

        Reuse commit info from rev but also store useful information in extra.
        Return node of committed revision (may be None when the commit is
        skipped, e.g. an empty successor with skip-empty-successor on).'''
        repo = self.repo
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        date = self.date
        if date is None:
            date = ctx.date()
        # record the pre-rebase hash so the rewrite can be traced
        extra = {b'rebase_source': ctx.hex()}
        for c in self.extrafns:
            c(ctx, extra)
        # never commit below draft; keep secret commits secret
        destphase = max(ctx.phase(), phases.draft)
        overrides = {
            (b'phases', b'new-commit'): destphase,
            (b'ui', b'allowemptycommit'): not self.skipemptysuccessorf,
        }
        with repo.ui.configoverride(overrides, b'rebase'):
            if self.inmemory:
                newnode = commitmemorynode(
                    repo,
                    wctx=self.wctx,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )
            else:
                newnode = commitnode(
                    repo,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )

        return newnode
563 563
    def _rebasenode(self, tr, rev, allowdivergence, progressfn):
        """Rebase a single revision, or record why it was skipped.

        tr: single transaction or None; rev: revision to process;
        allowdivergence: value of experimental.evolution.allowdivergence;
        progressfn: callback advancing the progress bar. Updates
        self.state[rev] (new rev, or p1 when nothing was committed) and
        self.skipped as side effects.
        """
        repo, ui, opts = self.repo, self.ui, self.opts
        ctx = repo[rev]
        desc = _ctxdesc(ctx)
        if self.state[rev] == rev:
            # state maps the rev to itself: nothing to move
            ui.status(_(b'already rebased %s\n') % desc)
        elif (
            not allowdivergence
            and rev in self.obsoletewithoutsuccessorindestination
        ):
            msg = (
                _(
                    b'note: not rebasing %s and its descendants as '
                    b'this would cause divergence\n'
                )
                % desc
            )
            repo.ui.status(msg)
            self.skipped.add(rev)
        elif rev in self.obsoletenotrebased:
            # obsolete rev whose successor already lives in the destination
            succ = self.obsoletenotrebased[rev]
            if succ is None:
                msg = _(b'note: not rebasing %s, it has no successor\n') % desc
            else:
                succdesc = _ctxdesc(repo[succ])
                msg = _(
                    b'note: not rebasing %s, already in destination as %s\n'
                ) % (desc, succdesc)
            repo.ui.status(msg)
            # Make clearrebased aware state[rev] is not a true successor
            self.skipped.add(rev)
            # Record rev as moved to its desired destination in self.state.
            # This helps bookmark and working parent movement.
            dest = max(
                adjustdest(repo, rev, self.destmap, self.state, self.skipped)
            )
            self.state[rev] = dest
        elif self.state[rev] == revtodo:
            ui.status(_(b'rebasing %s\n') % desc)
            progressfn(ctx)
            p1, p2, base = defineparents(
                repo,
                rev,
                self.destmap,
                self.state,
                self.skipped,
                self.obsoletenotrebased,
            )
            if self.resume and self.wctx.p1().rev() == p1:
                # the working copy is already where the merge left off
                repo.ui.debug(b'resuming interrupted rebase\n')
                self.resume = False
            else:
                overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
                with ui.configoverride(overrides, b'rebase'):
                    try:
                        rebasenode(
                            repo,
                            rev,
                            p1,
                            p2,
                            base,
                            self.collapsef,
                            wctx=self.wctx,
                        )
                    except error.InMemoryMergeConflictsError:
                        if self.dryrun:
                            raise error.ConflictResolutionRequired(b'rebase')
                        if self.collapsef:
                            # TODO: Make the overlayworkingctx reflected
                            # in the working copy here instead of re-raising
                            # so the entire rebase operation is retried.
                            raise
                        ui.status(
                            _(
                                b"hit merge conflicts; rebasing that "
                                b"commit again in the working copy\n"
                            )
                        )
                        # fall back from in-memory to on-disk rebase and
                        # redo this node so conflicts can be resolved
                        cmdutil.bailifchanged(repo)
                        self.inmemory = False
                        self._assignworkingcopy()
                        mergemod.update(
                            repo,
                            p1,
                            branchmerge=False,
                            force=False,
                            wc=self.wctx,
                        )
                        rebasenode(
                            repo,
                            rev,
                            p1,
                            p2,
                            base,
                            self.collapsef,
                            wctx=self.wctx,
                        )
            if not self.collapsef:
                merging = p2 != nullrev
                editform = cmdutil.mergeeditform(merging, b'rebase')
                editor = cmdutil.getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
                # We need to set parents again here just in case we're continuing
                # a rebase started with an old hg version (before 9c9cfecd4600),
                # because those old versions would have left us with two dirstate
                # parents, and we don't want to create a merge commit here (unless
                # we're rebasing a merge commit).
                self.wctx.setparents(repo[p1].node(), repo[p2].node())
                newnode = self._concludenode(rev, editor)
            else:
                # Skip commit if we are collapsing
                newnode = None
            # Update the state
            if newnode is not None:
                self.state[rev] = repo[newnode].rev()
                ui.debug(b'rebased as %s\n' % short(newnode))
                if repo[newnode].isempty():
                    ui.warn(
                        _(
                            b'note: created empty successor for %s, its '
                            b'destination already has all its changes\n'
                        )
                        % desc
                    )
            else:
                if not self.collapsef:
                    # commit was skipped as an empty successor
                    ui.warn(
                        _(
                            b'note: not rebasing %s, its destination already '
                            b'has all its changes\n'
                        )
                        % desc
                    )
                    self.skipped.add(rev)
                # map the rev onto p1 so descendants rebase correctly
                self.state[rev] = p1
                ui.debug(b'next revision set to %d\n' % p1)
        else:
            ui.status(
                _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
            )
        if not tr:
            # When not using single transaction, store state after each
            # commit is completely done. On InterventionRequired, we thus
            # won't store the status. Instead, we'll hit the "len(parents) == 2"
            # case and realize that the commit was in progress.
            self.storestatus()
711 711
    def _finishrebase(self):
        """Wrap up a successful rebase.

        Commits the collapsed revision (with --collapse), updates mq if
        present, restores the original working directory parent, strips
        or obsoletes the rebased originals, clears state files, and
        reactivates the bookmark that was active when we started.
        """
        repo, ui, opts = self.repo, self.ui, self.opts
        fm = ui.formatter(b'rebase', opts)
        fm.startitem()
        if self.collapsef:
            p1, p2, _base = defineparents(
                repo,
                min(self.state),
                self.destmap,
                self.state,
                self.skipped,
                self.obsoletenotrebased,
            )
            editopt = opts.get(b'edit')
            editform = b'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                # no -m/-l: synthesize a message and force the editor open
                commitmsg = b'Collapsed revision'
                for rebased in sorted(self.state):
                    if rebased not in self.skipped:
                        commitmsg += b'\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            # reuse metadata (user/date/...) of the newest rebased revision
            revtoreuse = max(self.state)

            self.wctx.setparents(repo[p1].node(), repo[self.external].node())
            newnode = self._concludenode(
                revtoreuse, editor, commitmsg=commitmsg
            )

            if newnode is not None:
                # every collapsed source maps to the single new revision
                newrev = repo[newnode].rev()
                for oldrev in self.state:
                    self.state[oldrev] = newrev

        if b'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_(b"update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, overwrite=False)

        collapsedas = None
        if self.collapsef and not self.keepf:
            collapsedas = newnode
        clearrebased(
            ui,
            repo,
            self.destmap,
            self.state,
            self.skipped,
            collapsedas,
            self.keepf,
            fm=fm,
            backup=self.backupf,
        )

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_(b"rebase completed\n"))
        util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
        fm.end()

        # reactivate the original bookmark only if it still points at the
        # current working directory parent
        if (
            self.activebookmark
            and self.activebookmark in repo._bookmarks
            and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
        ):
            bookmarks.activate(repo, self.activebookmark)
792 792
    def _abort(self, backup=True, suppwarns=False, dryrun=False, confirm=False):
        '''Restore the repository to its original state.

        backup: bundle stripped revisions before removing them.
        suppwarns: suppress the final "rebase aborted" warning.
        dryrun/confirm: when set, do not move the working directory back
        to originalwd unless it sits on a rebase-affected revision.
        Always clears the on-disk state files; returns 0.
        '''

        repo = self.repo
        try:
            # If the first commits in the rebased set get skipped during the
            # rebase, their values within the state mapping will be the dest
            # rev id. The rebased list must not contain the dest rev
            # (issue4896)
            rebased = [
                s
                for r, s in self.state.items()
                if s >= 0 and s != r and s != self.destmap[r]
            ]
            immutable = [d for d in rebased if not repo[d].mutable()]
            cleanup = True
            if immutable:
                # already-published results cannot be stripped
                repo.ui.warn(
                    _(b"warning: can't clean up public changesets %s\n")
                    % b', '.join(bytes(repo[r]) for r in immutable),
                    hint=_(b"see 'hg help phases' for details"),
                )
                cleanup = False

            descendants = set()
            if rebased:
                descendants = set(repo.changelog.descendants(rebased))
            if descendants - set(rebased):
                # someone committed on top of the rebased revisions
                repo.ui.warn(
                    _(
                        b"warning: new changesets detected on "
                        b"destination branch, can't strip\n"
                    )
                )
                cleanup = False

            if cleanup:
                if rebased:
                    strippoints = [
                        c.node() for c in repo.set(b'roots(%ld)', rebased)
                    ]

                updateifonnodes = set(rebased)
                updateifonnodes.update(self.destmap.values())

                if not dryrun and not confirm:
                    updateifonnodes.add(self.originalwd)

                shouldupdate = repo[b'.'].rev() in updateifonnodes

                # Update away from the rebase if necessary
                if shouldupdate:
                    mergemod.clean_update(repo[self.originalwd])

                # Strip from the first rebased revision
                if rebased:
                    repair.strip(repo.ui, repo, strippoints, backup=backup)

            if self.activebookmark and self.activebookmark in repo._bookmarks:
                bookmarks.activate(repo, self.activebookmark)

        finally:
            clearstatus(repo)
            clearcollapsemsg(repo)
            if not suppwarns:
                repo.ui.warn(_(b'rebase aborted\n'))
        return 0
860 860
861 861
862 862 @command(
863 863 b'rebase',
864 864 [
865 865 (
866 866 b's',
867 867 b'source',
868 868 [],
869 869 _(b'rebase the specified changesets and their descendants'),
870 870 _(b'REV'),
871 871 ),
872 872 (
873 873 b'b',
874 874 b'base',
875 875 [],
876 876 _(b'rebase everything from branching point of specified changeset'),
877 877 _(b'REV'),
878 878 ),
879 879 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
880 880 (
881 881 b'd',
882 882 b'dest',
883 883 b'',
884 884 _(b'rebase onto the specified changeset'),
885 885 _(b'REV'),
886 886 ),
887 887 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
888 888 (
889 889 b'm',
890 890 b'message',
891 891 b'',
892 892 _(b'use text as collapse commit message'),
893 893 _(b'TEXT'),
894 894 ),
895 895 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
896 896 (
897 897 b'l',
898 898 b'logfile',
899 899 b'',
900 900 _(b'read collapse commit message from file'),
901 901 _(b'FILE'),
902 902 ),
903 903 (b'k', b'keep', False, _(b'keep original changesets')),
904 904 (b'', b'keepbranches', False, _(b'keep original branch names')),
905 905 (b'D', b'detach', False, _(b'(DEPRECATED)')),
906 906 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
907 907 (b't', b'tool', b'', _(b'specify merge tool')),
908 908 (b'', b'stop', False, _(b'stop interrupted rebase')),
909 909 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
910 910 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
911 911 (
912 912 b'',
913 913 b'auto-orphans',
914 914 b'',
915 915 _(
916 916 b'automatically rebase orphan revisions '
917 917 b'in the specified revset (EXPERIMENTAL)'
918 918 ),
919 919 ),
920 920 ]
921 921 + cmdutil.dryrunopts
922 922 + cmdutil.formatteropts
923 923 + cmdutil.confirmopts,
924 924 _(b'[[-s REV]... | [-b REV]... | [-r REV]...] [-d REV] [OPTION]...'),
925 925 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
926 926 )
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    Published commits cannot be rebased (see :hg:`help phases`).
    To copy commits, see :hg:`help graft`.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    will use the same logic as :hg:`merge` to pick a destination. if
    the current branch contains exactly one other head, the other head
    is merged with by default. Otherwise, an explicit revision with
    which to merge with must be provided. (destination changeset is not
    modified by rebasing, but new changesets are added as its
    descendants.)

    Here are the ways to select changesets:

      1. Explicitly select them using ``--rev``.

      2. Use ``--source`` to select a root changeset and include all of its
         descendants.

      3. Use ``--base`` to select a changeset; rebase will find ancestors
         and their descendants which are not also ancestors of the destination.

      4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
         rebase will use ``--base .`` as above.

    If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
    can be used in ``--dest``. Destination would be calculated per source
    revision with ``SRC`` substituted by that single source revision and
    ``ALLSRC`` substituted by all source revisions.

    Rebase will destroy original changesets unless you use ``--keep``.
    It will also move your bookmarks (even if you do).

    Some changesets may be dropped if they do not contribute changes
    (e.g. merges from the destination branch).

    Unlike ``merge``, rebase will do nothing if you are at the branch tip of
    a named branch with two heads. You will need to explicitly specify source
    and/or destination.

    If you need to use a tool to automate merge/conflict decisions, you
    can specify one with ``--tool``, see :hg:`help merge-tools`.
    As a caveat: the tool will not be used to mediate when a file was
    deleted, there is no hook presently available for this.

    If a rebase is interrupted to manually resolve a conflict, it can be
    continued with --continue/-c, aborted with --abort/-a, or stopped with
    --stop.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

      - stabilize orphaned changesets so history looks linear::

          hg rebase -r 'orphan()-obsolete()'\
 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'

      Configuration Options:

      You can make rebase require a destination if you set the following config
      option::

        [commands]
        rebase.requiredest = True

      By default, rebase will close the transaction after each commit. For
      performance purposes, you can configure rebase to use a single transaction
      across the entire rebase. WARNING: This setting introduces a significant
      risk of losing the work you've done in a rebase if the rebase aborts
      unexpectedly::

        [rebase]
        singletransaction = True

      By default, rebase writes to the working copy, but you can configure it to
      run in-memory for better performance. When the rebase is not moving the
      parent(s) of the working copy (AKA the "currently checked out changesets"),
      this may also allow it to run even if the working copy is dirty::

        [rebase]
        experimental.inmemory = True

      Return Values:

      Returns 0 on success, 1 if nothing to rebase or there are
      unresolved conflicts.

    """
    opts = pycompat.byteskwargs(opts)
    inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
    action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
    if action:
        cmdutil.check_incompatible_arguments(
            opts, action, [b'confirm', b'dry_run']
        )
        cmdutil.check_incompatible_arguments(
            opts, action, [b'rev', b'source', b'base', b'dest']
        )
    cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
    cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')

    if action or repo.currenttransaction() is not None:
        # in-memory rebase is not compatible with resuming rebases.
        # (Or if it is run within a transaction, since the restart logic can
        # fail the entire transaction.)
        inmemory = False

    if opts.get(b'auto_orphans'):
        disallowed_opts = set(opts) - {b'auto_orphans'}
        cmdutil.check_incompatible_arguments(
            opts, b'auto_orphans', disallowed_opts
        )

        userrevs = list(repo.revs(opts.get(b'auto_orphans')))
        opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
        opts[b'dest'] = b'_destautoorphanrebase(SRC)'

    if opts.get(b'dry_run') or opts.get(b'confirm'):
        return _dryrunrebase(ui, repo, action, opts)
    elif action == b'stop':
        rbsrt = rebaseruntime(repo, ui)
        with repo.wlock(), repo.lock():
            rbsrt.restorestatus()
            if rbsrt.collapsef:
                raise error.Abort(_(b"cannot stop in --collapse session"))
            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if not (rbsrt.keepf or allowunstable):
                raise error.Abort(
                    _(
                        b"cannot remove original changesets with"
                        b" unrebased descendants"
                    ),
                    hint=_(
                        b'either enable obsmarkers to allow unstable '
                        b'revisions or use --keep to keep original '
                        b'changesets'
                    ),
                )
            # update to the current working revision
            # to clear interrupted merge
            mergemod.clean_update(repo[rbsrt.originalwd])
            rbsrt._finishrebase()
        return 0
    elif inmemory:
        try:
            # in-memory merge doesn't support conflicts, so if we hit any, abort
            # and re-run as an on-disk merge.
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                return _dorebase(ui, repo, action, opts, inmemory=inmemory)
        except error.InMemoryMergeConflictsError:
            ui.warn(
                _(
                    b'hit merge conflicts; re-running rebase without in-memory'
                    b' merge\n'
                )
            )
            # The rebase state written by the in-memory attempt is stale;
            # clear it before retrying on disk.
            clearstatus(repo)
            clearcollapsemsg(repo)
            return _dorebase(ui, repo, action, opts, inmemory=False)
    else:
        return _dorebase(ui, repo, action, opts)
1124 1124
1125 1125
def _dryrunrebase(ui, repo, action, opts):
    """Run the rebase in-memory for --dry-run or --confirm.

    On success, either applies the result (--confirm accepted) or rolls
    everything back (--dry-run / --confirm declined). Returns 0 on success
    and 1 when a merge conflict is hit.
    """
    rbsrt = rebaseruntime(repo, ui, inmemory=True, dryrun=True, opts=opts)
    confirm = opts.get(b'confirm')
    if confirm:
        ui.status(_(b'starting in-memory rebase\n'))
    else:
        ui.status(
            _(b'starting dry-run rebase; repository will not be changed\n')
        )
    with repo.wlock(), repo.lock():
        # Unless we finish or explicitly abort below, the finally clause
        # must undo whatever the in-memory run did.
        needsabort = True
        try:
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                _origrebase(
                    ui, repo, action, opts, rbsrt,
                )
        except error.ConflictResolutionRequired:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            needsabort = False
            raise
        else:
            if confirm:
                ui.status(_(b'rebase completed successfully\n'))
                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase()
                else:
                    rbsrt._prepareabortorcontinue(
                        isabort=True,
                        backup=False,
                        suppwarns=True,
                        confirm=confirm,
                    )
                needsabort = False
            else:
                ui.status(
                    _(
                        b'dry-run rebase completed successfully; run without'
                        b' -n/--dry-run to perform this rebase\n'
                    )
                )
            return 0
        finally:
            if needsabort:
                # no need to store backup in case of dryrun
                rbsrt._prepareabortorcontinue(
                    isabort=True,
                    backup=False,
                    suppwarns=True,
                    dryrun=opts.get(b'dry_run'),
                )
1180 1180
1181 1181
def _dorebase(ui, repo, action, opts, inmemory=False):
    """Run a real (non-dry-run) rebase with a fresh runtime object."""
    rbsrt = rebaseruntime(repo, ui, inmemory, opts=opts)
    return _origrebase(ui, repo, action, opts, rbsrt)
1185 1185
1186 1186
def _origrebase(ui, repo, action, opts, rbsrt):
    """Shared driver for new, continued, and aborted rebases.

    ``action`` is ``b'continue'``, ``b'abort'`` or a falsy value for a new
    rebase (``b'stop'`` is handled by the caller). Returns a non-None exit
    code on early termination, otherwise performs (and maybe finishes) the
    rebase under the repo locks.
    """
    assert action != b'stop'
    with repo.wlock(), repo.lock():
        if opts.get(b'interactive'):
            try:
                if extensions.find(b'histedit'):
                    enablehistedit = b''
            except KeyError:
                enablehistedit = b" --config extensions.histedit="
            help = b"hg%s help -e histedit" % enablehistedit
            msg = (
                _(
                    b"interactive history editing is supported by the "
                    b"'histedit' extension (see \"%s\")"
                )
                % help
            )
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(_(b'message can only be specified with collapse'))

        if action:
            if rbsrt.collapsef:
                raise error.Abort(
                    _(b'cannot use collapse with continue or abort')
                )
            if action == b'abort' and opts.get(b'tool', False):
                ui.warn(_(b'tool option will be ignored\n'))
            if action == b'continue':
                ms = mergestatemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(
                isabort=(action == b'abort')
            )
            if retcode is not None:
                return retcode
        else:
            # search default destination in this space
            # used in the 'hg pull --rebase' case, see issue 5214.
            destspace = opts.get(b'_destspace')
            destmap = _definedestmap(
                ui,
                repo,
                rbsrt.inmemory,
                opts.get(b'dest', None),
                opts.get(b'source', []),
                opts.get(b'base', []),
                opts.get(b'rev', []),
                destspace=destspace,
            )
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool(b'rebase', b'singletransaction')
        if singletr:
            tr = repo.transaction(b'rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not rbsrt.inmemory:
                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not rbsrt.dryrun:
                    rbsrt._finishrebase()
1263 1263
1264 1264
def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace):
    """use revisions argument to define destmap {srcrev: destrev}

    Returns None (after printing a status message) when there is nothing to
    rebase. ``destf``/``srcf``/``basef``/``revf`` are the raw --dest/--source/
    --base/--rev command-line values.
    """
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details

    cmdutil.checkunfinished(repo)
    if not inmemory:
        cmdutil.bailifchanged(repo)

    if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
        raise error.Abort(
            _(b'you must specify a destination'),
            hint=_(b'use: hg rebase -d REV'),
        )

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, srcf)
        if not src:
            ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
            return None
        # `+ (%ld)` to work around `wdir()::` being empty
        rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src)
    else:
        base = scmutil.revrange(repo, basef or [b'.'])
        if not base:
            ui.status(
                _(b'empty "base" revision set - ' b"can't compute rebase set\n")
            )
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = []  # selected children of branching points
        bpbase = {}  # {branchingpoint: [origbase]}
        for b in base:  # group bases by branching points
            bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in pycompat.iteritems(bpbase):  # calculate roots
            roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs(b'%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - %s is both "base"'
                            b' and destination\n'
                        )
                        % dest
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working directory '
                            b'parent is also destination\n'
                        )
                    )
            elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - "base" %s is '
                            b'already an ancestor of destination '
                            b'%s\n'
                        )
                        % (b'+'.join(bytes(repo[r]) for r in base), dest)
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working '
                            b'directory parent is already an '
                            b'ancestor of destination %s\n'
                        )
                        % dest
                    )
            else:  # can it happen?
                ui.status(
                    _(b'nothing to rebase from %s to %s\n')
                    % (b'+'.join(bytes(repo[r]) for r in base), dest)
                )
            return None

    if nodemod.wdirrev in rebaseset:
        raise error.Abort(_(b'cannot rebase the working copy'))
    rebasingwcp = repo[b'.'].rev() in rebaseset
    ui.log(
        b"rebase",
        b"rebasing working copy parent: %r\n",
        rebasingwcp,
        rebase_rebasing_wcp=rebasingwcp,
    )
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec(b'%ld', rebaseset)
    alias = {b'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(
                        _(b'rebase destination for %s is not unique') % repo[r]
                    )

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}

    if not destmap:
        ui.status(_(b'nothing to rebase - empty destination\n'))
        return None

    return destmap
1424 1424
1425 1425
def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state is collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            # an "external" parent is one that is neither being rebased nor
            # already an ancestor of the destination
            if p.rev() not in state and p.rev() not in destancestors:
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(
        _(
            b'unable to collapse on top of %d, there is more '
            b'than one external parent: %s'
        )
        % (max(destancestors), b', '.join(b"%d" % p for p in sorted(parents)))
    )
1450 1450
1451 1451
def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision, or None if the commit would be
    empty and empty commits are not allowed.'''
    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = None
    if b'branch' in extra:
        branch = extra[b'branch']

    # FIXME: We call _compact() because it's required to correctly detect
    # changed files. This was added to fix a regression shortly before the 5.5
    # release. A proper fix will be done in the default branch.
    wctx._compact()
    memctx = wctx.tomemctx(
        commitmsg,
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
    if memctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
        return None
    commitres = repo.commitctx(memctx)
    wctx.clean()  # Might be reused
    return commitres
1478 1478
1479 1479
def commitnode(repo, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    # Without singletransaction, protect the dirstate with its own guard.
    dsguard = util.nullcontextmanager()
    if not repo.ui.configbool(b'rebase', b'singletransaction'):
        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
    with dsguard:
        # Commit might fail if unresolved files exist
        newnode = repo.commit(
            text=commitmsg, user=user, date=date, extra=extra, editor=editor
        )

    repo.dirstate.setbranch(repo[newnode].branch())
    return newnode
1494 1494
1495 1495
def rebasenode(repo, rev, p1, p2, base, collapse, wctx):
    """Rebase a single revision rev on top of p1 using base as merge ancestor

    Raises InMemoryMergeConflictsError (in-memory) or
    ConflictResolutionRequired (on-disk) when the merge leaves unresolved
    files.
    """
    # Merge phase
    # Update to destination and merge it with local
    p1ctx = repo[p1]
    if wctx.isinmemory():
        wctx.setbase(p1ctx)
    else:
        if repo[b'.'].rev() != p1:
            repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
            mergemod.clean_update(p1ctx)
        else:
            repo.ui.debug(b" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    ctx = repo[rev]
    repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx))
    if base is not None:
        repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))

    # See explanation in merge.graft()
    mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
    stats = mergemod.update(
        repo,
        rev,
        branchmerge=True,
        force=True,
        ancestor=base,
        mergeancestor=mergeancestor,
        labels=[b'dest', b'source'],
        wc=wctx,
    )
    wctx.setparents(p1ctx.node(), repo[p2].node())
    if collapse:
        copies.graftcopies(wctx, ctx, p1ctx)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent.
        copies.graftcopies(wctx, ctx, ctx.p1())

    if stats.unresolvedcount > 0:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError()
        else:
            raise error.ConflictResolutionRequired(b'rebase')
1544 1544
1545 1545
def adjustdest(repo, rev, destmap, state, skipped):
    r"""adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when doing rebasing B+E to F, C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

        H       C1 G1
       /|       | /
      F G       |/
    K | |  ->   K
    | C D       |
    | |/        |
    | B         | ...
    |/          |/
    A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

          C'   After rebasing C, when considering B's destination, use C'
          |    instead of the original C.
      B   D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [
        s
        for s, d in state.items()
        if d > 0 and destmap[s] == dest and s not in skipped
    ]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    b'rev %d should be rebased already at this time' % dest
                )
        result.append(adjusted)
    return result
1622 1622
1623 1623
def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revision in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
    # Obsolete node with successors not in dest leads to divergence
    divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
    divergencebasecandidates = rebaseobsrevs - rebaseobsskipped

    if divergencebasecandidates and not divergenceok:
        divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
        msg = _(b"this rebase will cause divergences from: %s")
        h = _(
            b"to force the rebase please set "
            b"experimental.evolution.allowdivergence=True"
        )
        raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1644 1644
1645 1645
def successorrevs(unfi, rev):
    """yield revision numbers for successors of rev

    ``unfi`` must be an unfiltered repo so successor nodes hidden by
    filtering can still be resolved to revisions.
    """
    assert unfi.filtername is None
    get_rev = unfi.changelog.index.get_rev
    for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
        r = get_rev(s)
        # skip successors unknown to this repo
        if r is not None:
            yield r
1654 1654
1655 1655
1656 1656 def defineparents(repo, rev, destmap, state, skipped, obsskipped):
1657 1657 """Return new parents and optionally a merge base for rev being rebased
1658 1658
1659 1659 The destination specified by "dest" cannot always be used directly because
1660 1660 previously rebase result could affect destination. For example,
1661 1661
1662 1662 D E rebase -r C+D+E -d B
1663 1663 |/ C will be rebased to C'
1664 1664 B C D's new destination will be C' instead of B
1665 1665 |/ E's new destination will be C' instead of B
1666 1666 A
1667 1667
1668 1668 The new parents of a merge is slightly more complicated. See the comment
1669 1669 block below.
1670 1670 """
1671 1671 # use unfiltered changelog since successorrevs may return filtered nodes
1672 1672 assert repo.filtername is None
1673 1673 cl = repo.changelog
1674 1674 isancestor = cl.isancestorrev
1675 1675
1676 1676 dest = destmap[rev]
1677 1677 oldps = repo.changelog.parentrevs(rev) # old parents
1678 1678 newps = [nullrev, nullrev] # new parents
1679 1679 dests = adjustdest(repo, rev, destmap, state, skipped)
1680 1680 bases = list(oldps) # merge base candidates, initially just old parents
1681 1681
1682 1682 if all(r == nullrev for r in oldps[1:]):
1683 1683 # For non-merge changeset, just move p to adjusted dest as requested.
1684 1684 newps[0] = dests[0]
1685 1685 else:
1686 1686 # For merge changeset, if we move p to dests[i] unconditionally, both
1687 1687 # parents may change and the end result looks like "the merge loses a
1688 1688 # parent", which is a surprise. This is a limit because "--dest" only
1689 1689 # accepts one dest per src.
1690 1690 #
1691 1691 # Therefore, only move p with reasonable conditions (in this order):
1692 1692 # 1. use dest, if dest is a descendent of (p or one of p's successors)
1693 1693 # 2. use p's rebased result, if p is rebased (state[p] > 0)
1694 1694 #
1695 1695 # Comparing with adjustdest, the logic here does some additional work:
1696 1696 # 1. decide which parents will not be moved towards dest
1697 1697 # 2. if the above decision is "no", should a parent still be moved
1698 1698 # because it was rebased?
1699 1699 #
1700 1700 # For example:
1701 1701 #
1702 1702 # C # "rebase -r C -d D" is an error since none of the parents
1703 1703 # /| # can be moved. "rebase -r B+C -d D" will move C's parent
1704 1704 # A B D # B (using rule "2."), since B will be rebased.
1705 1705 #
1706 1706 # The loop tries to be not rely on the fact that a Mercurial node has
1707 1707 # at most 2 parents.
1708 1708 for i, p in enumerate(oldps):
1709 1709 np = p # new parent
1710 1710 if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
1711 1711 np = dests[i]
1712 1712 elif p in state and state[p] > 0:
1713 1713 np = state[p]
1714 1714
1715 1715 # If one parent becomes an ancestor of the other, drop the ancestor
1716 1716 for j, x in enumerate(newps[:i]):
1717 1717 if x == nullrev:
1718 1718 continue
1719 1719 if isancestor(np, x): # CASE-1
1720 1720 np = nullrev
1721 1721 elif isancestor(x, np): # CASE-2
1722 1722 newps[j] = np
1723 1723 np = nullrev
1724 1724 # New parents forming an ancestor relationship does not
1725 1725 # mean the old parents have a similar relationship. Do not
1726 1726 # set bases[x] to nullrev.
1727 1727 bases[j], bases[i] = bases[i], bases[j]
1728 1728
1729 1729 newps[i] = np
1730 1730
1731 1731 # "rebasenode" updates to new p1, and the old p1 will be used as merge
1732 1732 # base. If only p2 changes, merging using unchanged p1 as merge base is
1733 1733 # suboptimal. Therefore swap parents to make the merge sane.
1734 1734 if newps[1] != nullrev and oldps[0] == newps[0]:
1735 1735 assert len(newps) == 2 and len(oldps) == 2
1736 1736 newps.reverse()
1737 1737 bases.reverse()
1738 1738
1739 1739 # No parent change might be an error because we fail to make rev a
1740 1740 # descendent of requested dest. This can happen, for example:
1741 1741 #
1742 1742 # C # rebase -r C -d D
1743 1743 # /| # None of A and B will be changed to D and rebase fails.
1744 1744 # A B D
1745 1745 if set(newps) == set(oldps) and dest not in newps:
1746 1746 raise error.Abort(
1747 1747 _(
1748 1748 b'cannot rebase %d:%s without '
1749 1749 b'moving at least one of its parents'
1750 1750 )
1751 1751 % (rev, repo[rev])
1752 1752 )
1753 1753
1754 1754 # Source should not be ancestor of dest. The check here guarantees it's
1755 1755 # impossible. With multi-dest, the initial check does not cover complex
1756 1756 # cases since we don't have abstractions to dry-run rebase cheaply.
1757 1757 if any(p != nullrev and isancestor(rev, p) for p in newps):
1758 1758 raise error.Abort(_(b'source is ancestor of destination'))
1759 1759
1760 1760 # Check if the merge will contain unwanted changes. That may happen if
1761 1761 # there are multiple special (non-changelog ancestor) merge bases, which
1762 1762 # cannot be handled well by the 3-way merge algorithm. For example:
1763 1763 #
1764 1764 # F
1765 1765 # /|
1766 1766 # D E # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
1767 1767 # | | # as merge base, the difference between D and F will include
1768 1768 # B C # C, so the rebased F will contain C surprisingly. If "E" was
1769 1769 # |/ # chosen, the rebased F will contain B.
1770 1770 # A Z
1771 1771 #
1772 1772 # But our merge base candidates (D and E in above case) could still be
1773 1773 # better than the default (ancestor(F, Z) == null). Therefore still
1774 1774 # pick one (so choose p1 above).
1775 1775 if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
1776 1776 unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i]
1777 1777 for i, base in enumerate(bases):
1778 1778 if base == nullrev or base in newps:
1779 1779 continue
1780 1780 # Revisions in the side (not chosen as merge base) branch that
1781 1781 # might contain "surprising" contents
1782 1782 other_bases = set(bases) - {base}
1783 1783 siderevs = list(
1784 1784 repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
1785 1785 )
1786 1786
1787 1787 # If those revisions are covered by rebaseset, the result is good.
1788 1788 # A merge in rebaseset would be considered to cover its ancestors.
1789 1789 if siderevs:
1790 1790 rebaseset = [
1791 1791 r for r, d in state.items() if d > 0 and r not in obsskipped
1792 1792 ]
1793 1793 merges = [
1794 1794 r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
1795 1795 ]
1796 1796 unwanted[i] = list(
1797 1797 repo.revs(
1798 1798 b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
1799 1799 )
1800 1800 )
1801 1801
1802 1802 if any(revs is not None for revs in unwanted):
1803 1803 # Choose a merge base that has a minimal number of unwanted revs.
1804 1804 l, i = min(
1805 1805 (len(revs), i)
1806 1806 for i, revs in enumerate(unwanted)
1807 1807 if revs is not None
1808 1808 )
1809 1809
1810 1810 # The merge will include unwanted revisions. Abort now. Revisit this if
1811 1811 # we have a more advanced merge algorithm that handles multiple bases.
1812 1812 if l > 0:
1813 1813 unwanteddesc = _(b' or ').join(
1814 1814 (
1815 1815 b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
1816 1816 for revs in unwanted
1817 1817 if revs is not None
1818 1818 )
1819 1819 )
1820 1820 raise error.Abort(
1821 1821 _(b'rebasing %d:%s will include unwanted changes from %s')
1822 1822 % (rev, repo[rev], unwanteddesc)
1823 1823 )
1824 1824
1825 1825 # newps[0] should match merge base if possible. Currently, if newps[i]
1826 1826 # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
1827 1827 # the other's ancestor. In that case, it's fine to not swap newps here.
1828 1828 # (see CASE-1 and CASE-2 above)
1829 1829 if i != 0:
1830 1830 if newps[i] != nullrev:
1831 1831 newps[0], newps[i] = newps[i], newps[0]
1832 1832 bases[0], bases[i] = bases[i], bases[0]
1833 1833
1834 1834 # "rebasenode" updates to new p1, use the corresponding merge base.
1835 1835 base = bases[0]
1836 1836
1837 1837 repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))
1838 1838
1839 1839 return newps[0], newps[1], base
1840 1840
1841 1841
def isagitpatch(repo, patchname):
    """Return true if the given mq patch is in git format.

    Reads the patch file from the repository's mq patch directory and
    looks for a 'diff --git' header line.
    """
    mqpatch = os.path.join(repo.mq.path, patchname)
    # use a context manager so the patch file is closed even on early return
    # (the original leaked the handle until garbage collection)
    with open(mqpatch, b'rb') as fp:
        for line in patch.linereader(fp):
            if line.startswith(b'diff --git'):
                return True
    return False
1849 1849
1850 1850
def updatemq(repo, state, skipped, **opts):
    """Update rebased mq patches - finalize and then import them

    state: {oldrev: newrev} mapping produced by the rebase
    skipped: revisions that were dropped during the rebase

    Applied patches that were rebased are finalized (turned into regular
    changesets) and re-imported at their new location; patches that were
    skipped are removed from the series file.
    """
    mqrebase = {}  # rev -> (patch name, patch is in git format)
    mq = repo.mq
    original_series = mq.fullseries[:]  # copy: fullseries is rewritten below
    skippedpatches = set()  # patch names to drop from the series file

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug(
                b'revision %d is an mq patch (%s), finalize it.\n'
                % (rev, p.name)
            )
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(
                    _(b'updating mq patch %s to %d:%s\n')
                    % (name, state[rev], repo[state[rev]])
                )
                mq.qimport(
                    repo,
                    (),
                    patchname=name,
                    git=isgit,
                    rev=[b"%d" % state[rev]],
                )
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [
            s
            for s in original_series
            if mq.guard_re.split(s, 1)[0] not in skippedpatches
        ]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()
1903 1903
1904 1904
1905 1905 def storecollapsemsg(repo, collapsemsg):
1906 1906 """Store the collapse message to allow recovery"""
1907 1907 collapsemsg = collapsemsg or b''
1908 1908 f = repo.vfs(b"last-message.txt", b"w")
1909 1909 f.write(b"%s\n" % collapsemsg)
1910 1910 f.close()
1911 1911
1912 1912
def clearcollapsemsg(repo):
    """Delete the stored collapse message, ignoring a missing file."""
    msgfile = b"last-message.txt"
    repo.vfs.unlinkpath(msgfile, ignoremissing=True)
1916 1916
1917 1917
def restorecollapsemsg(repo, isabort):
    """Return the collapse message previously saved by storecollapsemsg().

    If the message file is missing: return b'' when aborting, otherwise
    abort with an error, since --continue needs the stored message.
    """
    try:
        fp = repo.vfs(b"last-message.txt")
        collapsemsg = fp.readline().strip()
        fp.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if not isabort:
            raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
        # we are aborting anyway, so an empty message will do
        collapsemsg = b''
    return collapsemsg
1933 1933
1934 1934
def clearstatus(repo):
    """Remove the rebase state file and detach it from any open transaction."""
    # keep a pending transaction from re-creating the state file on commit
    txn = repo.currenttransaction()
    if txn:
        txn.removefilegenerator(b'rebasestate')
    repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
1942 1942
1943 1943
def sortsource(destmap):
    """yield source revisions in an order that we only rebase things once

    If source and destination overlaps, we should filter out revisions
    depending on other revisions which hasn't been rebased yet.

    Yield a sorted list of revisions each time.

    For example, when rebasing A to B, B to C. This function yields [B], then
    [A], indicating B needs to be rebased first.

    Raise if there is a cycle so the rebase is impossible.
    """
    remaining = set(destmap)
    while remaining:
        # a revision is ready once its destination is no longer pending
        batch = sorted(r for r in remaining if destmap[r] not in remaining)
        if not batch:
            raise error.Abort(_(b'source and destination form a cycle'))
        remaining.difference_update(batch)
        yield batch
1968 1968
1969 1969
def buildstate(repo, destmap, collapse):
    '''Define which revisions are going to be rebased and where

    repo: repo
    destmap: {srcrev: destrev}
    collapse: whether --collapse was requested

    Returns None when every source revision is already in place (nothing to
    rebase), otherwise a tuple (originalwd, destmap, state) where state maps
    every source revision to revtodo, or to itself when already done.

    Raises error.Abort on cycles, applied mq patches in the destination, or
    a source that is an ancestor of its destination.
    '''
    rebaseset = destmap.keys()
    originalwd = repo[b'.'].rev()

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if b'qtip' in repo.tags():
        mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
        if set(destmap.values()) & mqapplied:
            raise error.Abort(_(b'cannot rebase onto an applied mq patch'))

    # Get "cycle" error early by exhausting the generator.
    sortedsrc = list(sortsource(destmap))  # a list of sorted revs
    if not sortedsrc:
        raise error.Abort(_(b'no matching revisions'))

    # Only check the first batch of revisions to rebase not depending on other
    # rebaseset. This means "source is ancestor of destination" for the second
    # (and following) batches of revisions are not checked here. We rely on
    # "defineparents" to do that check.
    roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
    if not roots:
        raise error.Abort(_(b'no matching revisions'))

    def revof(r):
        # sort key: plain revision number of a context
        return r.rev()

    roots = sorted(roots, key=revof)
    state = dict.fromkeys(rebaseset, revtodo)
    emptyrebase = len(sortedsrc) == 1
    for root in roots:
        dest = repo[destmap[root.rev()]]
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_(b'source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', it will use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # mark the revision as done by setting its new revision
                # equal to its old (current) revisions
                state[root.rev()] = root.rev()
                repo.ui.debug(b'source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all((state.get(p) == p for p in parents)):
            state[rev] = rev
    return originalwd, destmap, state
2035 2035
2036 2036
def clearrebased(
    ui,
    repo,
    destmap,
    state,
    skipped,
    collapsedas=None,
    keepf=False,
    fm=None,
    backup=True,
):
    """dispose of rebased revision at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is the
    `collapsedas` node.

    If `keepf` is True, the rebase has --keep set and no nodes should be
    removed (but bookmarks still need to be moved).

    If `backup` is False, no backup will be stored when stripping rebased
    revisions.
    """
    tonode = repo.changelog.node
    replacements = {}  # {(oldnode,...): (newnode,...)} fed to cleanupnodes
    moves = {}  # {oldnode: newnode} for bookmark moves
    stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)

    collapsednodes = []
    for rev, newrev in sorted(state.items()):
        if newrev >= 0 and newrev != rev:
            oldnode = tonode(rev)
            newnode = collapsedas or tonode(newrev)
            moves[oldnode] = newnode
            succs = None
            if rev in skipped:
                if stripcleanup or not repo[rev].obsolete():
                    # record no successors: skipped revisions are dropped
                    succs = ()
            elif collapsedas:
                # all collapsed revisions share one successor, recorded below
                collapsednodes.append(oldnode)
            else:
                succs = (newnode,)
            if succs is not None:
                replacements[(oldnode,)] = succs
    if collapsednodes:
        replacements[tuple(collapsednodes)] = (collapsedas,)
    if fm:
        # report {oldnode: [newnodes]} through the formatter (JSON/template)
        hf = fm.hexfunc
        fl = fm.formatlist
        fd = fm.formatdict
        changes = {}
        for oldns, newn in pycompat.iteritems(replacements):
            for oldn in oldns:
                changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
        nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
        fm.data(nodechanges=nodechanges)
    if keepf:
        # --keep: move bookmarks but do not obsolete/strip anything
        replacements = {}
    scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
2095 2095
2096 2096
def pullrebase(orig, ui, repo, *args, **opts):
    """Call rebase after pull if the latter has been invoked with --rebase

    Wrapper installed around the core 'pull' command by uisetup(). `orig` is
    the original pull command; a rebase is attempted only when the pull
    actually added revisions.
    """
    if opts.get('rebase'):
        if ui.configbool(b'commands', b'rebase.requiredest'):
            msg = _(b'rebase destination required by configuration')
            hint = _(b'use hg pull followed by hg rebase -d DEST')
            raise error.Abort(msg, hint=hint)

        with repo.wlock(), repo.lock():
            if opts.get('update'):
                del opts['update']
                ui.debug(
                    b'--update and --rebase are not compatible, ignoring '
                    b'the update flag\n'
                )

            cmdutil.checkunfinished(repo, skipmerge=True)
            cmdutil.bailifchanged(
                repo,
                hint=_(
                    b'cannot pull with rebase: '
                    b'please commit or shelve your changes first'
                ),
            )

            revsprepull = len(repo)
            origpostincoming = commands.postincoming

            def _dummy(*args, **kwargs):
                # temporarily replaces commands.postincoming so the wrapped
                # pull does not run its post-pull handling; restored below
                pass

            commands.postincoming = _dummy
            try:
                ret = orig(ui, repo, *args, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # --rev option from pull conflict with rebase own --rev
                # dropping it
                if 'rev' in opts:
                    del opts['rev']
                # positional argument from pull conflicts with rebase's own
                # --source.
                if 'source' in opts:
                    del opts['source']
                # revsprepull is the len of the repo, not revnum of tip.
                destspace = list(repo.changelog.revs(start=revsprepull))
                opts['_destspace'] = destspace
                try:
                    rebase(ui, repo, **opts)
                except error.NoMergeDestAbort:
                    # we can maybe update instead
                    rev, _a, _b = destutil.destupdate(repo)
                    if rev == repo[b'.'].rev():
                        ui.status(_(b'nothing to rebase\n'))
                    else:
                        ui.status(_(b'nothing to rebase - updating instead\n'))
                        # not passing argument to get the bare update behavior
                        # with warning and trumpets
                        commands.update(ui, repo)
    else:
        if opts.get('tool'):
            raise error.Abort(_(b'--tool can only be used with --rebase'))
        ret = orig(ui, repo, *args, **opts)

    return ret
2164 2164
2165 2165
2166 2166 def _filterobsoleterevs(repo, revs):
2167 2167 """returns a set of the obsolete revisions in revs"""
2168 2168 return {r for r in revs if repo[r].obsolete()}
2169 2169
2170 2170
def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
    """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination,
    obsoleteextinctsuccessors).

    `obsoletenotrebased` is a mapping obsolete => successor for all
    obsolete nodes to be rebased given in `rebaseobsrevs`.

    `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
    without a successor in destination.

    `obsoleteextinctsuccessors` is a set of obsolete revisions with only
    obsolete successors.
    """
    obsoletenotrebased = {}
    obsoletewithoutsuccessorindestination = set()
    obsoleteextinctsuccessors = set()

    # the obsstore lookups below need the unfiltered repo view
    assert repo.filtername is None
    cl = repo.changelog
    get_rev = cl.index.get_rev
    extinctrevs = set(repo.revs(b'extinct()'))
    for srcrev in rebaseobsrevs:
        srcnode = cl.node(srcrev)
        # XXX: more advanced APIs are required to handle split correctly
        successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
        # obsutil.allsuccessors includes node itself
        successors.remove(srcnode)
        succrevs = {get_rev(s) for s in successors}
        succrevs.discard(None)  # successors not known locally
        if succrevs.issubset(extinctrevs):
            # all successors are extinct
            obsoleteextinctsuccessors.add(srcrev)
        if not successors:
            # no successor
            obsoletenotrebased[srcrev] = None
        else:
            dstrev = destmap[srcrev]
            for succrev in succrevs:
                if cl.isancestorrev(succrev, dstrev):
                    obsoletenotrebased[srcrev] = succrev
                    break
            else:
                # If 'srcrev' has a successor in rebase set but none in
                # destination (which would be caught above), we shall skip it
                # and its descendants to avoid divergence.
                if srcrev in extinctrevs or any(s in destmap for s in succrevs):
                    obsoletewithoutsuccessorindestination.add(srcrev)

    return (
        obsoletenotrebased,
        obsoletewithoutsuccessorindestination,
        obsoleteextinctsuccessors,
    )
2223 2223
2224 2224
def abortrebase(ui, repo):
    """Abort an in-progress rebase, restoring the pre-rebase state."""
    with repo.wlock(), repo.lock():
        rebaseruntime(repo, ui)._prepareabortorcontinue(isabort=True)
2229 2229
2230 2230
def continuerebase(ui, repo):
    """Resume an interrupted rebase once all conflicts are resolved."""
    with repo.wlock(), repo.lock():
        runtime = rebaseruntime(repo, ui)
        # refuse to continue while unresolved merge conflicts remain
        mergestate = mergestatemod.mergestate.read(repo)
        mergeutil.checkunresolved(mergestate)
        retcode = runtime._prepareabortorcontinue(isabort=False)
        if retcode is not None:
            return retcode
        runtime._performrebase(None)
        runtime._finishrebase()
2241 2241
2242 2242
def summaryhook(ui, repo):
    """Add a line about any in-progress rebase to `hg summary` output."""
    if not repo.vfs.exists(b'rebasestate'):
        return
    try:
        runtime = rebaseruntime(repo, ui, {})
        runtime.restorestatus()
        state = runtime.state
    except error.RepoLookupError:
        # i18n: column positioning for "hg summary"
        ui.write(
            _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
        )
        return
    numrebased = sum(1 for v in pycompat.itervalues(state) if v >= 0)
    rebasedpart = ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased
    remainingpart = ui.label(_(b'%d remaining'), b'rebase.remaining') % (
        len(state) - numrebased
    )
    # i18n: column positioning for "hg summary"
    ui.write(
        _(b'rebase: %s, %s (rebase --continue)\n')
        % (rebasedpart, remainingpart)
    )
2265 2265
2266 2266
def uisetup(ui):
    """Wire the rebase extension into core commands at startup."""
    # Replace pull with a decorator to provide --rebase option
    wrapped = extensions.wrapcommand(commands.table, b'pull', pullrebase)
    options = wrapped[1]
    options.append(
        (b'', b'rebase', None, _(b"rebase working directory to branch head"))
    )
    options.append(
        (b't', b'tool', b'', _(b"specify merge tool for rebase"))
    )
    cmdutil.summaryhooks.add(b'rebase', summaryhook)
    statemod.addunfinished(
        b'rebase',
        fname=b'rebasestate',
        stopflag=True,
        continueflag=True,
        abortfunc=abortrebase,
        continuefunc=continuerebase,
    )
@@ -1,929 +1,929
1 1 # Patch transplanting extension for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to transplant changesets from another branch
9 9
10 10 This extension allows you to transplant changes to another parent revision,
11 11 possibly in another repository. The transplant is done using 'diff' patches.
12 12
13 13 Transplanted patches are recorded in .hg/transplant/transplants, as a
14 14 map from a changeset hash to its hash in the source repository.
15 15 '''
16 16 from __future__ import absolute_import
17 17
18 18 import os
19 19
20 20 from mercurial.i18n import _
21 21 from mercurial.pycompat import open
22 22 from mercurial import (
23 23 bundlerepo,
24 24 cmdutil,
25 25 error,
26 26 exchange,
27 27 hg,
28 28 logcmdutil,
29 29 match,
30 30 merge,
31 31 node as nodemod,
32 32 patch,
33 33 pycompat,
34 34 registrar,
35 35 revlog,
36 36 revset,
37 37 scmutil,
38 38 smartset,
39 39 state as statemod,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
43 43 from mercurial.utils import (
44 44 procutil,
45 45 stringutil,
46 46 )
47 47
48 48
class TransplantError(error.Abort):
    """Raised when applying a transplant fails and user action is needed."""

    pass
51 51
52 52
# command and configuration registration for the transplant extension
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# optional shell command run to rewrite each changeset before applying it
# (see transplanter.filter below)
configitem(
    b'transplant', b'filter', default=None,
)
# when set, a '(transplanted from ...)' note is appended to commit messages
configitem(
    b'transplant', b'log', default=None,
)
70 70
71 71
class transplantentry(object):
    """One transplant record: a local node and its source node."""

    def __init__(self, lnode, rnode):
        # lnode: node id of the transplanted changeset in the local repo
        # rnode: node id of the original changeset in the source repo
        self.lnode = lnode
        self.rnode = rnode
76 76
77 77
class transplants(object):
    """Persistent map of transplanted changesets.

    Maps a changeset node in the source repository (rnode) to the list of
    transplantentry records created locally for it. The map is stored in
    `transplantfile` under `path` (typically .hg/transplant/transplants).
    """

    def __init__(self, path=None, transplantfile=None, opener=None):
        self.path = path
        self.transplantfile = transplantfile
        self.opener = opener

        if not opener:
            self.opener = vfsmod.vfs(self.path)
        self.transplants = {}
        self.dirty = False
        self.read()

    def read(self):
        """Load the transplant map from disk, if present."""
        # guard before joining: path/transplantfile may be None, and the
        # original unconditional os.path.join crashed in that case
        if not self.transplantfile:
            return
        abspath = os.path.join(self.path, self.transplantfile)
        if not os.path.exists(abspath):
            return
        for line in self.opener.read(self.transplantfile).splitlines():
            lnode, rnode = map(revlog.bin, line.split(b':'))
            entries = self.transplants.setdefault(rnode, [])
            entries.append(transplantentry(lnode, rnode))

    def write(self):
        """Persist the transplant map if it has been modified."""
        if self.dirty and self.transplantfile:
            if not os.path.isdir(self.path):
                os.mkdir(self.path)
            # context manager closes the file even if a write fails
            with self.opener(self.transplantfile, b'w') as fp:
                for entries in pycompat.itervalues(self.transplants):
                    for t in entries:
                        l, r = map(nodemod.hex, (t.lnode, t.rnode))
                        fp.write(l + b':' + r + b'\n')
        self.dirty = False

    def get(self, rnode):
        """Return the transplant entries for rnode (empty list if none)."""
        return self.transplants.get(rnode) or []

    def set(self, lnode, rnode):
        """Record that source node rnode was transplanted as local lnode."""
        entries = self.transplants.setdefault(rnode, [])
        entries.append(transplantentry(lnode, rnode))
        self.dirty = True

    def remove(self, transplant):
        """Forget a previously recorded transplant entry."""
        entries = self.transplants.get(transplant.rnode)
        if entries:
            entries.remove(transplant)
            self.dirty = True
123 123
124 124
125 125 class transplanter(object):
    def __init__(self, ui, repo, opts):
        """Create a transplanter operating on `repo`.

        Persistent state (transplant map, series, journal) lives under
        .hg/transplant.
        """
        self.ui = ui
        self.path = repo.vfs.join(b'transplant')
        self.opener = vfsmod.vfs(self.path)
        self.transplants = transplants(
            self.path, b'transplants', opener=self.opener
        )

        def getcommiteditor():
            # resolved lazily so every commit picks up the editform for the
            # current working-directory context
            editform = cmdutil.mergeeditform(repo[None], b'transplant')
            return cmdutil.getcommiteditor(
                editform=editform, **pycompat.strkwargs(opts)
            )

        self.getcommiteditor = getcommiteditor
141 141
    def applied(self, repo, node, parent):
        '''returns True if a node is already an ancestor of parent
        or is parent or has already been transplanted'''
        if hasnode(repo, parent):
            parentrev = repo.changelog.rev(parent)
            if hasnode(repo, node):
                rev = repo.changelog.rev(node)
                reachable = repo.changelog.ancestors(
                    [parentrev], rev, inclusive=True
                )
                if rev in reachable:
                    # node is parent itself or an ancestor of parent
                    return True
            for t in self.transplants.get(node):
                # it might have been stripped
                if not hasnode(repo, t.lnode):
                    # drop the stale record so it is not consulted again
                    self.transplants.remove(t)
                    return False
                lnoderev = repo.changelog.rev(t.lnode)
                if lnoderev in repo.changelog.ancestors(
                    [parentrev], lnoderev, inclusive=True
                ):
                    # the transplanted copy is parent or an ancestor of it
                    return True
        return False
165 165
    def apply(self, repo, source, revmap, merges, opts=None):
        '''apply the revisions in revmap one by one in revision order

        source: repository to transplant from (may be `repo` itself)
        revmap: {rev in source: node in source}; entries are deleted as they
            are processed, so the remaining map can be saved for --continue
        merges: nodes to transplant as merges rather than as patches
        '''
        if opts is None:
            opts = {}
        revs = sorted(revmap)
        p1 = repo.dirstate.p1()
        pulls = []  # nodes that can simply be pulled instead of patched
        diffopts = patch.difffeatureopts(self.ui, opts)
        diffopts.git = True

        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction(b'transplant')
            for rev in revs:
                node = revmap[rev]
                revstr = b'%d:%s' % (rev, nodemod.short(node))

                if self.applied(repo, node, p1):
                    self.ui.warn(
                        _(b'skipping already applied revision %s\n') % revstr
                    )
                    continue

                parents = source.changelog.parents(node)
                if not (opts.get(b'filter') or opts.get(b'log')):
                    # If the changeset parent is the same as the
                    # wdir's parent, just pull it.
                    if parents[0] == p1:
                        pulls.append(node)
                        p1 = node
                        continue
                    if pulls:
                        # flush queued pulls before falling back to patching
                        if source != repo:
                            exchange.pull(repo, source.peer(), heads=pulls)
                        merge.update(
                            repo, pulls[-1], branchmerge=False, force=False
                        )
                        p1 = repo.dirstate.p1()
                        pulls = []

                domerge = False
                if node in merges:
                    # pulling all the merge revs at once would mean we
                    # couldn't transplant after the latest even if
                    # transplants before them fail.
                    domerge = True
                    if not hasnode(repo, node):
                        exchange.pull(repo, source.peer(), heads=[node])

                skipmerge = False
                if parents[1] != revlog.nullid:
                    if not opts.get(b'parent'):
                        self.ui.note(
                            _(b'skipping merge changeset %d:%s\n')
                            % (rev, nodemod.short(node))
                        )
                        skipmerge = True
                    else:
                        parent = source.lookup(opts[b'parent'])
                        if parent not in parents:
                            raise error.Abort(
                                _(b'%s is not a parent of %s')
                                % (nodemod.short(parent), nodemod.short(node))
                            )
                else:
                    parent = parents[0]

                if skipmerge:
                    patchfile = None
                else:
                    # write the diff against the chosen parent to a temp file
                    fd, patchfile = pycompat.mkstemp(prefix=b'hg-transplant-')
                    fp = os.fdopen(fd, 'wb')
                    gen = patch.diff(source, parent, node, opts=diffopts)
                    for chunk in gen:
                        fp.write(chunk)
                    fp.close()

                del revmap[rev]
                if patchfile or domerge:
                    try:
                        try:
                            n = self.applyone(
                                repo,
                                node,
                                source.changelog.read(node),
                                patchfile,
                                merge=domerge,
                                log=opts.get(b'log'),
                                filter=opts.get(b'filter'),
                            )
                        except TransplantError:
                            # Do not rollback, it is up to the user to
                            # fix the merge or cancel everything
                            tr.close()
                            raise
                        if n and domerge:
                            self.ui.status(
                                _(b'%s merged at %s\n')
                                % (revstr, nodemod.short(n))
                            )
                        elif n:
                            self.ui.status(
                                _(b'%s transplanted to %s\n')
                                % (nodemod.short(node), nodemod.short(n))
                            )
                    finally:
                        if patchfile:
                            os.unlink(patchfile)
            tr.close()
            if pulls:
                # flush any pulls still queued at the end of the loop
                exchange.pull(repo, source.peer(), heads=pulls)
                merge.update(repo, pulls[-1], branchmerge=False, force=False)
        finally:
            # always record what is left to do so --continue can resume
            self.saveseries(revmap, merges)
            self.transplants.write()
            if tr:
                tr.release()
            if lock:
                lock.release()
286 286
    def filter(self, filter, node, changelog, patchfile):
        '''arbitrarily rewrite changeset before applying it

        Runs the user-supplied `filter` shell command with the changeset
        header file and the patch file as arguments, then reads the possibly
        rewritten (user, date, message) back from the header file.
        '''

        self.ui.status(_(b'filtering %s\n') % patchfile)
        user, date, msg = (changelog[1], changelog[2], changelog[4])
        # write the changeset metadata to a temporary header file that the
        # filter command may edit in place
        fd, headerfile = pycompat.mkstemp(prefix=b'hg-transplant-')
        fp = os.fdopen(fd, 'wb')
        fp.write(b"# HG changeset patch\n")
        fp.write(b"# User %s\n" % user)
        fp.write(b"# Date %d %d\n" % date)
        fp.write(msg + b'\n')
        fp.close()

        try:
            self.ui.system(
                b'%s %s %s'
                % (
                    filter,
                    procutil.shellquote(headerfile),
                    procutil.shellquote(patchfile),
                ),
                environ={
                    b'HGUSER': changelog[1],
                    b'HGREVISION': nodemod.hex(node),
                },
                onerr=error.Abort,
                errprefix=_(b'filter failed'),
                blockedtag=b'transplant_filter',
            )
            # pick up whatever the filter wrote back into the header file
            user, date, msg = self.parselog(open(headerfile, b'rb'))[1:4]
        finally:
            os.unlink(headerfile)

        return (user, date, msg)
321 321
    def applyone(
        self, repo, node, cl, patchfile, merge=False, log=False, filter=None
    ):
        '''apply the patch in patchfile to the repository as a transplant

        cl: the changelog entry of `node` in the source repository
        Returns the new local node, or None if the commit turned out empty.
        Raises TransplantError when the patch fails to apply, after logging
        journal state so --continue can pick up.
        '''
        (manifest, user, (time, timezone), files, message) = cl[:5]
        date = b"%d %d" % (time, timezone)
        extra = {b'transplant_source': node}
        if filter:
            (user, date, message) = self.filter(filter, node, cl, patchfile)

        if log:
            # we don't translate messages inserted into commits
            message += b'\n(transplanted from %s)' % nodemod.hex(node)

        self.ui.status(_(b'applying %s\n') % nodemod.short(node))
        self.ui.note(b'%s %s\n%s\n' % (user, date, message))

        if not patchfile and not merge:
            raise error.Abort(_(b'can only omit patchfile if merging'))
        if patchfile:
            try:
                files = set()
                patch.patch(self.ui, repo, patchfile, files=files, eolmode=None)
                files = list(files)
            except Exception as inst:
                # record journal state so 'transplant --continue' can resume
                seriespath = os.path.join(self.path, b'series')
                if os.path.exists(seriespath):
                    os.unlink(seriespath)
                p1 = repo.dirstate.p1()
                p2 = node
                self.log(user, date, message, p1, p2, merge=merge)
                self.ui.write(stringutil.forcebytestr(inst) + b'\n')
                raise TransplantError(
                    _(
                        b'fix up the working directory and run '
                        b'hg transplant --continue'
                    )
                )
        else:
            files = None
        if merge:
            p1 = repo.dirstate.p1()
            repo.setparents(p1, node)
            m = match.always()
        else:
            # commit only the files the patch touched
            m = match.exact(files)

        n = repo.commit(
            message,
            user,
            date,
            extra=extra,
            match=m,
            editor=self.getcommiteditor(),
        )
        if not n:
            self.ui.warn(
                _(b'skipping emptied changeset %s\n') % nodemod.short(node)
            )
            return None
        if not merge:
            self.transplants.set(n, node)

        return n
386 386
387 387 def canresume(self):
388 388 return os.path.exists(os.path.join(self.path, b'journal'))
389 389
    def resume(self, repo, source, opts):
        '''recover last transaction and apply remaining changesets

        First commits the in-progress changeset from the journal (if any),
        then re-reads the series file and applies what is left.
        '''
        if os.path.exists(os.path.join(self.path, b'journal')):
            n, node = self.recover(repo, source, opts)
            if n:
                self.ui.status(
                    _(b'%s transplanted as %s\n')
                    % (nodemod.short(node), nodemod.short(n))
                )
            else:
                self.ui.status(
                    _(b'%s skipped due to empty diff\n')
                    % (nodemod.short(node),)
                )
        seriespath = os.path.join(self.path, b'series')
        if not os.path.exists(seriespath):
            # nothing left to apply; just flush the transplant map
            self.transplants.write()
            return
        nodes, merges = self.readseries()
        revmap = {}
        for n in nodes:
            revmap[source.changelog.rev(n)] = n
        os.unlink(seriespath)

        self.apply(repo, source, revmap, merges, opts)
415 415
    def recover(self, repo, source, opts):
        '''commit working directory using journal metadata

        Returns (newnode_or_None, sourcenode). Aborts if the journal is
        corrupt or the working directory is not at the expected parent.
        '''
        node, user, date, message, parents = self.readlog()
        merge = False

        if not user or not date or not message or not parents[0]:
            raise error.Abort(_(b'transplant log file is corrupt'))

        parent = parents[0]
        if len(parents) > 1:
            # the interrupted changeset was a merge
            if opts.get(b'parent'):
                parent = source.lookup(opts[b'parent'])
                if parent not in parents:
                    raise error.Abort(
                        _(b'%s is not a parent of %s')
                        % (nodemod.short(parent), nodemod.short(node))
                    )
            else:
                merge = True

        extra = {b'transplant_source': node}
        try:
            p1 = repo.dirstate.p1()
            if p1 != parent:
                raise error.Abort(
                    _(b'working directory not at transplant parent %s')
                    % nodemod.hex(parent)
                )
            if merge:
                repo.setparents(p1, parents[1])
            st = repo.status()
            modified, added, removed, deleted = (
                st.modified,
                st.added,
                st.removed,
                st.deleted,
            )
            if merge or modified or added or removed or deleted:
                n = repo.commit(
                    message,
                    user,
                    date,
                    extra=extra,
                    editor=self.getcommiteditor(),
                )
                if not n:
                    raise error.Abort(_(b'commit failed'))
                if not merge:
                    self.transplants.set(n, node)
            else:
                # nothing to commit: the transplant produced an empty diff
                n = None
            self.unlog()

            return n, node
        finally:
            # TODO: get rid of this meaningless try/finally enclosing.
            # this is kept only to reduce changes in a patch.
            pass
474 474
475 475 def stop(self, ui, repo):
476 476 """logic to stop an interrupted transplant"""
477 477 if self.canresume():
478 478 startctx = repo[b'.']
479 hg.updaterepo(repo, startctx.node(), overwrite=True)
479 merge.clean_update(startctx)
480 480 ui.status(_(b"stopped the interrupted transplant\n"))
481 481 ui.status(
482 482 _(b"working directory is now at %s\n") % startctx.hex()[:12]
483 483 )
484 484 self.unlog()
485 485 return 0
486 486
487 487 def readseries(self):
488 488 nodes = []
489 489 merges = []
490 490 cur = nodes
491 491 for line in self.opener.read(b'series').splitlines():
492 492 if line.startswith(b'# Merges'):
493 493 cur = merges
494 494 continue
495 495 cur.append(revlog.bin(line))
496 496
497 497 return (nodes, merges)
498 498
499 499 def saveseries(self, revmap, merges):
500 500 if not revmap:
501 501 return
502 502
503 503 if not os.path.isdir(self.path):
504 504 os.mkdir(self.path)
505 505 series = self.opener(b'series', b'w')
506 506 for rev in sorted(revmap):
507 507 series.write(nodemod.hex(revmap[rev]) + b'\n')
508 508 if merges:
509 509 series.write(b'# Merges\n')
510 510 for m in merges:
511 511 series.write(nodemod.hex(m) + b'\n')
512 512 series.close()
513 513
514 514 def parselog(self, fp):
515 515 parents = []
516 516 message = []
517 517 node = revlog.nullid
518 518 inmsg = False
519 519 user = None
520 520 date = None
521 521 for line in fp.read().splitlines():
522 522 if inmsg:
523 523 message.append(line)
524 524 elif line.startswith(b'# User '):
525 525 user = line[7:]
526 526 elif line.startswith(b'# Date '):
527 527 date = line[7:]
528 528 elif line.startswith(b'# Node ID '):
529 529 node = revlog.bin(line[10:])
530 530 elif line.startswith(b'# Parent '):
531 531 parents.append(revlog.bin(line[9:]))
532 532 elif not line.startswith(b'# '):
533 533 inmsg = True
534 534 message.append(line)
535 535 if None in (user, date):
536 536 raise error.Abort(
537 537 _(b"filter corrupted changeset (no user or date)")
538 538 )
539 539 return (node, user, date, b'\n'.join(message), parents)
540 540
541 541 def log(self, user, date, message, p1, p2, merge=False):
542 542 '''journal changelog metadata for later recover'''
543 543
544 544 if not os.path.isdir(self.path):
545 545 os.mkdir(self.path)
546 546 fp = self.opener(b'journal', b'w')
547 547 fp.write(b'# User %s\n' % user)
548 548 fp.write(b'# Date %s\n' % date)
549 549 fp.write(b'# Node ID %s\n' % nodemod.hex(p2))
550 550 fp.write(b'# Parent ' + nodemod.hex(p1) + b'\n')
551 551 if merge:
552 552 fp.write(b'# Parent ' + nodemod.hex(p2) + b'\n')
553 553 fp.write(message.rstrip() + b'\n')
554 554 fp.close()
555 555
556 556 def readlog(self):
557 557 return self.parselog(self.opener(b'journal'))
558 558
559 559 def unlog(self):
560 560 '''remove changelog journal'''
561 561 absdst = os.path.join(self.path, b'journal')
562 562 if os.path.exists(absdst):
563 563 os.unlink(absdst)
564 564
565 565 def transplantfilter(self, repo, source, root):
566 566 def matchfn(node):
567 567 if self.applied(repo, node, root):
568 568 return False
569 569 if source.changelog.parents(node)[1] != revlog.nullid:
570 570 return False
571 571 extra = source.changelog.read(node)[5]
572 572 cnode = extra.get(b'transplant_source')
573 573 if cnode and self.applied(repo, cnode, root):
574 574 return False
575 575 return True
576 576
577 577 return matchfn
578 578
579 579
580 580 def hasnode(repo, node):
581 581 try:
582 582 return repo.changelog.rev(node) is not None
583 583 except error.StorageError:
584 584 return False
585 585
586 586
587 587 def browserevs(ui, repo, nodes, opts):
588 588 '''interactively transplant changesets'''
589 589 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
590 590 transplants = []
591 591 merges = []
592 592 prompt = _(
593 593 b'apply changeset? [ynmpcq?]:'
594 594 b'$$ &yes, transplant this changeset'
595 595 b'$$ &no, skip this changeset'
596 596 b'$$ &merge at this changeset'
597 597 b'$$ show &patch'
598 598 b'$$ &commit selected changesets'
599 599 b'$$ &quit and cancel transplant'
600 600 b'$$ &? (show this help)'
601 601 )
602 602 for node in nodes:
603 603 displayer.show(repo[node])
604 604 action = None
605 605 while not action:
606 606 choice = ui.promptchoice(prompt)
607 607 action = b'ynmpcq?'[choice : choice + 1]
608 608 if action == b'?':
609 609 for c, t in ui.extractchoices(prompt)[1]:
610 610 ui.write(b'%s: %s\n' % (c, t))
611 611 action = None
612 612 elif action == b'p':
613 613 parent = repo.changelog.parents(node)[0]
614 614 for chunk in patch.diff(repo, parent, node):
615 615 ui.write(chunk)
616 616 action = None
617 617 if action == b'y':
618 618 transplants.append(node)
619 619 elif action == b'm':
620 620 merges.append(node)
621 621 elif action == b'c':
622 622 break
623 623 elif action == b'q':
624 624 transplants = ()
625 625 merges = ()
626 626 break
627 627 displayer.close()
628 628 return (transplants, merges)
629 629
630 630
631 631 @command(
632 632 b'transplant',
633 633 [
634 634 (
635 635 b's',
636 636 b'source',
637 637 b'',
638 638 _(b'transplant changesets from REPO'),
639 639 _(b'REPO'),
640 640 ),
641 641 (
642 642 b'b',
643 643 b'branch',
644 644 [],
645 645 _(b'use this source changeset as head'),
646 646 _(b'REV'),
647 647 ),
648 648 (
649 649 b'a',
650 650 b'all',
651 651 None,
652 652 _(b'pull all changesets up to the --branch revisions'),
653 653 ),
654 654 (b'p', b'prune', [], _(b'skip over REV'), _(b'REV')),
655 655 (b'm', b'merge', [], _(b'merge at REV'), _(b'REV')),
656 656 (
657 657 b'',
658 658 b'parent',
659 659 b'',
660 660 _(b'parent to choose when transplanting merge'),
661 661 _(b'REV'),
662 662 ),
663 663 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
664 664 (b'', b'log', None, _(b'append transplant info to log message')),
665 665 (b'', b'stop', False, _(b'stop interrupted transplant')),
666 666 (
667 667 b'c',
668 668 b'continue',
669 669 None,
670 670 _(b'continue last transplant session after fixing conflicts'),
671 671 ),
672 672 (
673 673 b'',
674 674 b'filter',
675 675 b'',
676 676 _(b'filter changesets through command'),
677 677 _(b'CMD'),
678 678 ),
679 679 ],
680 680 _(
681 681 b'hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
682 682 b'[-m REV] [REV]...'
683 683 ),
684 684 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
685 685 )
686 686 def transplant(ui, repo, *revs, **opts):
687 687 '''transplant changesets from another branch
688 688
689 689 Selected changesets will be applied on top of the current working
690 690 directory with the log of the original changeset. The changesets
691 691 are copied and will thus appear twice in the history with different
692 692 identities.
693 693
694 694 Consider using the graft command if everything is inside the same
695 695 repository - it will use merges and will usually give a better result.
696 696 Use the rebase extension if the changesets are unpublished and you want
697 697 to move them instead of copying them.
698 698
699 699 If --log is specified, log messages will have a comment appended
700 700 of the form::
701 701
702 702 (transplanted from CHANGESETHASH)
703 703
704 704 You can rewrite the changelog message with the --filter option.
705 705 Its argument will be invoked with the current changelog message as
706 706 $1 and the patch as $2.
707 707
708 708 --source/-s specifies another repository to use for selecting changesets,
709 709 just as if it temporarily had been pulled.
710 710 If --branch/-b is specified, these revisions will be used as
711 711 heads when deciding which changesets to transplant, just as if only
712 712 these revisions had been pulled.
713 713 If --all/-a is specified, all the revisions up to the heads specified
714 714 with --branch will be transplanted.
715 715
716 716 Example:
717 717
718 718 - transplant all changes up to REV on top of your current revision::
719 719
720 720 hg transplant --branch REV --all
721 721
722 722 You can optionally mark selected transplanted changesets as merge
723 723 changesets. You will not be prompted to transplant any ancestors
724 724 of a merged transplant, and you can merge descendants of them
725 725 normally instead of transplanting them.
726 726
727 727 Merge changesets may be transplanted directly by specifying the
728 728 proper parent changeset by calling :hg:`transplant --parent`.
729 729
730 730 If no merges or revisions are provided, :hg:`transplant` will
731 731 start an interactive changeset browser.
732 732
733 733 If a changeset application fails, you can fix the merge by hand
734 734 and then resume where you left off by calling :hg:`transplant
735 735 --continue/-c`.
736 736 '''
737 737 with repo.wlock():
738 738 return _dotransplant(ui, repo, *revs, **opts)
739 739
740 740
741 741 def _dotransplant(ui, repo, *revs, **opts):
742 742 def incwalk(repo, csets, match=util.always):
743 743 for node in csets:
744 744 if match(node):
745 745 yield node
746 746
747 747 def transplantwalk(repo, dest, heads, match=util.always):
748 748 '''Yield all nodes that are ancestors of a head but not ancestors
749 749 of dest.
750 750 If no heads are specified, the heads of repo will be used.'''
751 751 if not heads:
752 752 heads = repo.heads()
753 753 ancestors = []
754 754 ctx = repo[dest]
755 755 for head in heads:
756 756 ancestors.append(ctx.ancestor(repo[head]).node())
757 757 for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
758 758 if match(node):
759 759 yield node
760 760
761 761 def checkopts(opts, revs):
762 762 if opts.get(b'continue'):
763 763 cmdutil.check_incompatible_arguments(
764 764 opts, b'continue', [b'branch', b'all', b'merge']
765 765 )
766 766 return
767 767 if opts.get(b'stop'):
768 768 cmdutil.check_incompatible_arguments(
769 769 opts, b'stop', [b'branch', b'all', b'merge']
770 770 )
771 771 return
772 772 if not (
773 773 opts.get(b'source')
774 774 or revs
775 775 or opts.get(b'merge')
776 776 or opts.get(b'branch')
777 777 ):
778 778 raise error.Abort(
779 779 _(
780 780 b'no source URL, branch revision, or revision '
781 781 b'list provided'
782 782 )
783 783 )
784 784 if opts.get(b'all'):
785 785 if not opts.get(b'branch'):
786 786 raise error.Abort(_(b'--all requires a branch revision'))
787 787 if revs:
788 788 raise error.Abort(
789 789 _(b'--all is incompatible with a revision list')
790 790 )
791 791
792 792 opts = pycompat.byteskwargs(opts)
793 793 checkopts(opts, revs)
794 794
795 795 if not opts.get(b'log'):
796 796 # deprecated config: transplant.log
797 797 opts[b'log'] = ui.config(b'transplant', b'log')
798 798 if not opts.get(b'filter'):
799 799 # deprecated config: transplant.filter
800 800 opts[b'filter'] = ui.config(b'transplant', b'filter')
801 801
802 802 tp = transplanter(ui, repo, opts)
803 803
804 804 p1 = repo.dirstate.p1()
805 805 if len(repo) > 0 and p1 == revlog.nullid:
806 806 raise error.Abort(_(b'no revision checked out'))
807 807 if opts.get(b'continue'):
808 808 if not tp.canresume():
809 809 raise error.Abort(_(b'no transplant to continue'))
810 810 elif opts.get(b'stop'):
811 811 if not tp.canresume():
812 812 raise error.Abort(_(b'no interrupted transplant found'))
813 813 return tp.stop(ui, repo)
814 814 else:
815 815 cmdutil.checkunfinished(repo)
816 816 cmdutil.bailifchanged(repo)
817 817
818 818 sourcerepo = opts.get(b'source')
819 819 if sourcerepo:
820 820 peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
821 821 heads = pycompat.maplist(peer.lookup, opts.get(b'branch', ()))
822 822 target = set(heads)
823 823 for r in revs:
824 824 try:
825 825 target.add(peer.lookup(r))
826 826 except error.RepoError:
827 827 pass
828 828 source, csets, cleanupfn = bundlerepo.getremotechanges(
829 829 ui, repo, peer, onlyheads=sorted(target), force=True
830 830 )
831 831 else:
832 832 source = repo
833 833 heads = pycompat.maplist(source.lookup, opts.get(b'branch', ()))
834 834 cleanupfn = None
835 835
836 836 try:
837 837 if opts.get(b'continue'):
838 838 tp.resume(repo, source, opts)
839 839 return
840 840
841 841 tf = tp.transplantfilter(repo, source, p1)
842 842 if opts.get(b'prune'):
843 843 prune = {
844 844 source[r].node()
845 845 for r in scmutil.revrange(source, opts.get(b'prune'))
846 846 }
847 847 matchfn = lambda x: tf(x) and x not in prune
848 848 else:
849 849 matchfn = tf
850 850 merges = pycompat.maplist(source.lookup, opts.get(b'merge', ()))
851 851 revmap = {}
852 852 if revs:
853 853 for r in scmutil.revrange(source, revs):
854 854 revmap[int(r)] = source[r].node()
855 855 elif opts.get(b'all') or not merges:
856 856 if source != repo:
857 857 alltransplants = incwalk(source, csets, match=matchfn)
858 858 else:
859 859 alltransplants = transplantwalk(
860 860 source, p1, heads, match=matchfn
861 861 )
862 862 if opts.get(b'all'):
863 863 revs = alltransplants
864 864 else:
865 865 revs, newmerges = browserevs(ui, source, alltransplants, opts)
866 866 merges.extend(newmerges)
867 867 for r in revs:
868 868 revmap[source.changelog.rev(r)] = r
869 869 for r in merges:
870 870 revmap[source.changelog.rev(r)] = r
871 871
872 872 tp.apply(repo, source, revmap, merges, opts)
873 873 finally:
874 874 if cleanupfn:
875 875 cleanupfn()
876 876
877 877
878 878 def continuecmd(ui, repo):
879 879 """logic to resume an interrupted transplant using
880 880 'hg continue'"""
881 881 with repo.wlock():
882 882 tp = transplanter(ui, repo, {})
883 883 return tp.resume(repo, repo, {})
884 884
885 885
886 886 revsetpredicate = registrar.revsetpredicate()
887 887
888 888
889 889 @revsetpredicate(b'transplanted([set])')
890 890 def revsettransplanted(repo, subset, x):
891 891 """Transplanted changesets in set, or all transplanted changesets.
892 892 """
893 893 if x:
894 894 s = revset.getset(repo, subset, x)
895 895 else:
896 896 s = subset
897 897 return smartset.baseset(
898 898 [r for r in s if repo[r].extra().get(b'transplant_source')]
899 899 )
900 900
901 901
902 902 templatekeyword = registrar.templatekeyword()
903 903
904 904
905 905 @templatekeyword(b'transplanted', requires={b'ctx'})
906 906 def kwtransplanted(context, mapping):
907 907 """String. The node identifier of the transplanted
908 908 changeset if any."""
909 909 ctx = context.resource(mapping, b'ctx')
910 910 n = ctx.extra().get(b'transplant_source')
911 911 return n and nodemod.hex(n) or b''
912 912
913 913
914 914 def extsetup(ui):
915 915 statemod.addunfinished(
916 916 b'transplant',
917 917 fname=b'transplant/journal',
918 918 clearable=True,
919 919 continuefunc=continuecmd,
920 920 statushint=_(
921 921 b'To continue: hg transplant --continue\n'
922 922 b'To stop: hg transplant --stop'
923 923 ),
924 924 cmdhint=_(b"use 'hg transplant --continue' or 'hg transplant --stop'"),
925 925 )
926 926
927 927
928 928 # tell hggettext to extract docstrings from these functions:
929 929 i18nfunctions = [revsettransplanted, kwtransplanted]
@@ -1,4217 +1,4216
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy as copymod
11 11 import errno
12 12 import os
13 13 import re
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22 from .pycompat import (
23 23 getattr,
24 24 open,
25 25 setattr,
26 26 )
27 27 from .thirdparty import attr
28 28
29 29 from . import (
30 30 bookmarks,
31 31 changelog,
32 32 copies,
33 33 crecord as crecordmod,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 formatter,
38 38 logcmdutil,
39 39 match as matchmod,
40 40 merge as mergemod,
41 41 mergestate as mergestatemod,
42 42 mergeutil,
43 43 obsolete,
44 44 patch,
45 45 pathutil,
46 46 phases,
47 47 pycompat,
48 48 repair,
49 49 revlog,
50 50 rewriteutil,
51 51 scmutil,
52 52 smartset,
53 53 state as statemod,
54 54 subrepoutil,
55 55 templatekw,
56 56 templater,
57 57 util,
58 58 vfs as vfsmod,
59 59 )
60 60
61 61 from .utils import (
62 62 dateutil,
63 63 stringutil,
64 64 )
65 65
66 66 if pycompat.TYPE_CHECKING:
67 67 from typing import (
68 68 Any,
69 69 Dict,
70 70 )
71 71
72 72 for t in (Any, Dict):
73 73 assert t
74 74
75 75 stringio = util.stringio
76 76
77 77 # templates of common command options
78 78
79 79 dryrunopts = [
80 80 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
81 81 ]
82 82
83 83 confirmopts = [
84 84 (b'', b'confirm', None, _(b'ask before applying actions')),
85 85 ]
86 86
87 87 remoteopts = [
88 88 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
89 89 (
90 90 b'',
91 91 b'remotecmd',
92 92 b'',
93 93 _(b'specify hg command to run on the remote side'),
94 94 _(b'CMD'),
95 95 ),
96 96 (
97 97 b'',
98 98 b'insecure',
99 99 None,
100 100 _(b'do not verify server certificate (ignoring web.cacerts config)'),
101 101 ),
102 102 ]
103 103
104 104 walkopts = [
105 105 (
106 106 b'I',
107 107 b'include',
108 108 [],
109 109 _(b'include names matching the given patterns'),
110 110 _(b'PATTERN'),
111 111 ),
112 112 (
113 113 b'X',
114 114 b'exclude',
115 115 [],
116 116 _(b'exclude names matching the given patterns'),
117 117 _(b'PATTERN'),
118 118 ),
119 119 ]
120 120
121 121 commitopts = [
122 122 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
123 123 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
124 124 ]
125 125
126 126 commitopts2 = [
127 127 (
128 128 b'd',
129 129 b'date',
130 130 b'',
131 131 _(b'record the specified date as commit date'),
132 132 _(b'DATE'),
133 133 ),
134 134 (
135 135 b'u',
136 136 b'user',
137 137 b'',
138 138 _(b'record the specified user as committer'),
139 139 _(b'USER'),
140 140 ),
141 141 ]
142 142
143 143 commitopts3 = [
144 144 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
145 145 (b'U', b'currentuser', None, _(b'record the current user as committer')),
146 146 ]
147 147
148 148 formatteropts = [
149 149 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
150 150 ]
151 151
152 152 templateopts = [
153 153 (
154 154 b'',
155 155 b'style',
156 156 b'',
157 157 _(b'display using template map file (DEPRECATED)'),
158 158 _(b'STYLE'),
159 159 ),
160 160 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
161 161 ]
162 162
163 163 logopts = [
164 164 (b'p', b'patch', None, _(b'show patch')),
165 165 (b'g', b'git', None, _(b'use git extended diff format')),
166 166 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
167 167 (b'M', b'no-merges', None, _(b'do not show merges')),
168 168 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
169 169 (b'G', b'graph', None, _(b"show the revision DAG")),
170 170 ] + templateopts
171 171
172 172 diffopts = [
173 173 (b'a', b'text', None, _(b'treat all files as text')),
174 174 (
175 175 b'g',
176 176 b'git',
177 177 None,
178 178 _(b'use git extended diff format (DEFAULT: diff.git)'),
179 179 ),
180 180 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
181 181 (b'', b'nodates', None, _(b'omit dates from diff headers')),
182 182 ]
183 183
184 184 diffwsopts = [
185 185 (
186 186 b'w',
187 187 b'ignore-all-space',
188 188 None,
189 189 _(b'ignore white space when comparing lines'),
190 190 ),
191 191 (
192 192 b'b',
193 193 b'ignore-space-change',
194 194 None,
195 195 _(b'ignore changes in the amount of white space'),
196 196 ),
197 197 (
198 198 b'B',
199 199 b'ignore-blank-lines',
200 200 None,
201 201 _(b'ignore changes whose lines are all blank'),
202 202 ),
203 203 (
204 204 b'Z',
205 205 b'ignore-space-at-eol',
206 206 None,
207 207 _(b'ignore changes in whitespace at EOL'),
208 208 ),
209 209 ]
210 210
211 211 diffopts2 = (
212 212 [
213 213 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
214 214 (
215 215 b'p',
216 216 b'show-function',
217 217 None,
218 218 _(
219 219 b'show which function each change is in (DEFAULT: diff.showfunc)'
220 220 ),
221 221 ),
222 222 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
223 223 ]
224 224 + diffwsopts
225 225 + [
226 226 (
227 227 b'U',
228 228 b'unified',
229 229 b'',
230 230 _(b'number of lines of context to show'),
231 231 _(b'NUM'),
232 232 ),
233 233 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
234 234 (
235 235 b'',
236 236 b'root',
237 237 b'',
238 238 _(b'produce diffs relative to subdirectory'),
239 239 _(b'DIR'),
240 240 ),
241 241 ]
242 242 )
243 243
244 244 mergetoolopts = [
245 245 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
246 246 ]
247 247
248 248 similarityopts = [
249 249 (
250 250 b's',
251 251 b'similarity',
252 252 b'',
253 253 _(b'guess renamed files by similarity (0<=s<=100)'),
254 254 _(b'SIMILARITY'),
255 255 )
256 256 ]
257 257
258 258 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
259 259
260 260 debugrevlogopts = [
261 261 (b'c', b'changelog', False, _(b'open changelog')),
262 262 (b'm', b'manifest', False, _(b'open manifest')),
263 263 (b'', b'dir', b'', _(b'open directory manifest')),
264 264 ]
265 265
266 266 # special string such that everything below this line will be ingored in the
267 267 # editor text
268 268 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
269 269
270 270
271 271 def check_at_most_one_arg(opts, *args):
272 272 """abort if more than one of the arguments are in opts
273 273
274 274 Returns the unique argument or None if none of them were specified.
275 275 """
276 276
277 277 def to_display(name):
278 278 return pycompat.sysbytes(name).replace(b'_', b'-')
279 279
280 280 previous = None
281 281 for x in args:
282 282 if opts.get(x):
283 283 if previous:
284 284 raise error.Abort(
285 285 _(b'cannot specify both --%s and --%s')
286 286 % (to_display(previous), to_display(x))
287 287 )
288 288 previous = x
289 289 return previous
290 290
291 291
292 292 def check_incompatible_arguments(opts, first, others):
293 293 """abort if the first argument is given along with any of the others
294 294
295 295 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
296 296 among themselves, and they're passed as a single collection.
297 297 """
298 298 for other in others:
299 299 check_at_most_one_arg(opts, first, other)
300 300
301 301
302 302 def resolvecommitoptions(ui, opts):
303 303 """modify commit options dict to handle related options
304 304
305 305 The return value indicates that ``rewrite.update-timestamp`` is the reason
306 306 the ``date`` option is set.
307 307 """
308 308 check_at_most_one_arg(opts, b'date', b'currentdate')
309 309 check_at_most_one_arg(opts, b'user', b'currentuser')
310 310
311 311 datemaydiffer = False # date-only change should be ignored?
312 312
313 313 if opts.get(b'currentdate'):
314 314 opts[b'date'] = b'%d %d' % dateutil.makedate()
315 315 elif (
316 316 not opts.get(b'date')
317 317 and ui.configbool(b'rewrite', b'update-timestamp')
318 318 and opts.get(b'currentdate') is None
319 319 ):
320 320 opts[b'date'] = b'%d %d' % dateutil.makedate()
321 321 datemaydiffer = True
322 322
323 323 if opts.get(b'currentuser'):
324 324 opts[b'user'] = ui.username()
325 325
326 326 return datemaydiffer
327 327
328 328
329 329 def checknotesize(ui, opts):
330 330 """ make sure note is of valid format """
331 331
332 332 note = opts.get(b'note')
333 333 if not note:
334 334 return
335 335
336 336 if len(note) > 255:
337 337 raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
338 338 if b'\n' in note:
339 339 raise error.Abort(_(b"note cannot contain a newline"))
340 340
341 341
342 342 def ishunk(x):
343 343 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
344 344 return isinstance(x, hunkclasses)
345 345
346 346
347 347 def newandmodified(chunks, originalchunks):
348 348 newlyaddedandmodifiedfiles = set()
349 349 alsorestore = set()
350 350 for chunk in chunks:
351 351 if (
352 352 ishunk(chunk)
353 353 and chunk.header.isnewfile()
354 354 and chunk not in originalchunks
355 355 ):
356 356 newlyaddedandmodifiedfiles.add(chunk.header.filename())
357 357 alsorestore.update(
358 358 set(chunk.header.files()) - {chunk.header.filename()}
359 359 )
360 360 return newlyaddedandmodifiedfiles, alsorestore
361 361
362 362
363 363 def parsealiases(cmd):
364 364 return cmd.split(b"|")
365 365
366 366
367 367 def setupwrapcolorwrite(ui):
368 368 # wrap ui.write so diff output can be labeled/colorized
369 369 def wrapwrite(orig, *args, **kw):
370 370 label = kw.pop('label', b'')
371 371 for chunk, l in patch.difflabel(lambda: args):
372 372 orig(chunk, label=label + l)
373 373
374 374 oldwrite = ui.write
375 375
376 376 def wrap(*args, **kwargs):
377 377 return wrapwrite(oldwrite, *args, **kwargs)
378 378
379 379 setattr(ui, 'write', wrap)
380 380 return oldwrite
381 381
382 382
383 383 def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
384 384 try:
385 385 if usecurses:
386 386 if testfile:
387 387 recordfn = crecordmod.testdecorator(
388 388 testfile, crecordmod.testchunkselector
389 389 )
390 390 else:
391 391 recordfn = crecordmod.chunkselector
392 392
393 393 return crecordmod.filterpatch(
394 394 ui, originalhunks, recordfn, operation
395 395 )
396 396 except crecordmod.fallbackerror as e:
397 397 ui.warn(b'%s\n' % e)
398 398 ui.warn(_(b'falling back to text mode\n'))
399 399
400 400 return patch.filterpatch(ui, originalhunks, match, operation)
401 401
402 402
403 403 def recordfilter(ui, originalhunks, match, operation=None):
404 404 """ Prompts the user to filter the originalhunks and return a list of
405 405 selected hunks.
406 406 *operation* is used for to build ui messages to indicate the user what
407 407 kind of filtering they are doing: reverting, committing, shelving, etc.
408 408 (see patch.filterpatch).
409 409 """
410 410 usecurses = crecordmod.checkcurses(ui)
411 411 testfile = ui.config(b'experimental', b'crecordtest')
412 412 oldwrite = setupwrapcolorwrite(ui)
413 413 try:
414 414 newchunks, newopts = filterchunks(
415 415 ui, originalhunks, usecurses, testfile, match, operation
416 416 )
417 417 finally:
418 418 ui.write = oldwrite
419 419 return newchunks, newopts
420 420
421 421
422 422 def dorecord(
423 423 ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
424 424 ):
425 425 opts = pycompat.byteskwargs(opts)
426 426 if not ui.interactive():
427 427 if cmdsuggest:
428 428 msg = _(b'running non-interactively, use %s instead') % cmdsuggest
429 429 else:
430 430 msg = _(b'running non-interactively')
431 431 raise error.Abort(msg)
432 432
433 433 # make sure username is set before going interactive
434 434 if not opts.get(b'user'):
435 435 ui.username() # raise exception, username not provided
436 436
437 437 def recordfunc(ui, repo, message, match, opts):
438 438 """This is generic record driver.
439 439
440 440 Its job is to interactively filter local changes, and
441 441 accordingly prepare working directory into a state in which the
442 442 job can be delegated to a non-interactive commit command such as
443 443 'commit' or 'qrefresh'.
444 444
445 445 After the actual job is done by non-interactive command, the
446 446 working directory is restored to its original state.
447 447
448 448 In the end we'll record interesting changes, and everything else
449 449 will be left in place, so the user can continue working.
450 450 """
451 451 if not opts.get(b'interactive-unshelve'):
452 452 checkunfinished(repo, commit=True)
453 453 wctx = repo[None]
454 454 merge = len(wctx.parents()) > 1
455 455 if merge:
456 456 raise error.Abort(
457 457 _(
458 458 b'cannot partially commit a merge '
459 459 b'(use "hg commit" instead)'
460 460 )
461 461 )
462 462
463 463 def fail(f, msg):
464 464 raise error.Abort(b'%s: %s' % (f, msg))
465 465
466 466 force = opts.get(b'force')
467 467 if not force:
468 468 match = matchmod.badmatch(match, fail)
469 469
470 470 status = repo.status(match=match)
471 471
472 472 overrides = {(b'ui', b'commitsubrepos'): True}
473 473
474 474 with repo.ui.configoverride(overrides, b'record'):
475 475 # subrepoutil.precommit() modifies the status
476 476 tmpstatus = scmutil.status(
477 477 copymod.copy(status.modified),
478 478 copymod.copy(status.added),
479 479 copymod.copy(status.removed),
480 480 copymod.copy(status.deleted),
481 481 copymod.copy(status.unknown),
482 482 copymod.copy(status.ignored),
483 483 copymod.copy(status.clean), # pytype: disable=wrong-arg-count
484 484 )
485 485
486 486 # Force allows -X subrepo to skip the subrepo.
487 487 subs, commitsubs, newstate = subrepoutil.precommit(
488 488 repo.ui, wctx, tmpstatus, match, force=True
489 489 )
490 490 for s in subs:
491 491 if s in commitsubs:
492 492 dirtyreason = wctx.sub(s).dirtyreason(True)
493 493 raise error.Abort(dirtyreason)
494 494
495 495 if not force:
496 496 repo.checkcommitpatterns(wctx, match, status, fail)
497 497 diffopts = patch.difffeatureopts(
498 498 ui,
499 499 opts=opts,
500 500 whitespace=True,
501 501 section=b'commands',
502 502 configprefix=b'commit.interactive.',
503 503 )
504 504 diffopts.nodates = True
505 505 diffopts.git = True
506 506 diffopts.showfunc = True
507 507 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
508 508 originalchunks = patch.parsepatch(originaldiff)
509 509 match = scmutil.match(repo[None], pats)
510 510
511 511 # 1. filter patch, since we are intending to apply subset of it
512 512 try:
513 513 chunks, newopts = filterfn(ui, originalchunks, match)
514 514 except error.PatchError as err:
515 515 raise error.Abort(_(b'error parsing patch: %s') % err)
516 516 opts.update(newopts)
517 517
518 518 # We need to keep a backup of files that have been newly added and
519 519 # modified during the recording process because there is a previous
520 520 # version without the edit in the workdir. We also will need to restore
521 521 # files that were the sources of renames so that the patch application
522 522 # works.
523 523 newlyaddedandmodifiedfiles, alsorestore = newandmodified(
524 524 chunks, originalchunks
525 525 )
526 526 contenders = set()
527 527 for h in chunks:
528 528 try:
529 529 contenders.update(set(h.files()))
530 530 except AttributeError:
531 531 pass
532 532
533 533 changed = status.modified + status.added + status.removed
534 534 newfiles = [f for f in changed if f in contenders]
535 535 if not newfiles:
536 536 ui.status(_(b'no changes to record\n'))
537 537 return 0
538 538
539 539 modified = set(status.modified)
540 540
541 541 # 2. backup changed files, so we can restore them in the end
542 542
543 543 if backupall:
544 544 tobackup = changed
545 545 else:
546 546 tobackup = [
547 547 f
548 548 for f in newfiles
549 549 if f in modified or f in newlyaddedandmodifiedfiles
550 550 ]
551 551 backups = {}
552 552 if tobackup:
553 553 backupdir = repo.vfs.join(b'record-backups')
554 554 try:
555 555 os.mkdir(backupdir)
556 556 except OSError as err:
557 557 if err.errno != errno.EEXIST:
558 558 raise
559 559 try:
560 560 # backup continues
561 561 for f in tobackup:
562 562 fd, tmpname = pycompat.mkstemp(
563 563 prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
564 564 )
565 565 os.close(fd)
566 566 ui.debug(b'backup %r as %r\n' % (f, tmpname))
567 567 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
568 568 backups[f] = tmpname
569 569
570 570 fp = stringio()
571 571 for c in chunks:
572 572 fname = c.filename()
573 573 if fname in backups:
574 574 c.write(fp)
575 575 dopatch = fp.tell()
576 576 fp.seek(0)
577 577
578 578 # 2.5 optionally review / modify patch in text editor
579 579 if opts.get(b'review', False):
580 580 patchtext = (
581 581 crecordmod.diffhelptext
582 582 + crecordmod.patchhelptext
583 583 + fp.read()
584 584 )
585 585 reviewedpatch = ui.edit(
586 586 patchtext, b"", action=b"diff", repopath=repo.path
587 587 )
588 588 fp.truncate(0)
589 589 fp.write(reviewedpatch)
590 590 fp.seek(0)
591 591
592 592 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
593 593 # 3a. apply filtered patch to clean repo (clean)
594 594 if backups:
595 595 m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
596 596 mergemod.revert_to(repo[b'.'], matcher=m)
597 597
598 598 # 3b. (apply)
599 599 if dopatch:
600 600 try:
601 601 ui.debug(b'applying patch\n')
602 602 ui.debug(fp.getvalue())
603 603 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
604 604 except error.PatchError as err:
605 605 raise error.Abort(pycompat.bytestr(err))
606 606 del fp
607 607
608 608 # 4. We prepared working directory according to filtered
609 609 # patch. Now is the time to delegate the job to
610 610 # commit/qrefresh or the like!
611 611
612 612 # Make all of the pathnames absolute.
613 613 newfiles = [repo.wjoin(nf) for nf in newfiles]
614 614 return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
615 615 finally:
616 616 # 5. finally restore backed-up files
617 617 try:
618 618 dirstate = repo.dirstate
619 619 for realname, tmpname in pycompat.iteritems(backups):
620 620 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
621 621
622 622 if dirstate[realname] == b'n':
623 623 # without normallookup, restoring timestamp
624 624 # may cause partially committed files
625 625 # to be treated as unmodified
626 626 dirstate.normallookup(realname)
627 627
628 628 # copystat=True here and above are a hack to trick any
629 629 # editors that have f open that we haven't modified them.
630 630 #
631 631 # Also note that this racy as an editor could notice the
632 632 # file's mtime before we've finished writing it.
633 633 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
634 634 os.unlink(tmpname)
635 635 if tobackup:
636 636 os.rmdir(backupdir)
637 637 except OSError:
638 638 pass
639 639
640 640 def recordinwlock(ui, repo, message, match, opts):
641 641 with repo.wlock():
642 642 return recordfunc(ui, repo, message, match, opts)
643 643
644 644 return commit(ui, repo, recordinwlock, pats, opts)
645 645
646 646
class dirnode(object):
    """A node in the directory tree built while tersing status output.

    path is the path to the directory, without a trailing '/'.

    statuses collects the status codes of every file at or below this
    directory (subdirectories included).

    files holds (filename, status) pairs for direct children only.

    subdirs maps a child directory name to its own dirnode object.
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record a direct child file of this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """Register a file under this directory, descending as needed.

        When filename contains a path separator the file belongs to a
        subdirectory: the matching child dirnode is created on demand and
        the remaining path is added there recursively.
        """
        if b'/' not in filename:
            # a direct child of this directory
            self._addfileindir(filename, status)
        else:
            head, rest = filename.split(b'/', 1)
            child = self.subdirs.get(head)
            if child is None:
                child = dirnode(pathutil.join(self.path, head))
                self.subdirs[head] = child
            child.addfile(rest, status)

        # every directory on the path remembers the statuses seen below it
        self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, st in self.files:
            yield st, pathutil.join(self.path, name)

    def tersewalk(self, terseargs):
        """Yield (status, path) pairs for this subtree, tersing if possible.

        terseargs is the string of arguments the user passed with the
        `--terse` flag.

        Two cases:

        1) Every file below this directory shares one status and the user
        asked to terse that status -> yield a single (status, dirpath)
        entry, with dirpath ending in '/'.

        2) Otherwise yield (status, filepath) for the direct files of this
        directory, then recurse into each subdirectory.
        """
        if len(self.statuses) == 1:
            # NOTE: pop() intentionally empties the (single-element) set,
            # matching the original destructive behavior.
            onlyst = self.statuses.pop()

            # terse only when the status abbreviation was actually
            # requested by the user
            if onlyst in terseargs:
                yield onlyst, self.path + b'/'
                return

        # emit this directory's own files
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # then walk each subdirectory recursively
        for child in self.subdirs.values():
            for st, fpath in child.tersewalk(terseargs):
                yield st, fpath
746 746
747 747
def tersedir(statuslist, terseargs):
    """Collapse per-file statuses into per-directory ones where possible.

    statuslist is a scmutil.status() object which contains a list of files
    for each status. terseargs is the string the user passed as the
    argument to the `--terse` flag.

    Builds a tree of dirnode objects rooted at the repository root; each
    node carries enough information to decide whether its directory can be
    tersed.
    """
    # the order matters here as that is used to produce the final list
    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # validate the --terse argument up front
    for ch in pycompat.bytestr(terseargs):
        if ch not in allst:
            raise error.Abort(_(b"'%s' not recognized") % ch)

    # dirnode for the root of the repo
    rootobj = dirnode(b'')
    pstatus = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # the root dir itself is never tersed, so emit its direct files as-is
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # walk each subtree, tersing where possible
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    # assemble the per-status lists in canonical order, each sorted
    return scmutil.status(*[sorted(tersedict[st]) for st in allst])
803 803
804 804
805 805 def _commentlines(raw):
806 806 '''Surround lineswith a comment char and a new line'''
807 807 lines = raw.splitlines()
808 808 commentedlines = [b'# %s' % line for line in lines]
809 809 return b'\n'.join(commentedlines) + b'\n'
810 810
811 811
@attr.s(frozen=True)
class morestatus(object):
    """Extra information shown by `hg status` for unfinished operations.

    Carries the name/hint of an unfinished multi-step operation and the
    unresolved paths of an active merge, and renders them through a
    formatter. Built by readmorestatus().
    """

    reporoot = attr.ib()  # repository root (bytes path)
    unfinishedop = attr.ib()  # name of the unfinished state, or None
    unfinishedmsg = attr.ib()  # hint message for that state, or None
    activemerge = attr.ib()  # True if a merge is in progress
    unresolvedpaths = attr.ib()  # sorted unresolved paths, or None
    # Use a factory so each instance gets its own set: a plain
    # default=set() would share one mutable set across ALL instances,
    # leaking formatted paths between morestatus objects.
    _formattedpaths = attr.ib(init=False, default=attr.Factory(set))
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        """Record that *path* was output; flag it unresolved if needed."""
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        """Emit the morestatus footer (unfinished state and conflicts)."""
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

            if self.unfinishedop:
                fm.data(unfinished=self.unfinishedop)
                statemsg = (
                    _(b'The repository is in an unfinished *%s* state.')
                    % self.unfinishedop
                )
                fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
            if self.unfinishedmsg:
                fm.data(unfinishedmsg=self.unfinishedmsg)

            # May also start new data items.
            self._formatconflicts(fm)

            if self.unfinishedmsg:
                fm.plain(
                    b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label
                )

    def _formatconflicts(self, fm):
        """List unresolved merge conflicts that were not already output."""
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b' %s'
                    % util.pathto(self.reporoot, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    '''Unresolved merge conflicts:

 %s

 To mark files as resolved: hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
889 889
890 890
def readmorestatus(repo):
    """Return a morestatus object if the repo has unfinished state,
    otherwise None."""
    statetuple = statemod.getrepostate(repo)
    mergestate = mergestatemod.mergestate.read(repo)
    activemerge = mergestate.active()
    if not (statetuple or activemerge):
        # nothing interesting to report
        return None

    op = msg = unresolved = None
    if statetuple:
        op, msg = statetuple
    if activemerge:
        unresolved = sorted(mergestate.unresolved())
    return morestatus(repo.root, op, msg, activemerge, unresolved)
907 907
908 908
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry) for each matching command.
    Debug commands (or their aliases) are returned only when no normal
    command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            # accept an unambiguous prefix of any alias; first hit wins
            found = next((a for a in aliases if a.startswith(cmd)), None)
        if found is not None:
            isdebug = aliases[0].startswith(b"debug") or found.startswith(
                b"debug"
            )
            bucket = debugchoice if isdebug else choice
            bucket[found] = (aliases, table[entry])

    if not choice and debugchoice:
        # fall back to debug commands only when nothing else matched
        choice = debugchoice

    return choice, allcmds
946 946
947 947
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    if cmd in choice:
        return choice[cmd]

    nmatches = len(choice)
    if nmatches > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))
    if nmatches == 1:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
963 963
964 964
def changebranch(ui, repo, revs, label, opts):
    """Change the branch name of the given revs to label.

    Rewrites each changeset in revs (a linear, obsolete-free stack of
    topological heads) as a new commit on branch 'label', creates
    obsolescence markers mapping old to new nodes, and moves the working
    copy if it was on a rewritten changeset. Raises error.Abort on any
    precondition failure.
    """

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.Abort(b"empty revision set")
        # a single root guarantees the selected revs form one linear stack
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            raise error.Abort(
                _(b"cannot change branch of non-linear revisions")
            )
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        # branches of the stack's parents; moving "back" onto a parent's
        # branch is always allowed, even if that branch already exists
        rpb = {parent.branch() for parent in root.parents()}
        if (
            not opts.get(b'force')
            and label not in rpb
            and label in repo.branchmap()
        ):
            raise error.Abort(_(b"a branch of the same name already exists"))

        if repo.revs(b'obsolete() and %ld', revs):
            raise error.Abort(
                _(b"cannot change branch of a obsolete changeset")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.Abort(_(b"cannot change branch in middle of a stack"))

        # maps old node -> (new node,) for rewritten changesets
        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            def filectxfn(repo, newctx, path):
                # reuse file content from the original changeset; None
                # signals a file removed in this changeset
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1067 1067
1068 1068
def findrepo(p):
    """Return the nearest ancestor of *p* (including p itself) that
    contains a '.hg' directory, or None if no repository is found."""
    while not os.path.isdir(os.path.join(p, b".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # reached the filesystem root without finding a repo
            return None
        p = parent

    return p
1076 1076
1077 1077
def bailifchanged(repo, merge=True, hint=None):
    """Enforce the precondition that the working directory must be clean.

    'merge' can be set to False if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to the Abort exception.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint)
    wdstatus = repo.status()
    dirty = (
        wdstatus.modified
        or wdstatus.added
        or wdstatus.removed
        or wdstatus.deleted
    )
    if dirty:
        raise error.Abort(_(b'uncommitted changes'), hint=hint)
    # subrepos get to veto as well
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
1095 1095
1096 1096
def logmessage(ui, opts):
    """Get the log message according to the -m and -l options."""
    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    # -m given, or neither option: return whatever -m held
    if message or not logfile:
        return message

    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        # normalize line endings while reading the log file
        return b'\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(
            _(b"can't read commit message '%s': %s")
            % (logfile, encoding.strtolocal(inst.strerror))
        )
1117 1117
1118 1118
def mergeeditform(ctxorbool, baseformname):
    """Return the appropriate editform name (referencing a committemplate).

    'ctxorbool' is either a ctx to be committed, or a bool indicating
    whether a merge is committed.

    Returns baseformname with '.merge' appended if it is a merge,
    otherwise with '.normal' appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a changeset with two parents is a merge
        ismerge = len(ctxorbool.parents()) > 1
    suffix = b".merge" if ismerge else b".normal"
    return baseformname + suffix
1135 1135
1136 1136
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """Get the appropriate commit message editor for the '--edit' option.

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing but before
    the empty-ness check; it returns the actual text to store in history,
    allowing the description to be altered before storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line; an 'HG: ' prefix and EOL
    are added automatically.

    'editform' is a dot-separated list of names distinguishing the
    purpose of the commit text editing.

    'commitforceeditor' is returned regardless of 'edit' whenever
    'finishdesc' or 'extramsg' is given, because those are specific to
    usage in MQ.
    """
    if edit or finishdesc or extramsg:

        def forcingeditor(r, c, s):
            return commitforceeditor(
                r,
                c,
                s,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return forcingeditor
    if editform:
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)
    return commiteditor
1167 1167
1168 1168
def _escapecommandtemplate(tmpl):
    """Escape the literal string parts of a template, leaving template
    fragments untouched."""
    pieces = []
    for typ, start, end in templater.scantemplate(tmpl, raw=True):
        chunk = tmpl[start:end]
        if typ == b'string':
            chunk = stringutil.escapestr(chunk)
        pieces.append(chunk)
    return b''.join(pieces)
1177 1177
1178 1178
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        return tmpl
    escaped = _escapecommandtemplate(tmpl)
    return formatter.maketemplater(ui, escaped).renderdefault(props)
1196 1196
1197 1197
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    t = formatter.maketemplater(
        repo.ui,
        tmpl,
        defaults=templatekw.keywords,
        resources=formatter.templateresources(repo.ui, repo),
    )
    mapping = {b'ctx': ctx}
    mapping.update(props or {})
    return t.renderdefault(mapping)
1213 1213
1214 1214
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # map of '%x' directives to their template equivalents
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    # directives below are only valid when the caller supplied the
    # corresponding context value
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # with both known, zero-pad the sequence number to the total's width
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    # only literal string segments are scanned for '%' directives;
    # template fragments ({...}) pass through untouched
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # no more '%' in this segment; escape the remainder
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            # literal text before the '%' directive
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            # pat[n] is '%'; pat[n + 1] is the directive character
            c = pat[n + 1 : n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % c
                )
    return b''.join(newname)
1284 1284
1285 1285
def makefilename(ctx, pat, **props):
    """Expand an old-style %-format filename pattern against ctx."""
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    tmpl = _buildfntemplate(pat, **props)
    return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
1294 1294
1295 1295
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if pat:
        return pat == b'-'
    # empty or None pattern also means stdio
    return True
1299 1299
1300 1300
1301 1301 class _unclosablefile(object):
1302 1302 def __init__(self, fp):
1303 1303 self._fp = fp
1304 1304
1305 1305 def close(self):
1306 1306 pass
1307 1307
1308 1308 def __iter__(self):
1309 1309 return iter(self._fp)
1310 1310
1311 1311 def __getattr__(self, attr):
1312 1312 return getattr(self._fp, attr)
1313 1313
1314 1314 def __enter__(self):
1315 1315 return self
1316 1316
1317 1317 def __exit__(self, exc_type, exc_value, exc_tb):
1318 1318 pass
1319 1319
1320 1320
def makefileobj(ctx, pat, mode=b'wb', **props):
    """Open the file named by pattern *pat*, or wrap a stdio stream.

    A stdio pattern ('-' or empty) yields an unclosable wrapper around the
    ui's input or output stream, chosen according to *mode*.
    """
    if isstdiofilename(pat):
        ui = ctx.repo().ui
        if mode in (b'r', b'rb'):
            stream = ui.fin
        else:
            stream = ui.fout
        return _unclosablefile(stream)
    fn = makefilename(ctx, pat, **props)
    return open(fn, mode)
1333 1333
1334 1334
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """Open the changelog, manifest, a filelog or a given revlog.

    The target is selected by the mutually-exclusive 'changelog',
    'manifest' and 'dir' entries of opts, or by the 'file_' path.
    With returnrevlog=True a raw revlog instance is returned (unwrapping
    storage objects or opening the path as a revlog file directly);
    raises error.Abort on invalid option combinations or when no suitable
    storage is found.
    """
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    # validate option combinations first; 'msg' stays None when valid
    msg = None
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            # use the unfiltered changelog so hidden revisions are visible
            r = repo.unfiltered().changelog
        elif dir:
            if not scmutil.istreemanifest(repo):
                raise error.Abort(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            # only accept the directory manifest if it has any revisions
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            # an empty filelog means the file is not tracked here
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.Abort(_(b'%r does not appear to be a revlog') % r)

    if not r:
        if not returnrevlog:
            raise error.Abort(_(b'cannot give path to non-revlog'))

        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_(b"revlog '%s' not found") % file_)
        # last resort: open the given path directly as a revlog index file
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
        )
    return r
1402 1402
1403 1403
def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    Like ``openstorage()``, except a revlog is always returned.

    Most callers care about the main storage object rather than the revlog
    backing it, so this should only be used by code examining low-level
    revlog implementation details, e.g. debug commands.
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1415 1415
1416 1416
def copy(ui, repo, pats, opts, rename=False):
    """Copy or rename files (backend for `hg copy` and `hg rename`).

    ``pats`` holds the source patterns followed by the destination;
    with --forget it instead names files whose copy records should be
    removed. With --at-rev the given commit is amended to record (or
    unrecord) the copy instead of touching the working directory.

    Returns True if any file failed to be copied/renamed; the --forget
    and --at-rev paths return None.
    """
    check_incompatible_arguments(opts, b'forget', [b'dry_run'])

    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    forget = opts.get(b"forget")
    after = opts.get(b"after")
    dryrun = opts.get(b"dry_run")
    rev = opts.get(b'at_rev')
    if rev:
        if not forget and not after:
            # TODO: Remove this restriction and make it also create the copy
            #       targets (and remove the rename source if rename==True).
            raise error.Abort(_(b'--at-rev requires --after'))
        ctx = scmutil.revsingle(repo, rev)
        if len(ctx.parents()) > 1:
            raise error.Abort(_(b'cannot mark/unmark copy in merge commit'))
    else:
        ctx = repo[None]

    pctx = ctx.p1()

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    # --forget: unmark files previously recorded as copied instead of
    # performing a copy.
    if forget:
        if ctx.rev() is None:
            new_ctx = ctx
        else:
            if len(ctx.parents()) > 1:
                raise error.Abort(_(b'cannot unmark copy in merge commit'))
            # avoid cycle context -> subrepo -> cmdutil
            from . import context

            rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
            new_ctx = context.overlayworkingctx(repo)
            new_ctx.setbase(ctx.p1())
            mergemod.graft(repo, ctx, wctx=new_ctx)

        match = scmutil.match(ctx, pats, opts)

        current_copies = ctx.p1copies()
        current_copies.update(ctx.p2copies())

        uipathfn = scmutil.getuipathfn(repo)
        for f in ctx.walk(match):
            if f in current_copies:
                new_ctx[f].markcopied(None)
            elif match.exact(f):
                ui.warn(
                    _(
                        b'%s: not unmarking as copy - file is not marked as copied\n'
                    )
                    % uipathfn(f)
                )

        if ctx.rev() is not None:
            # --at-rev: commit the amended metadata and retarget the
            # dirstate/obsmarkers at the replacement node.
            with repo.lock():
                mem_ctx = new_ctx.tomemctx_for_amend(ctx)
                new_node = mem_ctx.commit()

                if repo.dirstate.p1() == ctx.node():
                    with repo.dirstate.parentchange():
                        scmutil.movedirstate(repo, repo[new_node])
                    replacements = {ctx.node(): [new_node]}
                    scmutil.cleanupnodes(
                        repo, replacements, b'uncopy', fixphase=True
                    )

        return

    # Normal copy/rename: the last pattern is the destination.
    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_(b'no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_(b'no destination specified'))
    dest = pats.pop()

    def walkpat(pat):
        # Resolve one source pattern to a list of (abs, rel, exact)
        # tuples, warning about unmanaged/removed files named exactly.
        srcs = []
        # TODO: Inline and simplify the non-working-copy version of this code
        # since it shares very little with the working-copy version of it.
        ctx_to_walk = ctx if ctx.rev() is None else pctx
        m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
        for abs in ctx_to_walk.walk(m):
            rel = uipathfn(abs)
            exact = m.exact(abs)
            if abs not in ctx:
                if abs in pctx:
                    if not after:
                        if exact:
                            ui.warn(
                                _(
                                    b'%s: not copying - file has been marked '
                                    b'for remove\n'
                                )
                                % rel
                            )
                        continue
                else:
                    if exact:
                        ui.warn(
                            _(b'%s: not copying - file is not managed\n') % rel
                        )
                    continue

            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    if ctx.rev() is not None:
        # --at-rev: amend the given commit so it records the copy,
        # leaving the working directory contents alone.
        rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
        absdest = pathutil.canonpath(repo.root, cwd, dest)
        if ctx.hasdir(absdest):
            raise error.Abort(
                _(b'%s: --at-rev does not support a directory as destination')
                % uipathfn(absdest)
            )
        if absdest not in ctx:
            raise error.Abort(
                _(b'%s: copy destination does not exist in %s')
                % (uipathfn(absdest), ctx)
            )

        # avoid cycle context -> subrepo -> cmdutil
        from . import context

        copylist = []
        for pat in pats:
            srcs = walkpat(pat)
            if not srcs:
                continue
            for abs, rel, exact in srcs:
                copylist.append(abs)

        if not copylist:
            raise error.Abort(_(b'no files to copy'))
        # TODO: Add support for `hg cp --at-rev . foo bar dir` and
        # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
        # existing functions below.
        if len(copylist) != 1:
            raise error.Abort(_(b'--at-rev requires a single source'))

        new_ctx = context.overlayworkingctx(repo)
        new_ctx.setbase(ctx.p1())
        mergemod.graft(repo, ctx, wctx=new_ctx)

        new_ctx.markcopied(absdest, copylist[0])

        with repo.lock():
            mem_ctx = new_ctx.tomemctx_for_amend(ctx)
            new_node = mem_ctx.commit()

            if repo.dirstate.p1() == ctx.node():
                with repo.dirstate.parentchange():
                    scmutil.movedirstate(repo, repo[new_node])
                replacements = {ctx.node(): [new_node]}
                scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)

        return

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # Copy/rename one file in the working directory and record the
        # copy in the dirstate. Returns True on failure.
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if b'/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit(b'/', 1)
            abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(
                _(b'%s: not overwriting - %s collides with %s\n')
                % (
                    reltarget,
                    repo.pathto(abssrc, cwd),
                    repo.pathto(prevsrc, cwd),
                )
            )
            return True  # report a failure

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
                abstarget
            ):
                if not rename:
                    ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
                    return True  # report a failure
                exists = False
                samefile = True

        if not after and exists or after and state in b'mn':
            if not opts[b'force']:
                if state in b'mn':
                    msg = _(b'%s: not overwriting - file already committed\n')
                    if after:
                        flags = b'--after --force'
                    else:
                        flags = b'--force'
                    if rename:
                        hint = (
                            _(
                                b"('hg rename %s' to replace the file by "
                                b'recording a rename)\n'
                            )
                            % flags
                        )
                    else:
                        hint = (
                            _(
                                b"('hg copy %s' to replace the file by "
                                b'recording a copy)\n'
                            )
                            % flags
                        )
                else:
                    msg = _(b'%s: not overwriting - file exists\n')
                    if rename:
                        hint = _(
                            b"('hg rename --after' to record the rename)\n"
                        )
                    else:
                        hint = _(b"('hg copy --after' to record the copy)\n")
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return True  # report a failure

        if after:
            if not exists:
                if rename:
                    ui.warn(
                        _(b'%s: not recording move - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                else:
                    ui.warn(
                        _(b'%s: not recording copy - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                return True  # report a failure
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or b'.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temp name so the OS
                    # actually applies the case change
                    tmp = target + b"~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(
                        _(b'%s: cannot copy - %s\n')
                        % (relsrc, encoding.strtolocal(inst.strerror))
                    )
                    return True  # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(
            ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
        )
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
            ctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(
                        dest, os.path.basename(util.localpath(p))
                    )
                else:
                    res = lambda p: dest
        return res

    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(
                _(
                    b'with multiple sources, destination must be an '
                    b'existing directory'
                )
            )
        if util.endswithsep(dest):
            raise error.Abort(_(b'destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_(b'no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    return errors != 0
1818 1818
1819 1819
## facility to let extension process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1840 1840
1841 1841
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple (all ``None``/``False`` when
    the patch data carries no filename).
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    if not tmpname:
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    if len(parents) == 1:
        parents.append(repo[nullid])
    if opts.get(b'exact'):
        if not nodeid or not p1:
            raise error.Abort(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullid]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullid]
        except error.RepoError:
            p1, p2 = parents
        if p2.node() == nullid:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    # Without --bypass, the patch is applied to the working directory.
    if update:
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchError as e:
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass: apply the patch in memory against p1 and commit the
        # result directly, leaving the working directory untouched.
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )

            overrides = {}
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects
2043 2043
2044 2044
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
2052 2052
2053 2053
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Emit one changeset as an "HG changeset patch" through formatter ``fm``.

    ``seqno`` is the 1-based position of this patch in the exported series;
    it is forwarded to extension-registered header writers (extraexportmap).
    When ``switch_parent`` is true the parent list is reversed so the diff
    base becomes the second parent (when one exists).
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        parents.reverse()

    # diff against the first (possibly switched) parent, or null for roots
    if parents:
        prev = parents[0]
    else:
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
2097 2097
2098 2098
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Write the patches for ``revs`` to one destination via ``fm``.

    ``dest`` names starting with ``<`` (e.g. ``<unnamed>``) denote streams
    and are not echoed to the user.
    """
    seqno = 0
    for rev in revs:
        seqno += 1
        if not dest.startswith(b'<'):
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(
            repo, repo[rev], fm, match, switch_parent, seqno, diffopts
        )
2107 2107
2108 2108
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets, routing each to a file named by ``fntemplate``.

    Revisions whose rendered file names coincide are appended to the same
    file, in series order.
    """
    total = len(revs)
    revwidth = max(len(str(r)) for r in revs)
    # insertion-ordered mapping: rendered filename -> [(seqno, rev), ...]
    per_file = util.sortdict()

    for seqno, rev in enumerate(revs, 1):
        name = makefilename(
            repo[rev], fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        per_file.setdefault(name, []).append((seqno, rev))

    for name, entries in per_file.items():
        with formatter.maybereopen(basefm, name) as fm:
            repo.ui.note(b"%s\n" % name)
            for seqno, rev in entries:
                fm.startitem()
                _exportsingle(
                    repo, repo[rev], fm, match, switch_parent, seqno, diffopts
                )
2133 2133
2134 2134
def _prefetchchangedfiles(repo, revs, match):
    """Prefetch every file touched by ``revs`` that ``match`` accepts.

    A ``match`` of None means all changed files are prefetched.
    """
    changed = {
        fname
        for rev in revs
        for fname in repo[rev].files()
        if not match or match(fname)
    }
    filematcher = scmutil.matchfiles(repo, changed)
    revmatches = [(rev, filematcher) for rev in revs]
    scmutil.prefetchfiles(repo, revmatches)
2144 2144
2145 2145
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    """Write the given revisions as "HG changeset patch" documents.

    repo: repository to export from.
    revs: revision numbers to export.
    basefm: formatter receiving the output (unless a template routes
        revisions to their own files).
    fntemplate: when non-empty, a template producing one file name per
        revision; revisions rendering to the same name share a file.
    switch_parent: show diffs against the second parent when it is not
        null; otherwise always diff against p1.
    opts: diff options used to generate the patches.
    match: optional matcher restricting which file changes are exported.

    Returns nothing; the patch data is emitted as a side effect, either to
    ``basefm`` or to the template-derived files.
    """
    # warm the file cache up front so per-revision diffing is cheap
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
    else:
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
2187 2187
2188 2188
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export the patches for ``revs`` to the already-open stream ``fp``."""
    _prefetchchangedfiles(repo, revs, match)

    destname = getattr(fp, 'name', b'<unnamed>')
    with formatter.formatter(repo.ui, fp, b'export', {}) as fm:
        _exportfile(repo, revs, fm, destname, switch_parent, opts, match)
2196 2196
2197 2197
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    Writes the marker's predecessor, successors, flags, parents, date and
    metadata through formatter ``fm``; ``index`` is an optional leading
    position number. To be used by debug function."""
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    fm.condwrite(
        succs,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, succs), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parents), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    meta = marker.metadata().copy()
    # the date was rendered separately above; drop it from the metadata dict
    meta.pop(b'date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
2228 2228
2229 2229
def finddate(ui, repo, date):
    """Return (as bytes) the tipmost revision matching the date spec ``date``.

    Aborts when no revision matches.
    """
    candidates = repo.revs(b'date(%s)', date)
    try:
        rev = candidates.max()
    except ValueError:
        raise error.Abort(_(b"revision matching date not found"))

    revdate = dateutil.datestr(repo[rev].date())
    ui.status(_(b"found revision %d from %s\n") % (rev, revdate))
    return b'%d' % rev
2243 2243
2244 2244
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield ever-larger window sizes, doubling until ``sizelimit``.

    The generator is infinite: once the size reaches ``sizelimit`` it is
    yielded forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
2250 2250
2251 2251
def _walkrevs(repo, opts):
    """Resolve the revision set that log-style commands should visit.

    The default revset depends on --follow, while --follow's meaning in
    turn depends on explicitly requested revisions, hence the combined
    handling here. The result is returned newest-first.
    """
    follow = opts.get(b'follow') or opts.get(b'follow_first')
    revspec = opts.get(b'rev')
    if revspec:
        revs = scmutil.revrange(repo, revspec)
        if follow:
            # restrict to ancestors of the requested revisions
            revs = repo.revs(b'reverse(::%ld)', revs)
    elif follow:
        if repo.dirstate.p1() == nullid:
            # nothing checked out, so there is nothing to follow
            revs = smartset.baseset()
        else:
            revs = repo.revs(b'reverse(:.)')
    else:
        revs = smartset.spanset(repo)
        revs.reverse()
    return revs
2270 2270
2271 2271
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone.

    walkfilerevs() raises this so callers can fall back to a slower,
    changelog-based walk.
    """

    pass
2274 2274
2275 2275
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    ``fncache`` is a dict mapping rev -> list of files, and is mutated in
    place as matching revisions are found.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)

    def filerevs(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in pycompat.xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append(
                (linkrev, parentlinkrevs, follow and filelog.renamed(n))
            )

        return reversed(revs)

    def iterfiles():
        # Yield (filename, filenode-or-None) pairs for the matched files,
        # followed by any copy sources discovered during the main loop.
        pctx = repo[b'.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(
                        _(
                            b'cannot follow file not in parent '
                            b'revision: "%s"'
                        )
                        % filename
                    )
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _(b'cannot follow nonexistent file: "%s"') % file_
                    )
                raise FileWalkError(b"Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevs(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2381 2381
2382 2382
class _followfilter(object):
    """Incrementally decide whether revisions are connected to a start rev.

    The first revision passed to match() becomes the start revision; later
    calls return True for revisions reachable from it, tracking the
    frontier in ``self.roots``. NOTE(review): the incremental ``roots``
    bookkeeping appears to assume match() is fed revisions moving
    monotonically away from the start revision — confirm with callers.
    """

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        # startrev is latched on the first match() call
        self.startrev = nullrev
        self.roots = set()
        # onlyfirst: follow only the first parent of merges
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parent revs of ``rev``, excluding null; restricted to the
            # first parent when onlyfirst is set
            try:
                if self.onlyfirst:
                    return self.repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(
                        lambda x: x != nullrev,
                        self.repo.changelog.parentrevs(rev),
                    )
            except error.WdirUnsupported:
                # the working directory has no changelog entry; ask the
                # context for its parents instead
                prevs = [p.rev() for p in self.repo[rev].parents()]
                if self.onlyfirst:
                    return prevs[:1]
                else:
                    return prevs

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
2429 2429
2430 2430
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.

    ``match`` selects files, ``opts`` is the bytes-keyed option dict of
    the calling command (``follow``, ``follow_first``, ``removed``,
    ``prune``, ``all_files``), and ``prepare(ctx, fns)`` receives each
    context together with an iterable of its matched filenames.'''

    allfiles = opts.get(b'all_files')
    follow = opts.get(b'follow') or opts.get(b'follow_first')
    revs = _walkrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # patterns (or --removed with a non-trivial matcher) force the slow
    # changelog-scanning path below
    slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
    fncache = {}
    # cheap rev -> changectx accessor
    change = repo.__getitem__

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always() or allfiles:
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == b'.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(
                _(b'can only follow copies/renames for explicit filenames')
            )

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            # Set-like object that decides membership on first lookup by
            # inspecting the changeset's file list, caching the result.
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # first time we see this rev: check its files once
                    self.revs.discard(value)
                    ctx = change(value)
                    if allfiles:
                        matches = list(ctx.manifest().walk(match))
                    else:
                        matches = [f for f in ctx.files() if match(f)]
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get(b'prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in pycompat.xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): relies on 'wanted' supporting subtraction
                # of a plain list (smartsets do); looks like a plain set
                # from walkfilerevs would not -- confirm the prune+pattern
                # combination
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            # no explicit files: follow the ancestry of the start rev
            ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first'))

            def want(rev):
                return ff.match(rev) and rev in wanted

        else:

            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            # gather up to windowsize interesting revs from the stream
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # forward pass: let the caller gather data per context
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:

                    # NOTE(review): fns_generator closes over 'ctx'; it is
                    # handed to prepare() within the same loop iteration,
                    # but a prepare() that iterates it lazily after ctx is
                    # rebound would see the wrong context -- confirm
                    def fns_generator():
                        if allfiles:

                            def bad(f, msg):
                                pass

                            for f in ctx.matches(matchmod.badmatch(match, bad)):
                                yield f
                        else:
                            for f in ctx.files():
                                if match(f):
                                    yield f

                    fns = fns_generator()
                prepare(ctx, fns)
            # display pass: yield in the originally requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2585 2585
2586 2586
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule files matched by ``match`` for addition (``hg add``),
    recursing into subrepositories.

    ``prefix`` is this repo's path relative to the outermost repo and
    ``uipathfn`` renders paths for display.  With ``explicitonly``, only
    exact (explicitly named) matches are added.  Honors
    ``opts['subrepos']`` and ``opts['dry_run']``.  Returns the list of
    matched files that could not be added.
    """
    bad = []

    # record files the matcher flags as bad, then delegate to the
    # matcher's own bad-file handling
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    # case-collision auditing, enabled only when portability alerts are
    # configured to warn or abort
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        # non-exact matches are only added when untracked and present on
        # disk (lexists also catches dangling symlinks)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            # with --subrepos, recurse non-explicitly (patterns apply
            # inside the subrepo); otherwise only explicit files
            if opts.get('subrepos'):
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2645 2645
2646 2646
def addwebdirpath(repo, serverpath, webconf):
    """Register ``repo`` under ``serverpath`` in the hgweb configuration
    dict ``webconf``, then recurse into subrepositories declared by any
    revision that touched ``.hgsub``."""
    root = repo.root
    webconf[serverpath] = root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, root))

    # every revision touching .hgsub may declare subrepos worth serving
    for rev in repo.revs(b'filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2655 2655
2656 2656
def forget(
    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Stop tracking files without deleting them (``hg forget``),
    recursing into subrepositories.

    With ``explicitonly``, only exact matches are forgotten; with
    ``interactive``, the user is prompted per file.  Returns a pair
    ``(bad, forgot)``: files that could not be forgotten, and the files
    actually forgotten (subrepo files are prefixed with their subrepo
    path).
    """
    if dryrun and interactive:
        raise error.Abort(_(b"cannot specify both --dry-run and --interactive"))
    bad = []
    # record files the matcher flags as bad, then delegate
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # NB: this local deliberately shadows the enclosing function name
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(
                submatch,
                subprefix,
                subuipathfn,
                dryrun=dryrun,
                interactive=interactive,
            )
            bad.extend([subpath + b'/' + f for f in subbad])
            forgot.extend([subpath + b'/' + f for f in subforgot])
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not explicitonly:
        # warn about explicitly named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(
                            _(
                                b'not removing %s: '
                                b'file is already untracked\n'
                            )
                            % uipathfn(f)
                        )
                    bad.append(f)

    if interactive:
        responses = _(
            b'[Ynsa?]'
            b'$$ &Yes, forget this file'
            b'$$ &No, skip this file'
            b'$$ &Skip remaining files'
            b'$$ Include &all remaining files'
            b'$$ &? (display help)'
        )
        # iterate over a copy since 'forget' is mutated by the 'no' and
        # 'skip' answers below
        for filename in forget[:]:
            r = ui.promptchoice(
                _(b'forget %s %s') % (uipathfn(filename), responses)
            )
            if r == 4:  # ? -- show the choices, then re-prompt
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(
                        _(b'forget %s %s') % (uipathfn(filename), responses)
                    )
            if r == 0:  # yes
                continue
            elif r == 1:  # no
                forget.remove(filename)
            elif r == 2:  # Skip -- drop this and all remaining files
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3:  # All -- keep everything remaining
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2754 2754
2755 2755
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """Print the files of ``ctx`` matched by ``m`` through formatter
    ``fm``.

    ``fmt`` is the per-file output template and ``uipathfn`` converts
    repository paths for display.  Subrepositories are visited when
    ``subrepos`` is set or the matcher names them.  Returns 0 if at
    least one file was printed, 1 otherwise.
    """
    ret = 1

    needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
    if fm.isplain() and not needsfctx:
        # Fast path: bypass the formatter and batch output so ui.write
        # runs once per ~100 entries rather than once per file.
        pending = []
        for f in ctx.matches(m):
            pending.append(fmt % uipathfn(f))
            if len(pending) > 100:
                ui.write(b''.join(pending))
                del pending[:]
            ret = 0
        if pending:
            ui.write(b''.join(pending))
    else:
        for f in ctx.matches(m):
            fm.startitem()
            fm.context(ctx=ctx)
            if needsfctx:
                fctx = ctx[f]
                fm.write(
                    b'size flags', b'% 10d % 1s ', fctx.size(), fctx.flags()
                )
            fm.data(path=f)
            fm.plain(fmt % uipathfn(f))
            ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if not (subrepos or m.exact(subpath) or any(submatch.files())):
            continue
        sub = ctx.sub(subpath)
        try:
            recurse = m.exact(subpath) or subrepos
            subret = sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
            if subret == 0:
                ret = 0
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return ret
2802 2802
2803 2803
def remove(
    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
):
    """Implement ``hg remove``: unlink matched files and stop tracking
    them.

    ``after`` only records deletions already made on disk (``--after``);
    ``force`` also removes modified/added files; ``warnings`` is passed
    by recursive subrepo calls so all messages are emitted by the
    outermost invocation.  Returns 0 on success, 1 if any file was
    skipped or warned about.
    """
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean

    wctx = repo[None]

    # only the outermost call (warnings=None) actually prints warnings
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(
        _(b'searching'), total=len(subs), unit=_(b'subrepos')
    )
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(
                    submatch,
                    subprefix,
                    subuipathfn,
                    after,
                    force,
                    subrepos,
                    dryrun,
                    warnings,
                ):
                    ret = 1
            except error.LookupError:
                warnings.append(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = pathutil.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(
        _(b'deleting'), total=len(files), unit=_(b'files')
    )
    for f in files:

        def insubrepo():
            # True if f lives under one of this repo's subrepositories
            for subpath in wctx.substate:
                if f.startswith(subpath + b'/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(
                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
                )
            else:
                warnings.append(
                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
                )
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    # NB: the local 'list' below shadows the builtin; it is the set of
    # files that will actually be forgotten (and possibly unlinked)
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=len(remaining), unit=_(b'files')
        )
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(
                    _(b'not removing %s: file still exists\n') % uipathfn(f)
                )
                ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
        )
        for f in modified:
            progress.increment()
            warnings.append(
                _(
                    b'not removing %s: file is modified (use -f'
                    b' to force removal)\n'
                )
                % uipathfn(f)
            )
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(
                _(
                    b"not removing %s: file has been marked for add"
                    b" (use 'hg forget' to undo add)\n"
                )
                % uipathfn(f)
            )
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(
        _(b'deleting'), total=len(list), unit=_(b'files')
    )
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue  # we never unlink added files on remove
                    rmdir = repo.ui.configbool(
                        b'experimental', b'removeemptydirs'
                    )
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2953 2953
2954 2954
2955 2955 def _catfmtneedsdata(fm):
2956 2956 return not fm.datahint() or b'data' in fm.datahint()
2957 2957
2958 2958
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first."""

    # Fetching data() can be expensive (e.g. lfs), so skip it entirely
    # when the formatter did not ask for file contents.
    if _catfmtneedsdata(fm):
        contents = ctx[path].data()
        if decode:
            contents = ctx.repo().wwritedata(path, contents)
    else:
        contents = b''
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write(b'data', b'%s', contents)
    fm.data(path=path)
2976 2976
2977 2977
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the contents of files in ``ctx`` matched by ``matcher``
    (``hg cat``).

    Output goes through ``basefm``, or to per-file destinations when
    ``fntemplate`` is given.  Returns 0 if at least one file was
    written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        # emit one file, reopening the formatter on a templated filename
        # when -o/--output was given
        filename = None
        if fntemplate:
            filename = makefilename(
                ctx, fntemplate, pathname=os.path.join(prefix, path)
            )
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
                write(file)
                return 0
        except KeyError:
            # fall through to the full walk below
            pass

    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            if not sub.cat(
                submatch,
                basefm,
                fntemplate,
                subprefix,
                **pycompat.strkwargs(opts)
            ):
                err = 0
        except error.RepoLookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return err
3038 3038
3039 3039
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    ``commitfunc(ui, repo, message, matcher, opts)`` performs the actual
    commit; this wrapper parses the date option, builds the message and
    matcher, and runs addremove (guarded by a dirstate guard) when
    requested.
    '''
    rawdate = opts.get(b'date')
    if rawdate:
        opts[b'date'] = dateutil.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get(b'addremove'):
        dsguard = dirstateguard.dirstateguard(repo, b'commit')
    else:
        dsguard = None
    with dsguard or util.nullcontextmanager():
        if dsguard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
                raise error.Abort(
                    _(b"failed to mark all new/missing files as added/removed")
                )

    return commitfunc(ui, repo, message, matcher, opts)
3063 3063
3064 3064
def samefile(f, ctx1, ctx2):
    """Report whether file ``f`` is identical in ``ctx1`` and ``ctx2``.

    The file counts as the same when both contexts track it with equal
    contents and flags, or when neither context tracks it at all.
    """
    if f not in ctx1.manifest():
        # identical only if the other side lacks it too
        return f not in ctx2.manifest()
    a = ctx1.filectx(f)
    if f not in ctx2.manifest():
        return False
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
3075 3075
3076 3076
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset ``old``, folding in the working-copy changes
    matched by ``pats``/``opts`` (the implementation of ``commit --amend``).

    ``extra`` supplies extra commit metadata; it is copied (never mutated)
    and merged with the old commit's and working context's extras.  Takes
    wlock, lock and a transaction itself.  Returns the node of the new
    changeset, or ``old.node()`` when amending would change nothing.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username()  # raise exception if username not set

    ui.note(_(b'amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        # date-only change should be ignored?
        datemaydiffer = resolvecommitoptions(ui, opts)

        date = old.date()
        if opts.get(b'date'):
            date = dateutil.parsedate(opts.get(b'date'))
        user = opts.get(b'user') or old.user()

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            st = base.status(old)
            files = set(st.modified) | set(st.added) | set(st.removed)
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        relative = scmutil.anypats(pats, opts)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
        if opts.get(b'addremove') and scmutil.addremove(
            repo, matcher, b"", uipathfn, opts
        ):
            raise error.Abort(
                _(b"failed to mark all new/missing files as added/removed")
            )

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if b'.hgsub' in wctx or b'.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher
            )
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        ms = mergestatemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)

        filestoamend = {f for f in wctx.files() if matcher(f)}

        changes = len(filestoamend) > 0
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # BUGFIX: the previous condition was 'if old.p2:', which tests
            # the *bound method* itself and is therefore always true, so
            # the p2 copies were computed for every amend.  Only a merge
            # commit actually has a second parent to fold copies from
            # (same test as the merge check above).
            if len(old.parents()) > 1:
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [
                f
                for f in files
                if (f not in filestoamend or not samefile(f, wctx, base))
            ]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(
                        repo,
                        ctx_,
                        fctx.path(),
                        fctx.data(),
                        islink=b'l' in flags,
                        isexec=b'x' in flags,
                        copysource=copied.get(path),
                    )
                    return mctx
                except KeyError:
                    return None

        else:
            ui.note(_(b'copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, b'commit.amend')

        if not message:
            message = old.description()
            # Default if message isn't provided and --edit is not passed is to
            # invoke editor, but allow --no-edit. If somehow we don't have any
            # description, let's always start the editor.
            doedit = not message or opts.get(b'edit') in [True, None]
        else:
            # Default if message is provided is to not invoke editor, but allow
            # --edit.
            doedit = opts.get(b'edit') is True
        editor = getcommiteditor(edit=doedit, editform=editform)

        pureextra = extra.copy()
        extra[b'amend_source'] = old.hex()

        new = context.memctx(
            repo,
            parents=[base.node(), old.p2().node()],
            text=message,
            files=files,
            filectxfn=filectxfn,
            user=user,
            date=date,
            extra=extra,
            editor=editor,
        )

        newdesc = changelog.stripdesc(new.description())
        if (
            (not changes)
            and newdesc == old.description()
            and user == old.user()
            and (date == old.date() or datemaydiffer)
            and pureextra == old.extra()
        ):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This is not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get(b'secret'):
            commitphase = phases.secret
        newid = repo.commitctx(new)
        ms.reset()

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get(b'note'):
            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
        backup = ui.configbool(b'rewrite', b'backup-bundle')
        scmutil.cleanupnodes(
            repo,
            mapping,
            b'amend',
            metadata=obsmetadata,
            fixphase=True,
            targetphase=commitphase,
            backup=backup,
        )

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and modified in the
        # amend to "normal" in the dirstate. We need to use "normallookup" since
        # the files may have changed since the command started; using "normal"
        # would mark them as clean but with uncommitted contents.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normallookup(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
3305 3305
3306 3306
def commiteditor(repo, ctx, subs, editform=b''):
    """Return the commit message for ``ctx``, invoking an editor only
    when the context has no description yet."""
    description = ctx.description()
    if description:
        return description
    # no message supplied: fall back to an interactive editor session
    return commitforceeditor(
        repo, ctx, subs, editform=editform, unchangedmessagedetection=True
    )
3313 3313
3314 3314
def commitforceeditor(
    repo,
    ctx,
    subs,
    finishdesc=None,
    extramsg=None,
    editform=b'',
    unchangedmessagedetection=False,
):
    """Run the user's editor to obtain a commit message for ctx.

    The initial text comes from the most specific matching
    ``committemplate.<editform prefix>`` config template (tried from the
    full dotted editform down to plain ``changeset``), falling back to
    the built-in text from buildcommittext(). ``HG:`` comment lines and
    anything below the ``_linebelow`` marker are stripped from the
    editor's output. ``finishdesc``, when given, post-processes the
    final text.

    Raises error.Abort when the resulting message is empty, or when
    ``unchangedmessagedetection`` is set and the user left the templated
    text unmodified.
    """
    if not extramsg:
        extramsg = _(b"Leave message empty to abort commit.")

    # Try templates from the most specific editform down to 'changeset';
    # the while/else only runs the fallback when no template matched.
    forms = [e for e in editform.split(b'.') if e]
    forms.insert(0, b'changeset')
    templatetext = None
    while forms:
        ref = b'.'.join(forms)
        if repo.ui.config(b'committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref
            )
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = encoding.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(
        committext,
        ctx.user(),
        ctx.extra(),
        editform=editform,
        pending=pending,
        repopath=repo.path,
        action=b'commit',
    )
    # keep the raw editor output for unchanged-message detection below
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[: stripbelow.start()]

    # drop HG: comment lines (including their trailing newline)
    text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_(b"empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_(b"commit message unchanged"))

    return text
3378 3378
3379 3379
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the ``committemplate.<ref>`` template for ctx and return
    the resulting editor text.

    All ``committemplate`` config entries are loaded into the templater
    cache so templates can reference each other by name.
    """
    ui = repo.ui
    t = logcmdutil.changesettemplater(
        ui, repo, formatter.reference_templatespec(ref)
    )
    overrides = {
        k: templater.unquotestring(v)
        for k, v in repo.ui.configitems(b'committemplate')
    }
    t.t.cache.update(overrides.items())

    # templates expect a string, never None
    if not extramsg:
        extramsg = b''

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3395 3395
3396 3396
def hgprefix(msg):
    """Prefix every non-empty line of *msg* with ``HG: ``.

    Empty lines are dropped entirely; the result carries no trailing
    newline.
    """
    prefixed = [b"HG: %s" % line for line in msg.split(b"\n") if line]
    return b"\n".join(prefixed)
3399 3399
3400 3400
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for ctx.

    The text starts with any existing description, then ``HG:``-prefixed
    instructions, metadata (user, branch/merge/bookmark state, subrepos)
    and the list of added/changed/removed files.
    """
    lines = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    desc = ctx.description()
    if desc:
        lines.append(desc)
    lines.append(b"")
    lines.append(b"")  # Empty line between message and comments.
    lines.append(
        hgprefix(
            _(
                b"Enter commit message."
                b" Lines beginning with 'HG:' are removed."
            )
        )
    )
    lines.append(hgprefix(extramsg))
    lines.append(b"HG: --")
    lines.append(hgprefix(_(b"user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_(b"branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
    for s in subs:
        lines.append(hgprefix(_(b"subrepo %s") % s))
    for f in added:
        lines.append(hgprefix(_(b"added %s") % f))
    for f in modified:
        lines.append(hgprefix(_(b"changed %s") % f))
    for f in removed:
        lines.append(hgprefix(_(b"removed %s") % f))
    if not added and not modified and not removed:
        lines.append(hgprefix(_(b"no files changed")))
    lines.append(b"")

    return b"\n".join(lines)
3434 3434
3435 3435
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print status messages after a commit created changeset ``node``.

    Emits 'created new head' when the new changeset adds a topological or
    branch head, 'reopening closed branch head' when it reopens a closed
    one, and a 'committed changeset' line under --verbose/--debug.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    candidate_new_head = (
        not opts.get(b'amend') and bheads and node not in bheads
    )
    if candidate_new_head and not any(
        p.node() in bheads and p.branch() == branch for p in parents
    ):
        repo.ui.status(_(b'created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get(b'close_branch'):
        for p in parents:
            if p.closesbranch() and p.branch() == branch:
                repo.ui.status(
                    _(b'reopening closed branch head %d\n') % p.rev()
                )

    if repo.ui.debugflag:
        repo.ui.write(
            _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
        )
    elif repo.ui.verbose:
        repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3493 3493
3494 3494
def postcommitstatus(repo, pats, opts):
    """Return the working-copy status restricted to files matching pats."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
3497 3497
3498 3498
def revert(ui, repo, ctx, *pats, **opts):
    """Restore files matching ``pats`` to their state in ``ctx``.

    Classifies every involved file into one of the revert actions
    (revert/add/remove/drop/forget/undelete/noop/unknown) together with
    a backup strategy, then delegates the on-disk work to
    _performrevert(). Matching subrepositories are reverted recursively.
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get(b'interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress warnings for paths already seen, subrepos,
                # and directories that contain matched files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + b'/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn(b"%s: %s\n" % (uipathfn(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(
                node1=node, match=m, unknown=True, ignored=True, clean=True
            )
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded  # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == b'r':
                dsremoved.add(src)
                names[src] = True

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != b'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == b'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, <message>) tuple
        actions = {
            b'revert': ([], _(b'reverting %s\n')),
            b'add': ([], _(b'adding %s\n')),
            b'remove': ([], _(b'removing %s\n')),
            b'drop': ([], _(b'removing %s\n')),
            b'forget': ([], _(b'forgetting %s\n')),
            b'undelete': ([], _(b'undeleting %s\n')),
            b'noop': (None, _(b'no changes needed to %s\n')),
            b'unknown': (None, _(b'file not managed: %s\n')),
        }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3  # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1  # check if the existing file differs from target
        discard = 0  # never do backup
        if opts.get(b'no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions[b'remove']
        if not opts.get(b'no_backup'):
            backupanddel = actions[b'drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup
            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions[b'revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions[b'revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions[b'revert'], dsmodifiedbackup),
            # Added since target
            (added, actions[b'remove'], discard),
            # Added in working directory
            (dsadded, actions[b'forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions[b'drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions[b'add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions[b'add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions[b'undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions[b'undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions[b'noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions[b'unknown'], discard),
        )

        for abs, exact in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
                            absbakname = scmutil.backuppath(ui, repo, abs)
                            bakname = os.path.relpath(
                                absbakname, start=repo.root
                            )
                            ui.note(
                                _(b'saving current version of %s as %s\n')
                                % (uipathfn(abs), uipathfn(bakname))
                            )
                            if not opts.get(b'dry_run'):
                                if interactive:
                                    util.copyfile(target, absbakname)
                                else:
                                    util.rename(target, absbakname)
                    if opts.get(b'dry_run'):
                        if ui.verbose or not exact:
                            ui.status(msg % uipathfn(abs))
                elif exact:
                    ui.warn(msg % uipathfn(abs))
                break

        if not opts.get(b'dry_run'):
            # prefetch file contents that the revert will need (revert,
            # add and undelete actions write file data to the working copy)
            needdata = (b'revert', b'add', b'undelete')
            oplist = [actions[name][0] for name in needdata]
            prefetch = scmutil.prefetchfiles
            matchfiles = scmutil.matchfiles(
                repo, [f for sublist in oplist for f in sublist]
            )
            prefetch(
                repo, [(ctx.rev(), matchfiles)],
            )
            match = scmutil.match(repo[None], pats)
            _performrevert(
                repo,
                ctx,
                names,
                uipathfn,
                actions,
                match,
                interactive,
                tobackup,
            )

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(
                        ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
                    )
                except KeyError:
                    raise error.Abort(
                        b"subrepository '%s' does not exist in %s!"
                        % (sub, short(ctx.node()))
                    )
3808 3808
3809 3809
def _performrevert(
    repo,
    ctx,
    names,
    uipathfn,
    actions,
    match,
    interactive=False,
    tobackup=None,
):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()
    excluded_files = []

    def checkout(f):
        # write f's content from the target revision into the working copy
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        try:
            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            # best-effort: the file may already be gone
            pass
        repo.dirstate.remove(f)

    def prntstatusmsg(action, f):
        exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % uipathfn(f))

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions[b'forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'forget', f)
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'forget', f)
            repo.dirstate.drop(f)
    for f in actions[b'remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'remove', f)
            doremove(f)
    for f in actions[b'drop'][0]:
        audit_path(f)
        prntstatusmsg(b'drop', f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(
            repo.ui,
            whitespace=True,
            section=b'commands',
            configprefix=b'revert.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        operation = b'apply'
        if node == parent:
            if repo.ui.configbool(
                b'experimental', b'revert.interactive.select-to-keep'
            ):
                operation = b'keep'
            else:
                operation = b'discard'

        # 'apply' diffs wc -> target; 'keep'/'discard' diff target -> wc
        if operation == b'apply':
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        else:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(
                repo.ui, originalchunks, match, operation=operation
            )
            if operation == b'discard':
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)

        # FIXME: when doing an interactive revert of a copy, there's no way of
        # performing a partial revert of the added file, the only option is
        # "remove added file <name> (Yn)?", so we don't need to worry about the
        # alsorestore value. Ideally we'd be able to partially revert
        # copied/renamed files.
        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
            chunks, originalchunks
        )
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
            prntstatusmsg(b'revert', f)
        files = set()
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.backuppath(repo.ui, repo, abs)
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
                if abs not in files:
                    files.add(abs)
                    if operation == b'keep':
                        checkout(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        for f in actions[b'revert'][0]:
            prntstatusmsg(b'revert', f)
            checkout(f)
            if normal:
                normal(f)

    for f in actions[b'add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            prntstatusmsg(b'add', f)
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions[b'undelete'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
            )
            if choice == 0:
                prntstatusmsg(b'undelete', f)
                checkout(f)
                normal(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'undelete', f)
            checkout(f)
            normal(f)

    # restore copy/rename metadata for files we (re)created above
    copied = copies.pathcopies(repo[parent], ctx)

    for f in (
        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
    ):
        if f in copied:
            repo.dirstate.copy(copied[f], f)
4008 4008
4009 4009
# Extension hook points. Extensions append callables to these and core
# commands invoke them at the documented moments.

# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
4027 4027
4028 4028
def checkunfinished(repo, commit=False, skipmerge=False):
    '''Look for an unfinished multistep operation, like graft, and abort
    if found. It's probably good to check this right before
    bailifchanged().
    '''
    # Check for non-clearable states first, so things like rebase will take
    # precedence over update.
    for state in statemod._unfinishedstates:
        skippable = (
            state._clearable
            or (commit and state._allowcommit)
            or state._reportonly
        )
        if skippable:
            continue
        if state.isunfinished(repo):
            raise error.Abort(state.msg(), hint=state.hint())

    # Then the clearable states (optionally skipping an in-progress merge).
    for state in statemod._unfinishedstates:
        skippable = (
            not state._clearable
            or (commit and state._allowcommit)
            or (state._opname == b'merge' and skipmerge)
            or state._reportonly
        )
        if skippable:
            continue
        if state.isunfinished(repo):
            raise error.Abort(state.msg(), hint=state.hint())
4056 4056
4057 4057
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.

    Raises error.Abort when a non-clearable, non-report-only unfinished
    operation is in progress; otherwise removes the state files of all
    clearable unfinished operations (except merge).
    '''
    for state in statemod._unfinishedstates:
        if state._reportonly:
            continue
        if not state._clearable and state.isunfinished(repo):
            raise error.Abort(state.msg(), hint=state.hint())

    for s in statemod._unfinishedstates:
        # Fix: this condition previously read `state._reportonly`,
        # accidentally reusing the first loop's leftover variable (and
        # raising NameError if the registry were empty); it must test the
        # state being iterated here.
        if s._opname == b'merge' or s._reportonly:
            continue
        if s._clearable and s.isunfinished(repo):
            util.unlink(repo.vfs.join(s._fname))
4073 4073
4074 4074
def getunfinishedstate(repo):
    ''' Checks for unfinished operations and returns statecheck object
    for it'''
    return next(
        (s for s in statemod._unfinishedstates if s.isunfinished(repo)),
        None,
    )
4082 4082
4083 4083
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    statemod._unfinishedstates list is checked for an unfinished operation
    and the corresponding message to finish it is generated if a method to
    continue is supported by the operation.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    '''
    contmsg = _(b"continue: %s")
    for state in statemod._unfinishedstates:
        if state._continueflag and state.isunfinished(repo):
            return contmsg % state.continuemsg(), True
    # No resumable operation: suggest committing if the working copy is dirty.
    if repo[None].dirty(missing=True, merge=False, branch=False):
        return contmsg % _(b"hg commit"), False
    return None, None
4104 4104
4105 4105
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a an unfinished operation that supports continue flag,
    howtocontinue will yield repo.ui.warn as the reporter.

    Otherwise, it will yield repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter(b"%s\n" % msg)
4120 4120
4121 4121
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_(b'no %s in progress') % task, hint=hint)
4136 4136
4137 4137
def abortgraft(ui, repo, graftstate):
    """abort the interrupted graft and rollbacks to the state before interrupted
    graft

    Updates the working copy back to the changeset the graft started from
    and strips the changesets the graft created, unless any of them is
    public or has new descendants (in which case only the state file is
    cleared). Returns 0.
    """
    if not graftstate.exists():
        raise error.Abort(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # and old graft state which does not have all the data required to abort
        # the graft
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which graft operation was started
    if len(newnodes) > 0:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo[b'.']
    # whether to strip or not
    cleanup = False

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # checking that none of the newnodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            cleanup = False

        # checking that no new nodes are created on top of grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            cleanup = False

    if cleanup:
        with repo.wlock(), repo.lock():
            # discard working-copy changes before stripping
            mergemod.clean_update(startctx)
            # stripping the new nodes created
            strippoints = [
                c.node() for c in repo.set(b"roots(%ld)", newnodes)
            ]
            repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        mergemod.clean_update(startctx)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
4201 4200
4202 4201
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """read the graft state file and return a dict of the data stored in it"""
    try:
        return graftstate.read()
    except error.CorruptedState:
        pass
    # Fall back to the legacy plain-text format: one node per line.
    return {b'nodes': repo.vfs.read(b'graftstate').splitlines()}
4211 4210
4212 4211
def hgabortgraft(ui, repo):
    """ abort logic for aborting graft using 'hg abort'"""
    with repo.wlock():
        state = statemod.cmdstate(repo, b'graftstate')
        return abortgraft(ui, repo, state)
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now