overlayworkingctx: implement a setparents() to mirror dirstate.setparents()...
Martin von Zweigbergk
r44503:436d106d default
@@ -1,2284 +1,2284 @@
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 https://mercurial-scm.org/wiki/RebaseExtension
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 import errno
20 20 import os
21 21
22 22 from mercurial.i18n import _
23 23 from mercurial.node import (
24 24 nullrev,
25 25 short,
26 26 )
27 27 from mercurial.pycompat import open
28 28 from mercurial import (
29 29 bookmarks,
30 30 cmdutil,
31 31 commands,
32 32 copies,
33 33 destutil,
34 34 dirstateguard,
35 35 error,
36 36 extensions,
37 37 hg,
38 38 merge as mergemod,
39 39 mergeutil,
40 40 obsolete,
41 41 obsutil,
42 42 patch,
43 43 phases,
44 44 pycompat,
45 45 registrar,
46 46 repair,
47 47 revset,
48 48 revsetlang,
49 49 rewriteutil,
50 50 scmutil,
51 51 smartset,
52 52 state as statemod,
53 53 util,
54 54 )
55 55
56 56 # The following constants are used throughout the rebase module. The ordering of
57 57 # their values must be maintained.
58 58
59 59 # Indicates that a revision needs to be rebased
60 60 revtodo = -1
61 61 revtodostr = b'-1'
62 62
63 63 # legacy revstates no longer needed in current code
64 64 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
65 65 legacystates = {b'-2', b'-3', b'-4', b'-5'}
66 66
67 67 cmdtable = {}
68 68 command = registrar.command(cmdtable)
69 69 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
70 70 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
71 71 # be specifying the version(s) of Mercurial they are tested with, or
72 72 # leave the attribute unspecified.
73 73 testedwith = b'ships-with-hg-core'
74 74
75 75
76 76 def _nothingtorebase():
77 77 return 1
78 78
79 79
80 80 def _savegraft(ctx, extra):
81 81 s = ctx.extra().get(b'source', None)
82 82 if s is not None:
83 83 extra[b'source'] = s
84 84 s = ctx.extra().get(b'intermediate-source', None)
85 85 if s is not None:
86 86 extra[b'intermediate-source'] = s
87 87
88 88
89 89 def _savebranch(ctx, extra):
90 90 extra[b'branch'] = ctx.branch()
91 91
92 92
93 93 def _destrebase(repo, sourceset, destspace=None):
94 94 """small wrapper around destmerge to pass the right extra args
95 95
96 96 Please wrap destutil.destmerge instead."""
97 97 return destutil.destmerge(
98 98 repo,
99 99 action=b'rebase',
100 100 sourceset=sourceset,
101 101 onheadcheck=False,
102 102 destspace=destspace,
103 103 )
104 104
105 105
106 106 revsetpredicate = registrar.revsetpredicate()
107 107
108 108
109 109 @revsetpredicate(b'_destrebase')
110 110 def _revsetdestrebase(repo, subset, x):
111 111 # ``_rebasedefaultdest()``
112 112
113 113 # default destination for rebase.
114 114 # XXX: Currently private because I expect the signature to change.
115 115 # XXX: - bailing out in case of ambiguity vs returning all data.
116 116 # i18n: "_rebasedefaultdest" is a keyword
117 117 sourceset = None
118 118 if x is not None:
119 119 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
120 120 return subset & smartset.baseset([_destrebase(repo, sourceset)])
121 121
122 122
123 123 @revsetpredicate(b'_destautoorphanrebase')
124 124 def _revsetdestautoorphanrebase(repo, subset, x):
125 125 # ``_destautoorphanrebase()``
126 126
127 127 # automatic rebase destination for a single orphan revision.
128 128 unfi = repo.unfiltered()
129 129 obsoleted = unfi.revs(b'obsolete()')
130 130
131 131 src = revset.getset(repo, subset, x).first()
132 132
133 133 # Empty src or already obsoleted - Do not return a destination
134 134 if not src or src in obsoleted:
135 135 return smartset.baseset()
136 136 dests = destutil.orphanpossibledestination(repo, src)
137 137 if len(dests) > 1:
138 138 raise error.Abort(
139 139 _(b"ambiguous automatic rebase: %r could end up on any of %r")
140 140 % (src, dests)
141 141 )
142 142 # We have zero or one destination, so we can just return here.
143 143 return smartset.baseset(dests)
144 144
145 145
146 146 def _ctxdesc(ctx):
147 147 """short description for a context"""
148 148 desc = b'%d:%s "%s"' % (
149 149 ctx.rev(),
150 150 ctx,
151 151 ctx.description().split(b'\n', 1)[0],
152 152 )
153 153 repo = ctx.repo()
154 154 names = []
155 155 for nsname, ns in pycompat.iteritems(repo.names):
156 156 if nsname == b'branches':
157 157 continue
158 158 names.extend(ns.names(repo, ctx.node()))
159 159 if names:
160 160 desc += b' (%s)' % b' '.join(names)
161 161 return desc
162 162
163 163
164 164 class rebaseruntime(object):
165 165 """This class is a container for rebase runtime state"""
166 166
167 167 def __init__(self, repo, ui, inmemory=False, opts=None):
168 168 if opts is None:
169 169 opts = {}
170 170
171 171 # prepared: whether we have rebasestate prepared or not. Currently it
172 172 # decides whether "self.repo" is unfiltered or not.
173 173 # The rebasestate has explicit hash to hash instructions not depending
174 174 # on visibility. If rebasestate exists (in-memory or on-disk), use
175 175 # unfiltered repo to avoid visibility issues.
176 176 # Before knowing rebasestate (i.e. when starting a new rebase (not
177 177 # --continue or --abort)), the original repo should be used so
178 178 # visibility-dependent revsets are correct.
179 179 self.prepared = False
180 180 self._repo = repo
181 181
182 182 self.ui = ui
183 183 self.opts = opts
184 184 self.originalwd = None
185 185 self.external = nullrev
186 186 # Mapping from the old revision id to either the new rebased revision
187 187 # or what needs to be done with the old revision. This state dict
188 188 # holds most of the rebase progress state.
189 189 self.state = {}
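# e.g. {3: -1, 4: 7} means revision 3 still needs to be rebased
# (revtodo) while revision 4 has already been rebased to revision 7.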
190 190 self.activebookmark = None
191 191 self.destmap = {}
192 192 self.skipped = set()
193 193
194 194 self.collapsef = opts.get(b'collapse', False)
195 195 self.collapsemsg = cmdutil.logmessage(ui, opts)
196 196 self.date = opts.get(b'date', None)
197 197
198 198 e = opts.get(b'extrafn') # internal, used by e.g. hgsubversion
199 199 self.extrafns = [_savegraft]
200 200 if e:
201 201 self.extrafns = [e]
202 202
203 203 self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
204 204 self.keepf = opts.get(b'keep', False)
205 205 self.keepbranchesf = opts.get(b'keepbranches', False)
206 206 self.obsoletenotrebased = {}
207 207 self.obsoletewithoutsuccessorindestination = set()
208 208 self.inmemory = inmemory
209 209 self.stateobj = statemod.cmdstate(repo, b'rebasestate')
210 210
211 211 @property
212 212 def repo(self):
213 213 if self.prepared:
214 214 return self._repo.unfiltered()
215 215 else:
216 216 return self._repo
217 217
218 218 def storestatus(self, tr=None):
219 219 """Store the current status to allow recovery"""
220 220 if tr:
221 221 tr.addfilegenerator(
222 222 b'rebasestate',
223 223 (b'rebasestate',),
224 224 self._writestatus,
225 225 location=b'plain',
226 226 )
227 227 else:
228 228 with self.repo.vfs(b"rebasestate", b"w") as f:
229 229 self._writestatus(f)
230 230
231 231 def _writestatus(self, f):
232 232 repo = self.repo
233 233 assert repo.filtername is None
234 234 f.write(repo[self.originalwd].hex() + b'\n')
235 235 # was "dest". We now write dest per src root below.
236 236 f.write(b'\n')
237 237 f.write(repo[self.external].hex() + b'\n')
238 238 f.write(b'%d\n' % int(self.collapsef))
239 239 f.write(b'%d\n' % int(self.keepf))
240 240 f.write(b'%d\n' % int(self.keepbranchesf))
241 241 f.write(b'%s\n' % (self.activebookmark or b''))
242 242 destmap = self.destmap
243 243 for d, v in pycompat.iteritems(self.state):
244 244 oldrev = repo[d].hex()
245 245 if v >= 0:
246 246 newrev = repo[v].hex()
247 247 else:
248 248 newrev = b"%d" % v
249 249 destnode = repo[destmap[d]].hex()
250 250 f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
251 251 repo.ui.debug(b'rebase status stored\n')
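# The resulting .hg/rebasestate file therefore looks like (one record
# per line; real hashes are the full 40 hex characters):
#
#   <originalwd hex>
#   <empty line; legacy clients stored a single dest here>
#   <external hex>
#   <collapse: 0 or 1>
#   <keep: 0 or 1>
#   <keepbranches: 0 or 1>
#   <active bookmark name, possibly empty>
#   <oldrev hex>:<newrev hex or negative int>:<destnode hex>  (repeated)
#
# _read() below parses this format back, including the legacy variants.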
252 252
253 253 def restorestatus(self):
254 254 """Restore a previously stored status"""
255 255 if not self.stateobj.exists():
256 256 cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))
257 257
258 258 data = self._read()
259 259 self.repo.ui.debug(b'rebase status resumed\n')
260 260
261 261 self.originalwd = data[b'originalwd']
262 262 self.destmap = data[b'destmap']
263 263 self.state = data[b'state']
264 264 self.skipped = data[b'skipped']
265 265 self.collapsef = data[b'collapse']
266 266 self.keepf = data[b'keep']
267 267 self.keepbranchesf = data[b'keepbranches']
268 268 self.external = data[b'external']
269 269 self.activebookmark = data[b'activebookmark']
270 270
271 271 def _read(self):
272 272 self.prepared = True
273 273 repo = self.repo
274 274 assert repo.filtername is None
275 275 data = {
276 276 b'keepbranches': None,
277 277 b'collapse': None,
278 278 b'activebookmark': None,
279 279 b'external': nullrev,
280 280 b'keep': None,
281 281 b'originalwd': None,
282 282 }
283 283 legacydest = None
284 284 state = {}
285 285 destmap = {}
286 286
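# (the "if True:" below only preserves the indentation of the block
# that follows; presumably left over from earlier error handling)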
287 287 if True:
288 288 f = repo.vfs(b"rebasestate")
289 289 for i, l in enumerate(f.read().splitlines()):
290 290 if i == 0:
291 291 data[b'originalwd'] = repo[l].rev()
292 292 elif i == 1:
293 293 # this line should be empty in newer versions, but legacy
294 294 # clients may still use it
295 295 if l:
296 296 legacydest = repo[l].rev()
297 297 elif i == 2:
298 298 data[b'external'] = repo[l].rev()
299 299 elif i == 3:
300 300 data[b'collapse'] = bool(int(l))
301 301 elif i == 4:
302 302 data[b'keep'] = bool(int(l))
303 303 elif i == 5:
304 304 data[b'keepbranches'] = bool(int(l))
305 305 elif i == 6 and not (len(l) == 81 and b':' in l):
306 306 # line 6 is a recent addition, so for backwards
307 307 # compatibility check that the line doesn't look like the
308 308 # oldrev:newrev lines
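# (those lines are 40 hex chars + b':' + 40 hex chars = 81 bytes)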
309 309 data[b'activebookmark'] = l
310 310 else:
311 311 args = l.split(b':')
312 312 oldrev = repo[args[0]].rev()
313 313 newrev = args[1]
314 314 if newrev in legacystates:
315 315 continue
316 316 if len(args) > 2:
317 317 destrev = repo[args[2]].rev()
318 318 else:
319 319 destrev = legacydest
320 320 destmap[oldrev] = destrev
321 321 if newrev == revtodostr:
322 322 state[oldrev] = revtodo
323 323 # Legacy compat special case
324 324 else:
325 325 state[oldrev] = repo[newrev].rev()
326 326
327 327 if data[b'keepbranches'] is None:
328 328 raise error.Abort(_(b'.hg/rebasestate is incomplete'))
329 329
330 330 data[b'destmap'] = destmap
331 331 data[b'state'] = state
332 332 skipped = set()
333 333 # recompute the set of skipped revs
334 334 if not data[b'collapse']:
335 335 seen = set(destmap.values())
336 336 for old, new in sorted(state.items()):
337 337 if new != revtodo and new in seen:
338 338 skipped.add(old)
339 339 seen.add(new)
340 340 data[b'skipped'] = skipped
341 341 repo.ui.debug(
342 342 b'computed skipped revs: %s\n'
343 343 % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
344 344 )
345 345
346 346 return data
347 347
348 348 def _handleskippingobsolete(self, obsoleterevs, destmap):
349 349 """Compute structures necessary for skipping obsolete revisions
350 350
351 351 obsoleterevs: iterable of all obsolete revisions in rebaseset
352 352 destmap: {srcrev: destrev} destination revisions
353 353 """
354 354 self.obsoletenotrebased = {}
355 355 if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
356 356 return
357 357 obsoleteset = set(obsoleterevs)
358 358 (
359 359 self.obsoletenotrebased,
360 360 self.obsoletewithoutsuccessorindestination,
361 361 obsoleteextinctsuccessors,
362 362 ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
363 363 skippedset = set(self.obsoletenotrebased)
364 364 skippedset.update(self.obsoletewithoutsuccessorindestination)
365 365 skippedset.update(obsoleteextinctsuccessors)
366 366 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
367 367
368 368 def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
369 369 try:
370 370 self.restorestatus()
371 371 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
372 372 except error.RepoLookupError:
373 373 if isabort:
374 374 clearstatus(self.repo)
375 375 clearcollapsemsg(self.repo)
376 376 self.repo.ui.warn(
377 377 _(
378 378 b'rebase aborted (no revision is removed,'
379 379 b' only broken state is cleared)\n'
380 380 )
381 381 )
382 382 return 0
383 383 else:
384 384 msg = _(b'cannot continue inconsistent rebase')
385 385 hint = _(b'use "hg rebase --abort" to clear broken state')
386 386 raise error.Abort(msg, hint=hint)
387 387
388 388 if isabort:
389 389 backup = backup and self.backupf
390 390 return self._abort(backup=backup, suppwarns=suppwarns)
391 391
392 392 def _preparenewrebase(self, destmap):
393 393 if not destmap:
394 394 return _nothingtorebase()
395 395
396 396 rebaseset = destmap.keys()
397 397 if not self.keepf:
398 398 try:
399 399 rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
400 400 except error.Abort as e:
401 401 if e.hint is None:
402 402 e.hint = _(b'use --keep to keep original changesets')
403 403 raise e
404 404
405 405 result = buildstate(self.repo, destmap, self.collapsef)
406 406
407 407 if not result:
408 408 # Empty state built, nothing to rebase
409 409 self.ui.status(_(b'nothing to rebase\n'))
410 410 return _nothingtorebase()
411 411
412 412 (self.originalwd, self.destmap, self.state) = result
413 413 if self.collapsef:
414 414 dests = set(self.destmap.values())
415 415 if len(dests) != 1:
416 416 raise error.Abort(
417 417 _(b'--collapse does not work with multiple destinations')
418 418 )
419 419 destrev = next(iter(dests))
420 420 destancestors = self.repo.changelog.ancestors(
421 421 [destrev], inclusive=True
422 422 )
423 423 self.external = externalparent(self.repo, self.state, destancestors)
424 424
425 425 for destrev in sorted(set(destmap.values())):
426 426 dest = self.repo[destrev]
427 427 if dest.closesbranch() and not self.keepbranchesf:
428 428 self.ui.status(_(b'reopening closed branch head %s\n') % dest)
429 429
430 430 self.prepared = True
431 431
432 432 def _assignworkingcopy(self):
433 433 if self.inmemory:
434 434 from mercurial.context import overlayworkingctx
435 435
436 436 self.wctx = overlayworkingctx(self.repo)
437 437 self.repo.ui.debug(b"rebasing in-memory\n")
438 438 else:
439 439 self.wctx = self.repo[None]
440 440 self.repo.ui.debug(b"rebasing on disk\n")
441 441 self.repo.ui.log(
442 442 b"rebase",
443 443 b"using in-memory rebase: %r\n",
444 444 self.inmemory,
445 445 rebase_imm_used=self.inmemory,
446 446 )
447 447
448 448 def _performrebase(self, tr):
449 449 self._assignworkingcopy()
450 450 repo, ui = self.repo, self.ui
451 451 if self.keepbranchesf:
452 452 # insert _savebranch at the start of extrafns so if
453 453 # there's a user-provided extrafn it can clobber branch if
454 454 # desired
455 455 self.extrafns.insert(0, _savebranch)
456 456 if self.collapsef:
457 457 branches = set()
458 458 for rev in self.state:
459 459 branches.add(repo[rev].branch())
460 460 if len(branches) > 1:
461 461 raise error.Abort(
462 462 _(b'cannot collapse multiple named branches')
463 463 )
464 464
465 465 # Calculate self.obsoletenotrebased
466 466 obsrevs = _filterobsoleterevs(self.repo, self.state)
467 467 self._handleskippingobsolete(obsrevs, self.destmap)
468 468
469 469 # Keep track of the active bookmarks in order to reset them later
470 470 self.activebookmark = self.activebookmark or repo._activebookmark
471 471 if self.activebookmark:
472 472 bookmarks.deactivate(repo)
473 473
474 474 # Store the state before we begin so users can run 'hg rebase --abort'
475 475 # if we fail before the transaction closes.
476 476 self.storestatus()
477 477 if tr:
478 478 # When using single transaction, store state when transaction
479 479 # commits.
480 480 self.storestatus(tr)
481 481
482 482 cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
483 483 p = repo.ui.makeprogress(
484 484 _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
485 485 )
486 486
487 487 def progress(ctx):
488 488 p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))
489 489
490 490 allowdivergence = self.ui.configbool(
491 491 b'experimental', b'evolution.allowdivergence'
492 492 )
493 493 for subset in sortsource(self.destmap):
494 494 sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
495 495 if not allowdivergence:
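# exclude descendants of the revisions that cannot be rebased without
# causing divergence; the revisions themselves stay in the set so that
# _rebasenode() can report them as skipped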
496 496 sortedrevs -= self.repo.revs(
497 497 b'descendants(%ld) and not %ld',
498 498 self.obsoletewithoutsuccessorindestination,
499 499 self.obsoletewithoutsuccessorindestination,
500 500 )
501 501 for rev in sortedrevs:
502 502 self._rebasenode(tr, rev, allowdivergence, progress)
503 503 p.complete()
504 504 ui.note(_(b'rebase merging completed\n'))
505 505
506 506 def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
507 507 '''Commit the wd changes with parents p1 and p2.
508 508
509 509 Reuse commit info from rev but also store useful information in extra.
510 510 Return node of committed revision.'''
511 511 repo = self.repo
512 512 ctx = repo[rev]
513 513 if commitmsg is None:
514 514 commitmsg = ctx.description()
515 515 date = self.date
516 516 if date is None:
517 517 date = ctx.date()
518 518 extra = {b'rebase_source': ctx.hex()}
519 519 for c in self.extrafns:
520 520 c(ctx, extra)
521 521 keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
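# commit the rebased revision with at least draft phase, and never a
# lower phase than the original (e.g. a secret changeset stays secret)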
522 522 destphase = max(ctx.phase(), phases.draft)
523 523 overrides = {(b'phases', b'new-commit'): destphase}
524 524 if keepbranch:
525 525 overrides[(b'ui', b'allowemptycommit')] = True
526 526 with repo.ui.configoverride(overrides, b'rebase'):
527 527 if self.inmemory:
528 528 newnode = commitmemorynode(
529 529 repo,
530 530 p1,
531 531 p2,
532 532 wctx=self.wctx,
533 533 extra=extra,
534 534 commitmsg=commitmsg,
535 535 editor=editor,
536 536 user=ctx.user(),
537 537 date=date,
538 538 )
539 539 mergemod.mergestate.clean(repo)
540 540 else:
541 541 newnode = commitnode(
542 542 repo,
543 543 p1,
544 544 p2,
545 545 extra=extra,
546 546 commitmsg=commitmsg,
547 547 editor=editor,
548 548 user=ctx.user(),
549 549 date=date,
550 550 )
551 551
552 552 if newnode is None:
553 553 # If it ended up being a no-op commit, then the normal
554 554 # merge state clean-up path doesn't happen, so do it
555 555 # here. Fix issue5494
556 556 mergemod.mergestate.clean(repo)
557 557 return newnode
558 558
559 559 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
560 560 repo, ui, opts = self.repo, self.ui, self.opts
561 561 dest = self.destmap[rev]
562 562 ctx = repo[rev]
563 563 desc = _ctxdesc(ctx)
564 564 if self.state[rev] == rev:
565 565 ui.status(_(b'already rebased %s\n') % desc)
566 566 elif (
567 567 not allowdivergence
568 568 and rev in self.obsoletewithoutsuccessorindestination
569 569 ):
570 570 msg = (
571 571 _(
572 572 b'note: not rebasing %s and its descendants as '
573 573 b'this would cause divergence\n'
574 574 )
575 575 % desc
576 576 )
577 577 repo.ui.status(msg)
578 578 self.skipped.add(rev)
579 579 elif rev in self.obsoletenotrebased:
580 580 succ = self.obsoletenotrebased[rev]
581 581 if succ is None:
582 582 msg = _(b'note: not rebasing %s, it has no successor\n') % desc
583 583 else:
584 584 succdesc = _ctxdesc(repo[succ])
585 585 msg = _(
586 586 b'note: not rebasing %s, already in destination as %s\n'
587 587 ) % (desc, succdesc)
588 588 repo.ui.status(msg)
589 589 # Make clearrebased aware that state[rev] is not a true successor
590 590 self.skipped.add(rev)
591 591 # Record rev as moved to its desired destination in self.state.
592 592 # This helps bookmark and working parent movement.
593 593 dest = max(
594 594 adjustdest(repo, rev, self.destmap, self.state, self.skipped)
595 595 )
596 596 self.state[rev] = dest
597 597 elif self.state[rev] == revtodo:
598 598 ui.status(_(b'rebasing %s\n') % desc)
599 599 progressfn(ctx)
600 600 p1, p2, base = defineparents(
601 601 repo,
602 602 rev,
603 603 self.destmap,
604 604 self.state,
605 605 self.skipped,
606 606 self.obsoletenotrebased,
607 607 )
608 608 if not self.inmemory and len(repo[None].parents()) == 2:
609 609 repo.ui.debug(b'resuming interrupted rebase\n')
610 610 else:
611 611 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
612 612 with ui.configoverride(overrides, b'rebase'):
613 613 stats = rebasenode(
614 614 repo,
615 615 rev,
616 616 p1,
617 617 base,
618 618 self.collapsef,
619 619 dest,
620 620 wctx=self.wctx,
621 621 )
622 622 if stats.unresolvedcount > 0:
623 623 if self.inmemory:
624 624 raise error.InMemoryMergeConflictsError()
625 625 else:
626 626 raise error.InterventionRequired(
627 627 _(
628 628 b'unresolved conflicts (see hg '
629 629 b'resolve, then hg rebase --continue)'
630 630 )
631 631 )
632 632 if not self.collapsef:
633 633 merging = p2 != nullrev
634 634 editform = cmdutil.mergeeditform(merging, b'rebase')
635 635 editor = cmdutil.getcommiteditor(
636 636 editform=editform, **pycompat.strkwargs(opts)
637 637 )
638 638 newnode = self._concludenode(rev, p1, p2, editor)
639 639 else:
640 640 # Skip commit if we are collapsing
641 641 if self.inmemory:
642 642 self.wctx.setbase(repo[p1])
643 643 else:
644 644 repo.setparents(repo[p1].node())
645 645 newnode = None
646 646 # Update the state
647 647 if newnode is not None:
648 648 self.state[rev] = repo[newnode].rev()
649 649 ui.debug(b'rebased as %s\n' % short(newnode))
650 650 else:
651 651 if not self.collapsef:
652 652 ui.warn(
653 653 _(
654 654 b'note: not rebasing %s, its destination already '
655 655 b'has all its changes\n'
656 656 )
657 657 % desc
658 658 )
659 659 self.skipped.add(rev)
660 660 self.state[rev] = p1
661 661 ui.debug(b'next revision set to %d\n' % p1)
662 662 else:
663 663 ui.status(
664 664 _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
665 665 )
666 666 if not tr:
667 667 # When not using single transaction, store state after each
668 668 # commit is completely done. On InterventionRequired, we thus
669 669 # won't store the status. Instead, we'll hit the "len(parents) == 2"
670 670 # case and realize that the commit was in progress.
671 671 self.storestatus()
672 672
673 673 def _finishrebase(self):
674 674 repo, ui, opts = self.repo, self.ui, self.opts
675 675 fm = ui.formatter(b'rebase', opts)
676 676 fm.startitem()
677 677 if self.collapsef:
678 678 p1, p2, _base = defineparents(
679 679 repo,
680 680 min(self.state),
681 681 self.destmap,
682 682 self.state,
683 683 self.skipped,
684 684 self.obsoletenotrebased,
685 685 )
686 686 editopt = opts.get(b'edit')
687 687 editform = b'rebase.collapse'
688 688 if self.collapsemsg:
689 689 commitmsg = self.collapsemsg
690 690 else:
691 691 commitmsg = b'Collapsed revision'
692 692 for rebased in sorted(self.state):
693 693 if rebased not in self.skipped:
694 694 commitmsg += b'\n* %s' % repo[rebased].description()
695 695 editopt = True
696 696 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
697 697 revtoreuse = max(self.state)
698 698
699 699 newnode = self._concludenode(
700 700 revtoreuse, p1, self.external, editor, commitmsg=commitmsg
701 701 )
702 702
703 703 if newnode is not None:
704 704 newrev = repo[newnode].rev()
705 705 for oldrev in self.state:
706 706 self.state[oldrev] = newrev
707 707
708 708 if b'qtip' in repo.tags():
709 709 updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
710 710
711 711 # restore original working directory
712 712 # (we do this before stripping)
713 713 newwd = self.state.get(self.originalwd, self.originalwd)
714 714 if newwd < 0:
715 715 # original directory is a parent of rebase set root or ignored
716 716 newwd = self.originalwd
717 717 if newwd not in [c.rev() for c in repo[None].parents()]:
718 718 ui.note(_(b"update back to initial working directory parent\n"))
719 719 hg.updaterepo(repo, newwd, overwrite=False)
720 720
721 721 collapsedas = None
722 722 if self.collapsef and not self.keepf:
723 723 collapsedas = newnode
724 724 clearrebased(
725 725 ui,
726 726 repo,
727 727 self.destmap,
728 728 self.state,
729 729 self.skipped,
730 730 collapsedas,
731 731 self.keepf,
732 732 fm=fm,
733 733 backup=self.backupf,
734 734 )
735 735
736 736 clearstatus(repo)
737 737 clearcollapsemsg(repo)
738 738
739 739 ui.note(_(b"rebase completed\n"))
740 740 util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
741 741 if self.skipped:
742 742 skippedlen = len(self.skipped)
743 743 ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
744 744 fm.end()
745 745
746 746 if (
747 747 self.activebookmark
748 748 and self.activebookmark in repo._bookmarks
749 749 and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
750 750 ):
751 751 bookmarks.activate(repo, self.activebookmark)
752 752
753 753 def _abort(self, backup=True, suppwarns=False):
754 754 '''Restore the repository to its original state.'''
755 755
756 756 repo = self.repo
757 757 try:
758 758 # If the first commits in the rebased set get skipped during the
759 759 # rebase, their values within the state mapping will be the dest
760 760 # rev id. The rebased list must not contain the dest rev
761 761 # (issue4896)
762 762 rebased = [
763 763 s
764 764 for r, s in self.state.items()
765 765 if s >= 0 and s != r and s != self.destmap[r]
766 766 ]
767 767 immutable = [d for d in rebased if not repo[d].mutable()]
768 768 cleanup = True
769 769 if immutable:
770 770 repo.ui.warn(
771 771 _(b"warning: can't clean up public changesets %s\n")
772 772 % b', '.join(bytes(repo[r]) for r in immutable),
773 773 hint=_(b"see 'hg help phases' for details"),
774 774 )
775 775 cleanup = False
776 776
777 777 descendants = set()
778 778 if rebased:
779 779 descendants = set(repo.changelog.descendants(rebased))
780 780 if descendants - set(rebased):
781 781 repo.ui.warn(
782 782 _(
783 783 b"warning: new changesets detected on "
784 784 b"destination branch, can't strip\n"
785 785 )
786 786 )
787 787 cleanup = False
788 788
789 789 if cleanup:
790 790 if rebased:
791 791 strippoints = [
792 792 c.node() for c in repo.set(b'roots(%ld)', rebased)
793 793 ]
794 794
795 795 updateifonnodes = set(rebased)
796 796 updateifonnodes.update(self.destmap.values())
797 797 updateifonnodes.add(self.originalwd)
798 798 shouldupdate = repo[b'.'].rev() in updateifonnodes
799 799
800 800 # Update away from the rebase if necessary
801 801 if shouldupdate or needupdate(repo, self.state):
802 802 mergemod.update(
803 803 repo, self.originalwd, branchmerge=False, force=True
804 804 )
805 805
806 806 # Strip from the first rebased revision
807 807 if rebased:
808 808 repair.strip(repo.ui, repo, strippoints, backup=backup)
809 809
810 810 if self.activebookmark and self.activebookmark in repo._bookmarks:
811 811 bookmarks.activate(repo, self.activebookmark)
812 812
813 813 finally:
814 814 clearstatus(repo)
815 815 clearcollapsemsg(repo)
816 816 if not suppwarns:
817 817 repo.ui.warn(_(b'rebase aborted\n'))
818 818 return 0
819 819
820 820
821 821 @command(
822 822 b'rebase',
823 823 [
824 824 (
825 825 b's',
826 826 b'source',
827 827 b'',
828 828 _(b'rebase the specified changeset and descendants'),
829 829 _(b'REV'),
830 830 ),
831 831 (
832 832 b'b',
833 833 b'base',
834 834 b'',
835 835 _(b'rebase everything from branching point of specified changeset'),
836 836 _(b'REV'),
837 837 ),
838 838 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
839 839 (
840 840 b'd',
841 841 b'dest',
842 842 b'',
843 843 _(b'rebase onto the specified changeset'),
844 844 _(b'REV'),
845 845 ),
846 846 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
847 847 (
848 848 b'm',
849 849 b'message',
850 850 b'',
851 851 _(b'use text as collapse commit message'),
852 852 _(b'TEXT'),
853 853 ),
854 854 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
855 855 (
856 856 b'l',
857 857 b'logfile',
858 858 b'',
859 859 _(b'read collapse commit message from file'),
860 860 _(b'FILE'),
861 861 ),
862 862 (b'k', b'keep', False, _(b'keep original changesets')),
863 863 (b'', b'keepbranches', False, _(b'keep original branch names')),
864 864 (b'D', b'detach', False, _(b'(DEPRECATED)')),
865 865 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
866 866 (b't', b'tool', b'', _(b'specify merge tool')),
867 867 (b'', b'stop', False, _(b'stop interrupted rebase')),
868 868 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
869 869 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
870 870 (
871 871 b'',
872 872 b'auto-orphans',
873 873 b'',
874 874 _(
875 875 b'automatically rebase orphan revisions '
876 876 b'in the specified revset (EXPERIMENTAL)'
877 877 ),
878 878 ),
879 879 ]
880 880 + cmdutil.dryrunopts
881 881 + cmdutil.formatteropts
882 882 + cmdutil.confirmopts,
883 883 _(b'[-s REV | -b REV] [-d REV] [OPTION]'),
884 884 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
885 885 )
886 886 def rebase(ui, repo, **opts):
887 887 """move changeset (and descendants) to a different branch
888 888
889 889 Rebase uses repeated merging to graft changesets from one part of
890 890 history (the source) onto another (the destination). This can be
891 891 useful for linearizing *local* changes relative to a master
892 892 development tree.
893 893
894 894 Published commits cannot be rebased (see :hg:`help phases`).
895 895 To copy commits, see :hg:`help graft`.
896 896
897 897 If you don't specify a destination changeset (``-d/--dest``), rebase
898 898 will use the same logic as :hg:`merge` to pick a destination. If
899 899 the current branch contains exactly one other head, the other head
900 900 is merged with by default. Otherwise, an explicit revision with
901 901 which to merge must be provided. (The destination changeset is not
902 902 modified by rebasing, but new changesets are added as its
903 903 descendants.)
904 904
905 905 Here are the ways to select changesets:
906 906
907 907 1. Explicitly select them using ``--rev``.
908 908
909 909 2. Use ``--source`` to select a root changeset and include all of its
910 910 descendants.
911 911
912 912 3. Use ``--base`` to select a changeset; rebase will find ancestors
913 913 and their descendants which are not also ancestors of the destination.
914 914
915 915 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
916 916 rebase will use ``--base .`` as above.
917 917
918 918 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
919 919 can be used in ``--dest``. Destination would be calculated per source
920 920 revision with ``SRC`` substituted by that single source revision and
921 921 ``ALLSRC`` substituted by all source revisions.
922 922
923 923 Rebase will destroy original changesets unless you use ``--keep``.
924 924 It will also move your bookmarks (even if you do).
925 925
926 926 Some changesets may be dropped if they do not contribute changes
927 927 (e.g. merges from the destination branch).
928 928
929 929 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
930 930 a named branch with two heads. You will need to explicitly specify source
931 931 and/or destination.
932 932
933 933 If you need to use a tool to automate merge/conflict decisions, you
934 934 can specify one with ``--tool``, see :hg:`help merge-tools`.
935 935 As a caveat: the tool will not be used to mediate when a file was
936 936 deleted; there is no hook presently available for this.
937 937
938 938 If a rebase is interrupted to manually resolve a conflict, it can be
939 939 continued with --continue/-c, aborted with --abort/-a, or stopped with
940 940 --stop.
941 941
942 942 .. container:: verbose
943 943
944 944 Examples:
945 945
946 946 - move "local changes" (current commit back to branching point)
947 947 to the current branch tip after a pull::
948 948
949 949 hg rebase
950 950
951 951 - move a single changeset to the stable branch::
952 952
953 953 hg rebase -r 5f493448 -d stable
954 954
955 955 - splice a commit and all its descendants onto another part of history::
956 956
957 957 hg rebase --source c0c3 --dest 4cf9
958 958
959 959 - rebase everything on a branch marked by a bookmark onto the
960 960 default branch::
961 961
962 962 hg rebase --base myfeature --dest default
963 963
964 964 - collapse a sequence of changes into a single commit::
965 965
966 966 hg rebase --collapse -r 1520:1525 -d .
967 967
968 968 - move a named branch while preserving its name::
969 969
970 970 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
971 971
972 972 - stabilize orphaned changesets so history looks linear::
973 973
974 974 hg rebase -r 'orphan()-obsolete()'\
975 975 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
976 976 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
977 977
978 978 Configuration Options:
979 979
980 980 You can make rebase require a destination if you set the following config
981 981 option::
982 982
983 983 [commands]
984 984 rebase.requiredest = True
985 985
986 986 By default, rebase will close the transaction after each commit. For
987 987 performance purposes, you can configure rebase to use a single transaction
988 988 across the entire rebase. WARNING: This setting introduces a significant
989 989 risk of losing the work you've done in a rebase if the rebase aborts
990 990 unexpectedly::
991 991
992 992 [rebase]
993 993 singletransaction = True
994 994
995 995 By default, rebase writes to the working copy, but you can configure it to
996 996 run in-memory for better performance. When the rebase is not moving the
997 997 parent(s) of the working copy (AKA the "currently checked out changesets"),
998 998 this may also allow it to run even if the working copy is dirty::
999 999
1000 1000 [rebase]
1001 1001 experimental.inmemory = True
1002 1002
1003 1003 Return Values:
1004 1004
1005 1005 Returns 0 on success, 1 if nothing to rebase or there are
1006 1006 unresolved conflicts.
1007 1007
1008 1008 """
1009 1009 opts = pycompat.byteskwargs(opts)
1010 1010 inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
1011 1011 action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
1012 1012 if action:
1013 1013 cmdutil.check_incompatible_arguments(
1014 1014 opts, action, b'confirm', b'dry_run'
1015 1015 )
1016 1016 cmdutil.check_incompatible_arguments(
1017 1017 opts, action, b'rev', b'source', b'base', b'dest'
1018 1018 )
1019 1019 cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
1020 1020 cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
1021 1021
1022 1022 if action or repo.currenttransaction() is not None:
1023 1023 # in-memory rebase is not compatible with resuming rebases.
1024 1024 # (Or if it is run within a transaction, since the restart logic can
1025 1025 # fail the entire transaction.)
1026 1026 inmemory = False
1027 1027
1028 1028 if opts.get(b'auto_orphans'):
1029 1029 disallowed_opts = set(opts) - {b'auto_orphans'}
1030 1030 cmdutil.check_incompatible_arguments(
1031 1031 opts, b'auto_orphans', *disallowed_opts
1032 1032 )
1033 1033
1034 1034 userrevs = list(repo.revs(opts.get(b'auto_orphans')))
1035 1035 opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
1036 1036 opts[b'dest'] = b'_destautoorphanrebase(SRC)'
1037 1037
1038 1038 if opts.get(b'dry_run') or opts.get(b'confirm'):
1039 1039 return _dryrunrebase(ui, repo, action, opts)
1040 1040 elif action == b'stop':
1041 1041 rbsrt = rebaseruntime(repo, ui)
1042 1042 with repo.wlock(), repo.lock():
1043 1043 rbsrt.restorestatus()
1044 1044 if rbsrt.collapsef:
1045 1045 raise error.Abort(_(b"cannot stop in --collapse session"))
1046 1046 allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
1047 1047 if not (rbsrt.keepf or allowunstable):
1048 1048 raise error.Abort(
1049 1049 _(
1050 1050 b"cannot remove original changesets with"
1051 1051 b" unrebased descendants"
1052 1052 ),
1053 1053 hint=_(
1054 1054 b'either enable obsmarkers to allow unstable '
1055 1055 b'revisions or use --keep to keep original '
1056 1056 b'changesets'
1057 1057 ),
1058 1058 )
1059 1059 if needupdate(repo, rbsrt.state):
1060 1060 # update to the current working revision
1061 1061 # to clear interrupted merge
1062 1062 hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
1063 1063 rbsrt._finishrebase()
1064 1064 return 0
1065 1065 elif inmemory:
1066 1066 try:
1067 1067 # in-memory merge doesn't support conflicts, so if we hit any, abort
1068 1068 # and re-run as an on-disk merge.
1069 1069 overrides = {(b'rebase', b'singletransaction'): True}
1070 1070 with ui.configoverride(overrides, b'rebase'):
1071 1071 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
1072 1072 except error.InMemoryMergeConflictsError:
1073 1073 ui.warn(
1074 1074 _(
1075 1075 b'hit merge conflicts; re-running rebase without in-memory'
1076 1076 b' merge\n'
1077 1077 )
1078 1078 )
1079 1079 # TODO: Make in-memory merge not use the on-disk merge state, so
1080 1080 # we don't have to clean it here
1081 1081 mergemod.mergestate.clean(repo)
1082 1082 clearstatus(repo)
1083 1083 clearcollapsemsg(repo)
1084 1084 return _dorebase(ui, repo, action, opts, inmemory=False)
1085 1085 else:
1086 1086 return _dorebase(ui, repo, action, opts)
1087 1087
1088 1088
1089 1089 def _dryrunrebase(ui, repo, action, opts):
1090 1090 rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
1091 1091 confirm = opts.get(b'confirm')
1092 1092 if confirm:
1093 1093 ui.status(_(b'starting in-memory rebase\n'))
1094 1094 else:
1095 1095 ui.status(
1096 1096 _(b'starting dry-run rebase; repository will not be changed\n')
1097 1097 )
1098 1098 with repo.wlock(), repo.lock():
1099 1099 needsabort = True
1100 1100 try:
1101 1101 overrides = {(b'rebase', b'singletransaction'): True}
1102 1102 with ui.configoverride(overrides, b'rebase'):
1103 1103 _origrebase(
1104 1104 ui,
1105 1105 repo,
1106 1106 action,
1107 1107 opts,
1108 1108 rbsrt,
1109 1109 inmemory=True,
1110 1110 leaveunfinished=True,
1111 1111 )
1112 1112 except error.InMemoryMergeConflictsError:
1113 1113 ui.status(_(b'hit a merge conflict\n'))
1114 1114 return 1
1115 1115 except error.Abort:
1116 1116 needsabort = False
1117 1117 raise
1118 1118 else:
1119 1119 if confirm:
1120 1120 ui.status(_(b'rebase completed successfully\n'))
1121 1121 if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
1122 1122 # finish unfinished rebase
1123 1123 rbsrt._finishrebase()
1124 1124 else:
1125 1125 rbsrt._prepareabortorcontinue(
1126 1126 isabort=True, backup=False, suppwarns=True
1127 1127 )
1128 1128 needsabort = False
1129 1129 else:
1130 1130 ui.status(
1131 1131 _(
1132 1132 b'dry-run rebase completed successfully; run without'
1133 1133 b' -n/--dry-run to perform this rebase\n'
1134 1134 )
1135 1135 )
1136 1136 return 0
1137 1137 finally:
1138 1138 if needsabort:
1139 1139 # no need to store backup in case of dryrun
1140 1140 rbsrt._prepareabortorcontinue(
1141 1141 isabort=True, backup=False, suppwarns=True
1142 1142 )
1143 1143
1144 1144
1145 1145 def _dorebase(ui, repo, action, opts, inmemory=False):
1146 1146 rbsrt = rebaseruntime(repo, ui, inmemory, opts)
1147 1147 return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)
1148 1148
1149 1149
1150 1150 def _origrebase(
1151 1151 ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
1152 1152 ):
1153 1153 assert action != b'stop'
1154 1154 with repo.wlock(), repo.lock():
1155 1155 if opts.get(b'interactive'):
1156 1156 try:
1157 1157 if extensions.find(b'histedit'):
1158 1158 enablehistedit = b''
1159 1159 except KeyError:
1160 1160 enablehistedit = b" --config extensions.histedit="
1161 1161 help = b"hg%s help -e histedit" % enablehistedit
1162 1162 msg = (
1163 1163 _(
1164 1164 b"interactive history editing is supported by the "
1165 1165 b"'histedit' extension (see \"%s\")"
1166 1166 )
1167 1167 % help
1168 1168 )
1169 1169 raise error.Abort(msg)
1170 1170
1171 1171 if rbsrt.collapsemsg and not rbsrt.collapsef:
1172 1172 raise error.Abort(_(b'message can only be specified with collapse'))
1173 1173
1174 1174 if action:
1175 1175 if rbsrt.collapsef:
1176 1176 raise error.Abort(
1177 1177 _(b'cannot use collapse with continue or abort')
1178 1178 )
1179 1179 if action == b'abort' and opts.get(b'tool', False):
1180 1180 ui.warn(_(b'tool option will be ignored\n'))
1181 1181 if action == b'continue':
1182 1182 ms = mergemod.mergestate.read(repo)
1183 1183 mergeutil.checkunresolved(ms)
1184 1184
1185 1185 retcode = rbsrt._prepareabortorcontinue(
1186 1186 isabort=(action == b'abort')
1187 1187 )
1188 1188 if retcode is not None:
1189 1189 return retcode
1190 1190 else:
1191 1191 # search default destination in this space
1192 1192 # used in the 'hg pull --rebase' case, see issue 5214.
1193 1193 destspace = opts.get(b'_destspace')
1194 1194 destmap = _definedestmap(
1195 1195 ui,
1196 1196 repo,
1197 1197 inmemory,
1198 1198 opts.get(b'dest', None),
1199 1199 opts.get(b'source', None),
1200 1200 opts.get(b'base', None),
1201 1201 opts.get(b'rev', []),
1202 1202 destspace=destspace,
1203 1203 )
1204 1204 retcode = rbsrt._preparenewrebase(destmap)
1205 1205 if retcode is not None:
1206 1206 return retcode
1207 1207 storecollapsemsg(repo, rbsrt.collapsemsg)
1208 1208
1209 1209 tr = None
1210 1210
1211 1211 singletr = ui.configbool(b'rebase', b'singletransaction')
1212 1212 if singletr:
1213 1213 tr = repo.transaction(b'rebase')
1214 1214
1215 1215 # If `rebase.singletransaction` is enabled, wrap the entire operation in
1216 1216 # one transaction here. Otherwise, transactions are obtained when
1217 1217 # committing each node, which is slower but allows partial success.
1218 1218 with util.acceptintervention(tr):
1219 1219 # Same logic for the dirstate guard, except we don't create one when
1220 1220 # rebasing in-memory (it's not needed).
1221 1221 dsguard = None
1222 1222 if singletr and not inmemory:
1223 1223 dsguard = dirstateguard.dirstateguard(repo, b'rebase')
1224 1224 with util.acceptintervention(dsguard):
1225 1225 rbsrt._performrebase(tr)
1226 1226 if not leaveunfinished:
1227 1227 rbsrt._finishrebase()
1228 1228
1229 1229
1230 1230 def _definedestmap(
1231 1231 ui,
1232 1232 repo,
1233 1233 inmemory,
1234 1234 destf=None,
1235 1235 srcf=None,
1236 1236 basef=None,
1237 1237 revf=None,
1238 1238 destspace=None,
1239 1239 ):
1240 1240 """use revisions argument to define destmap {srcrev: destrev}"""
1241 1241 if revf is None:
1242 1242 revf = []
1243 1243
1244 1244 # destspace is here to work around issues with `hg pull --rebase` see
1245 1245 # issue5214 for details
1246 1246
1247 1247 cmdutil.checkunfinished(repo)
1248 1248 if not inmemory:
1249 1249 cmdutil.bailifchanged(repo)
1250 1250
1251 1251 if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
1252 1252 raise error.Abort(
1253 1253 _(b'you must specify a destination'),
1254 1254 hint=_(b'use: hg rebase -d REV'),
1255 1255 )
1256 1256
1257 1257 dest = None
1258 1258
1259 1259 if revf:
1260 1260 rebaseset = scmutil.revrange(repo, revf)
1261 1261 if not rebaseset:
1262 1262 ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
1263 1263 return None
1264 1264 elif srcf:
1265 1265 src = scmutil.revrange(repo, [srcf])
1266 1266 if not src:
1267 1267 ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
1268 1268 return None
1269 1269 rebaseset = repo.revs(b'(%ld)::', src)
1270 1270 assert rebaseset
1271 1271 else:
1272 1272 base = scmutil.revrange(repo, [basef or b'.'])
1273 1273 if not base:
1274 1274 ui.status(
1275 1275 _(b'empty "base" revision set - ' b"can't compute rebase set\n")
1276 1276 )
1277 1277 return None
1278 1278 if destf:
1279 1279 # --base does not support multiple destinations
1280 1280 dest = scmutil.revsingle(repo, destf)
1281 1281 else:
1282 1282 dest = repo[_destrebase(repo, base, destspace=destspace)]
1283 1283 destf = bytes(dest)
1284 1284
1285 1285 roots = [] # selected children of branching points
1286 1286 bpbase = {} # {branchingpoint: [origbase]}
1287 1287 for b in base: # group bases by branching points
1288 1288 bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
1289 1289 bpbase[bp] = bpbase.get(bp, []) + [b]
1290 1290 if None in bpbase:
1291 1291 # emulate the old behavior, showing "nothing to rebase" (a better
1292 1292 # behavior may be to abort with a "cannot find branching point" error)
1293 1293 bpbase.clear()
1294 1294 for bp, bs in pycompat.iteritems(bpbase): # calculate roots
1295 1295 roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))
1296 1296
1297 1297 rebaseset = repo.revs(b'%ld::', roots)
1298 1298
1299 1299 if not rebaseset:
1300 1300 # transform to list because smartsets are not comparable to
1301 1301 # lists. This should be improved to honor laziness of
1302 1302 # smartset.
1303 1303 if list(base) == [dest.rev()]:
1304 1304 if basef:
1305 1305 ui.status(
1306 1306 _(
1307 1307 b'nothing to rebase - %s is both "base"'
1308 1308 b' and destination\n'
1309 1309 )
1310 1310 % dest
1311 1311 )
1312 1312 else:
1313 1313 ui.status(
1314 1314 _(
1315 1315 b'nothing to rebase - working directory '
1316 1316 b'parent is also destination\n'
1317 1317 )
1318 1318 )
1319 1319 elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
1320 1320 if basef:
1321 1321 ui.status(
1322 1322 _(
1323 1323 b'nothing to rebase - "base" %s is '
1324 1324 b'already an ancestor of destination '
1325 1325 b'%s\n'
1326 1326 )
1327 1327 % (b'+'.join(bytes(repo[r]) for r in base), dest)
1328 1328 )
1329 1329 else:
1330 1330 ui.status(
1331 1331 _(
1332 1332 b'nothing to rebase - working '
1333 1333 b'directory parent is already an '
1334 1334 b'ancestor of destination %s\n'
1335 1335 )
1336 1336 % dest
1337 1337 )
1338 1338 else: # can it happen?
1339 1339 ui.status(
1340 1340 _(b'nothing to rebase from %s to %s\n')
1341 1341 % (b'+'.join(bytes(repo[r]) for r in base), dest)
1342 1342 )
1343 1343 return None
1344 1344
1345 1345 rebasingwcp = repo[b'.'].rev() in rebaseset
1346 1346 ui.log(
1347 1347 b"rebase",
1348 1348 b"rebasing working copy parent: %r\n",
1349 1349 rebasingwcp,
1350 1350 rebase_rebasing_wcp=rebasingwcp,
1351 1351 )
1352 1352 if inmemory and rebasingwcp:
1353 1353 # Check these since we did not before.
1354 1354 cmdutil.checkunfinished(repo)
1355 1355 cmdutil.bailifchanged(repo)
1356 1356
1357 1357 if not destf:
1358 1358 dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
1359 1359 destf = bytes(dest)
1360 1360
1361 1361 allsrc = revsetlang.formatspec(b'%ld', rebaseset)
1362 1362 alias = {b'ALLSRC': allsrc}
1363 1363
1364 1364 if dest is None:
1365 1365 try:
1366 1366 # fast path: try to resolve dest without SRC alias
1367 1367 dest = scmutil.revsingle(repo, destf, localalias=alias)
1368 1368 except error.RepoLookupError:
1369 1369 # multi-dest path: resolve dest for each SRC separately
1370 1370 destmap = {}
1371 1371 for r in rebaseset:
1372 1372 alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
1373 1373 # use repo.anyrevs instead of scmutil.revsingle because we
1374 1374 # don't want to abort if destset is empty.
1375 1375 destset = repo.anyrevs([destf], user=True, localalias=alias)
1376 1376 size = len(destset)
1377 1377 if size == 1:
1378 1378 destmap[r] = destset.first()
1379 1379 elif size == 0:
1380 1380 ui.note(_(b'skipping %s - empty destination\n') % repo[r])
1381 1381 else:
1382 1382 raise error.Abort(
1383 1383 _(b'rebase destination for %s is not unique') % repo[r]
1384 1384 )
1385 1385
1386 1386 if dest is not None:
1387 1387 # single-dest case: assign dest to each rev in rebaseset
1388 1388 destrev = dest.rev()
1389 1389 destmap = {r: destrev for r in rebaseset} # {srcrev: destrev}
1390 1390
1391 1391 if not destmap:
1392 1392 ui.status(_(b'nothing to rebase - empty destination\n'))
1393 1393 return None
1394 1394
1395 1395 return destmap
1396 1396
1397 1397
1398 1398 def externalparent(repo, state, destancestors):
1399 1399 """Return the revision that should be used as the second parent
1400 1400 when the revisions in state are collapsed on top of destancestors.
1401 1401 Abort if there is more than one parent.
1402 1402 """
1403 1403 parents = set()
1404 1404 source = min(state)
1405 1405 for rev in state:
1406 1406 if rev == source:
1407 1407 continue
1408 1408 for p in repo[rev].parents():
1409 1409 if p.rev() not in state and p.rev() not in destancestors:
1410 1410 parents.add(p.rev())
1411 1411 if not parents:
1412 1412 return nullrev
1413 1413 if len(parents) == 1:
1414 1414 return parents.pop()
1415 1415 raise error.Abort(
1416 1416 _(
1417 1417 b'unable to collapse on top of %d, there is more '
1418 1418 b'than one external parent: %s'
1419 1419 )
1420 1420 % (max(destancestors), b', '.join(b"%d" % p for p in sorted(parents)))
1421 1421 )
1422 1422
1423 1423
1424 1424 def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg):
1425 1425 '''Commit the memory changes with parents p1 and p2.
1426 1426 Return node of committed revision.'''
1427 1427 # Replicates the empty check in ``repo.commit``.
1428 1428 if wctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
1429 1429 return None
1430 1430
1431 1431 # By convention, ``extra['branch']`` (set by extrafn) clobbers
1432 1432 # ``branch`` (used when passing ``--keepbranches``).
1433 1433 branch = None
1434 1434 if b'branch' in extra:
1435 1435 branch = extra[b'branch']
1436 1436
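# The change in this revision (r44503): overlayworkingctx grew a
# setparents() that mirrors dirstate.setparents(), so the in-memory
# context records its parents up front instead of tomemctx() receiving
# them as a "parents" argument.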
1437 wctx.setparents(repo[p1].node(), repo[p2].node())
1437 1438 memctx = wctx.tomemctx(
1438 1439 commitmsg,
1439 parents=(p1, p2),
1440 1440 date=date,
1441 1441 extra=extra,
1442 1442 user=user,
1443 1443 branch=branch,
1444 1444 editor=editor,
1445 1445 )
1446 1446 commitres = repo.commitctx(memctx)
1447 1447 wctx.clean() # Might be reused
1448 1448 return commitres
1449 1449
1450 1450
1451 1451 def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg):
1452 1452 '''Commit the wd changes with parents p1 and p2.
1453 1453 Return node of committed revision.'''
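# When rebase.singletransaction is set, the transaction and dirstate
# guard opened in _origrebase() already cover this commit; otherwise,
# guard the dirstate for just this one commit.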
1454 1454 dsguard = util.nullcontextmanager()
1455 1455 if not repo.ui.configbool(b'rebase', b'singletransaction'):
1456 1456 dsguard = dirstateguard.dirstateguard(repo, b'rebase')
1457 1457 with dsguard:
1458 1458 repo.setparents(repo[p1].node(), repo[p2].node())
1459 1459
1460 1460 # Commit might fail if unresolved files exist
1461 1461 newnode = repo.commit(
1462 1462 text=commitmsg, user=user, date=date, extra=extra, editor=editor
1463 1463 )
1464 1464
1465 1465 repo.dirstate.setbranch(repo[newnode].branch())
1466 1466 return newnode
1467 1467
1468 1468
1469 1469 def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
1470 1470 """Rebase a single revision rev on top of p1 using base as merge ancestor"""
1471 1471 # Merge phase
1472 1472 # Update to destination and merge it with local
1473 1473 if wctx.isinmemory():
1474 1474 wctx.setbase(repo[p1])
1475 1475 else:
1476 1476 if repo[b'.'].rev() != p1:
1477 1477 repo.ui.debug(b" update to %d:%s\n" % (p1, repo[p1]))
1478 1478 mergemod.update(repo, p1, branchmerge=False, force=True)
1479 1479 else:
1480 1480 repo.ui.debug(b" already in destination\n")
1481 1481 # This is, alas, necessary to invalidate workingctx's manifest cache,
1482 1482 # as well as other data we litter on it in other places.
1483 1483 wctx = repo[None]
1484 1484 repo.dirstate.write(repo.currenttransaction())
1485 1485 repo.ui.debug(b" merge against %d:%s\n" % (rev, repo[rev]))
1486 1486 if base is not None:
1487 1487 repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))
1488 1488 # When collapsing in-place, the parent is the common ancestor, we
1489 1489 # have to allow merging with it.
1490 1490 stats = mergemod.update(
1491 1491 repo,
1492 1492 rev,
1493 1493 branchmerge=True,
1494 1494 force=True,
1495 1495 ancestor=base,
1496 1496 mergeancestor=collapse,
1497 1497 labels=[b'dest', b'source'],
1498 1498 wc=wctx,
1499 1499 )
1500 1500 if collapse:
1501 1501 copies.duplicatecopies(repo, wctx, rev, dest)
1502 1502 else:
1503 1503 # If we're not using --collapse, we need to
1504 1504 # duplicate copies between the revision we're
1505 1505 # rebasing and its first parent, but *not*
1506 1506 # duplicate any copies that have already been
1507 1507 # performed in the destination.
1508 1508 p1rev = repo[rev].p1().rev()
1509 1509 copies.duplicatecopies(repo, wctx, rev, p1rev, skiprev=dest)
1510 1510 return stats
1511 1511
1512 1512
1513 1513 def adjustdest(repo, rev, destmap, state, skipped):
1514 1514 r"""adjust rebase destination given the current rebase state
1515 1515
1516 1516 rev is what is being rebased. Return a list of two revs, which are the
1517 1517 adjusted destinations for rev's p1 and p2, respectively. If a parent is
1518 1518 nullrev, return dest without adjustment for it.
1519 1519
1520 1520 For example, when doing rebasing B+E to F, C to G, rebase will first move B
1521 1521 to B1, and E's destination will be adjusted from F to B1.
1522 1522
1523 1523 B1 <- written during rebasing B
1524 1524 |
1525 1525 F <- original destination of B, E
1526 1526 |
1527 1527 | E <- rev, which is being rebased
1528 1528 | |
1529 1529 | D <- prev, one parent of rev being checked
1530 1530 | |
1531 1531 | x <- skipped, ex. no successor or successor in (::dest)
1532 1532 | |
1533 1533 | C <- rebased as C', different destination
1534 1534 | |
1535 1535 | B <- rebased as B1 C'
1536 1536 |/ |
1537 1537 A G <- destination of C, different
1538 1538
1539 1539 Another example with a merge changeset: for rebase -r C+G+H -d K, rebase
1540 1540 will first move C to C1 and G to G1, and when it is checking H, the
1541 1541 adjusted destinations will be [C1, G1].
1542 1542
1543 1543 H C1 G1
1544 1544 /| | /
1545 1545 F G |/
1546 1546 K | | -> K
1547 1547 | C D |
1548 1548 | |/ |
1549 1549 | B | ...
1550 1550 |/ |/
1551 1551 A A
1552 1552
1553 1553 Besides, adjust dest according to existing rebase information. For example,
1554 1554
1555 1555 B C D B needs to be rebased on top of C, C needs to be rebased on top
1556 1556 \|/ of D. We will rebase C first.
1557 1557 A
1558 1558
1559 1559 C' After rebasing C, when considering B's destination, use C'
1560 1560 | instead of the original C.
1561 1561 B D
1562 1562 \ /
1563 1563 A
1564 1564 """
1565 1565 # pick already rebased revs with same dest from state as interesting source
1566 1566 dest = destmap[rev]
1567 1567 source = [
1568 1568 s
1569 1569 for s, d in state.items()
1570 1570 if d > 0 and destmap[s] == dest and s not in skipped
1571 1571 ]
1572 1572
1573 1573 result = []
1574 1574 for prev in repo.changelog.parentrevs(rev):
1575 1575 adjusted = dest
1576 1576 if prev != nullrev:
1577 1577 candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
1578 1578 if candidate is not None:
1579 1579 adjusted = state[candidate]
1580 1580 if adjusted == dest and dest in state:
1581 1581 adjusted = state[dest]
1582 1582 if adjusted == revtodo:
1583 1583 # sortsource should produce an order that makes this impossible
1584 1584 raise error.ProgrammingError(
1585 1585 b'rev %d should be rebased already at this time' % dest
1586 1586 )
1587 1587 result.append(adjusted)
1588 1588 return result
1589 1589
1590 1590
1591 1591 def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
1592 1592 """
1593 1593 Abort if rebase will create divergence or rebase is noop because of markers
1594 1594
1595 1595 `rebaseobsrevs`: set of obsolete revision in source
1596 1596 `rebaseobsskipped`: set of revisions from source skipped because they have
1597 1597 successors in destination or no non-obsolete successor.
1598 1598 """
1599 1599 # Obsolete node with successors not in dest leads to divergence
1600 1600 divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
1601 1601 divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
1602 1602
1603 1603 if divergencebasecandidates and not divergenceok:
1604 1604 divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
1605 1605 msg = _(b"this rebase will cause divergences from: %s")
1606 1606 h = _(
1607 1607 b"to force the rebase please set "
1608 1608 b"experimental.evolution.allowdivergence=True"
1609 1609 )
1610 1610 raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1611 1611
1612 1612
1613 1613 def successorrevs(unfi, rev):
1614 1614 """yield revision numbers for successors of rev"""
1615 1615 assert unfi.filtername is None
1616 1616 get_rev = unfi.changelog.index.get_rev
1617 1617 for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
1618 1618 r = get_rev(s)
1619 1619 if r is not None:
1620 1620 yield r
1621 1621
1622 1622
1623 1623 def defineparents(repo, rev, destmap, state, skipped, obsskipped):
1624 1624 """Return new parents and optionally a merge base for rev being rebased
1625 1625
1626 1626 The destination specified by "dest" cannot always be used directly because
1627 1627 a previous rebase result could affect the destination. For example,
1628 1628
1629 1629 D E    rebase -r C+D+E -d B
1630 1630 |/     C will be rebased to C'
1631 1631 B C    D's new destination will be C' instead of B
1632 1632 |/     E's new destination will be C' instead of B
1633 1633 A
1634 1634
1635 1635 The new parents of a merge are slightly more complicated. See the comment
1636 1636 block below.
1637 1637 """
1638 1638 # use unfiltered changelog since successorrevs may return filtered nodes
1639 1639 assert repo.filtername is None
1640 1640 cl = repo.changelog
1641 1641 isancestor = cl.isancestorrev
1642 1642
1643 1643 dest = destmap[rev]
1644 1644 oldps = repo.changelog.parentrevs(rev) # old parents
1645 1645 newps = [nullrev, nullrev] # new parents
1646 1646 dests = adjustdest(repo, rev, destmap, state, skipped)
1647 1647 bases = list(oldps) # merge base candidates, initially just old parents
1648 1648
1649 1649 if all(r == nullrev for r in oldps[1:]):
1650 1650 # For non-merge changeset, just move p to adjusted dest as requested.
1651 1651 newps[0] = dests[0]
1652 1652 else:
1653 1653 # For merge changeset, if we move p to dests[i] unconditionally, both
1654 1654 # parents may change and the end result looks like "the merge loses a
1655 1655 # parent", which is a surprise. This is a limit because "--dest" only
1656 1656 # accepts one dest per src.
1657 1657 #
1658 1658 # Therefore, only move p with reasonable conditions (in this order):
1659 1659 # 1. use dest, if dest is a descendant of (p or one of p's successors)
1660 1660 # 2. use p's rebased result, if p is rebased (state[p] > 0)
1661 1661 #
1662 1662 # Comparing with adjustdest, the logic here does some additional work:
1663 1663 # 1. decide which parents will not be moved towards dest
1664 1664 # 2. if the above decision is "no", should a parent still be moved
1665 1665 # because it was rebased?
1666 1666 #
1667 1667 # For example:
1668 1668 #
1669 1669 # C # "rebase -r C -d D" is an error since none of the parents
1670 1670 # /| # can be moved. "rebase -r B+C -d D" will move C's parent
1671 1671 # A B D # B (using rule "2."), since B will be rebased.
1672 1672 #
1673 1673 # The loop tries not to rely on the fact that a Mercurial node has
1674 1674 # at most 2 parents.
1675 1675 for i, p in enumerate(oldps):
1676 1676 np = p # new parent
1677 1677 if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
1678 1678 np = dests[i]
1679 1679 elif p in state and state[p] > 0:
1680 1680 np = state[p]
1681 1681
1682 1682 # "bases" only record "special" merge bases that cannot be
1683 1683 # calculated from changelog DAG (i.e. isancestor(p, np) is False).
1684 1684 # For example:
1685 1685 #
1686 1686 #  B'  # rebase -s B -d D, when B was rebased to B'. dest for C
1687 1687 #  | C # is B', but merge base for C is B, instead of
1688 1688 #  D | # changelog.ancestor(C, B') == A. If changelog DAG and
1689 1689 #  | B # "state" edges are merged (so there will be an edge from
1690 1690 #  |/  # B to B'), the merge base is still ancestor(C, B') in
1691 1691 #  A   # the merged graph.
1692 1692 #
1693 1693 # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
1694 1694 # which uses "virtual null merge" to explain this situation.
1695 1695 if isancestor(p, np):
1696 1696 bases[i] = nullrev
1697 1697
1698 1698 # If one parent becomes an ancestor of the other, drop the ancestor
1699 1699 for j, x in enumerate(newps[:i]):
1700 1700 if x == nullrev:
1701 1701 continue
1702 1702 if isancestor(np, x): # CASE-1
1703 1703 np = nullrev
1704 1704 elif isancestor(x, np): # CASE-2
1705 1705 newps[j] = np
1706 1706 np = nullrev
1707 1707 # New parents forming an ancestor relationship does not
1708 1708 # mean the old parents have a similar relationship. Do not
1709 1709 # set bases[x] to nullrev.
1710 1710 bases[j], bases[i] = bases[i], bases[j]
1711 1711
1712 1712 newps[i] = np
1713 1713
1714 1714 # "rebasenode" updates to new p1, and the old p1 will be used as merge
1715 1715 # base. If only p2 changes, merging using unchanged p1 as merge base is
1716 1716 # suboptimal. Therefore swap parents to make the merge sane.
1717 1717 if newps[1] != nullrev and oldps[0] == newps[0]:
1718 1718 assert len(newps) == 2 and len(oldps) == 2
1719 1719 newps.reverse()
1720 1720 bases.reverse()
1721 1721
1722 1722 # No parent change might be an error because we fail to make rev a
1723 1723 # descendant of requested dest. This can happen, for example:
1724 1724 #
1725 1725 #    C    # rebase -r C -d D
1726 1726 #   /|    # None of A and B will be changed to D and rebase fails.
1727 1727 #  A B D
1728 1728 if set(newps) == set(oldps) and dest not in newps:
1729 1729 raise error.Abort(
1730 1730 _(
1731 1731 b'cannot rebase %d:%s without '
1732 1732 b'moving at least one of its parents'
1733 1733 )
1734 1734 % (rev, repo[rev])
1735 1735 )
1736 1736
1737 1737 # Source should not be ancestor of dest. The check here guarantees it's
1738 1738 # impossible. With multi-dest, the initial check does not cover complex
1739 1739 # cases since we don't have abstractions to dry-run rebase cheaply.
1740 1740 if any(p != nullrev and isancestor(rev, p) for p in newps):
1741 1741 raise error.Abort(_(b'source is ancestor of destination'))
1742 1742
1743 1743 # "rebasenode" updates to new p1, use the corresponding merge base.
1744 1744 if bases[0] != nullrev:
1745 1745 base = bases[0]
1746 1746 else:
1747 1747 base = None
1748 1748
1749 1749 # Check if the merge will contain unwanted changes. That may happen if
1750 1750 # there are multiple special (non-changelog ancestor) merge bases, which
1751 1751 # cannot be handled well by the 3-way merge algorithm. For example:
1752 1752 #
1753 1753 #     F
1754 1754 #    /|
1755 1755 #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
1756 1756 #   | |  # as merge base, the difference between D and F will include
1757 1757 #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
1758 1758 #   |/   # chosen, the rebased F will contain B.
1759 1759 #   A Z
1760 1760 #
1761 1761 # But our merge base candidates (D and E in above case) could still be
1762 1762 # better than the default (ancestor(F, Z) == null). Therefore still
1763 1763 # pick one (so choose p1 above).
1764 1764 if sum(1 for b in set(bases) if b != nullrev) > 1:
1765 1765 unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i]
1766 1766 for i, base in enumerate(bases):
1767 1767 if base == nullrev:
1768 1768 continue
1769 1769 # Revisions in the side (not chosen as merge base) branch that
1770 1770 # might contain "surprising" contents
1771 1771 siderevs = list(
1772 1772 repo.revs(b'((%ld-%d) %% (%d+%d))', bases, base, base, dest)
1773 1773 )
1774 1774
1775 1775 # If those revisions are covered by rebaseset, the result is good.
1776 1776 # A merge in rebaseset would be considered to cover its ancestors.
1777 1777 if siderevs:
1778 1778 rebaseset = [
1779 1779 r for r, d in state.items() if d > 0 and r not in obsskipped
1780 1780 ]
1781 1781 merges = [
1782 1782 r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
1783 1783 ]
1784 1784 unwanted[i] = list(
1785 1785 repo.revs(
1786 1786 b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
1787 1787 )
1788 1788 )
1789 1789
1790 1790 # Choose a merge base that has a minimal number of unwanted revs.
1791 1791 l, i = min(
1792 1792 (len(revs), i)
1793 1793 for i, revs in enumerate(unwanted)
1794 1794 if revs is not None
1795 1795 )
1796 1796 base = bases[i]
1797 1797
1798 1798 # newps[0] should match merge base if possible. Currently, if newps[i]
1799 1799 # is nullrev, the only case is that one of newps[i] and newps[j] (j < i)
1800 1800 # is the other's ancestor. In that case, it's fine not to swap newps here.
1801 1801 # (see CASE-1 and CASE-2 above)
1802 1802 if i != 0 and newps[i] != nullrev:
1803 1803 newps[0], newps[i] = newps[i], newps[0]
1804 1804
1805 1805 # The merge will include unwanted revisions. Abort now. Revisit this if
1806 1806 # we have a more advanced merge algorithm that handles multiple bases.
1807 1807 if l > 0:
1808 1808 unwanteddesc = _(b' or ').join(
1809 1809 (
1810 1810 b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
1811 1811 for revs in unwanted
1812 1812 if revs is not None
1813 1813 )
1814 1814 )
1815 1815 raise error.Abort(
1816 1816 _(b'rebasing %d:%s will include unwanted changes from %s')
1817 1817 % (rev, repo[rev], unwanteddesc)
1818 1818 )
1819 1819
1820 1820 repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))
1821 1821
1822 1822 return newps[0], newps[1], base
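
# A toy (plain lists, -1 standing in for nullrev) of the parent swap
# performed above: rebasenode() merges against the new p1 with the old p1
# as base, so when only p2 moved, both pairs are flipped to keep the
# 3-way merge meaningful.
def _toy_swap_parents(oldps, newps, bases):
    if newps[1] != -1 and oldps[0] == newps[0]:
        newps = list(reversed(newps))
        bases = list(reversed(bases))
    return newps, bases

# merge of (A=0, B=1) where only B was rebased to B'=5:
assert _toy_swap_parents([0, 1], [0, 5], [0, 1]) == ([5, 0], [1, 0])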
1823 1823
1824 1824
1825 1825 def isagitpatch(repo, patchname):
1826 1826 """Return true if the given patch is in git format"""
1827 1827 mqpatch = os.path.join(repo.mq.path, patchname)
1828 1828 for line in patch.linereader(open(mqpatch, b'rb')):
1829 1829 if line.startswith(b'diff --git'):
1830 1830 return True
1831 1831 return False
1832 1832
1833 1833
1834 1834 def updatemq(repo, state, skipped, **opts):
1835 1835 """Update rebased mq patches - finalize and then import them"""
1836 1836 mqrebase = {}
1837 1837 mq = repo.mq
1838 1838 original_series = mq.fullseries[:]
1839 1839 skippedpatches = set()
1840 1840
1841 1841 for p in mq.applied:
1842 1842 rev = repo[p.node].rev()
1843 1843 if rev in state:
1844 1844 repo.ui.debug(
1845 1845 b'revision %d is an mq patch (%s), finalize it.\n'
1846 1846 % (rev, p.name)
1847 1847 )
1848 1848 mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
1849 1849 else:
1850 1850 # Applied but not rebased, not sure this should happen
1851 1851 skippedpatches.add(p.name)
1852 1852
1853 1853 if mqrebase:
1854 1854 mq.finish(repo, mqrebase.keys())
1855 1855
1856 1856 # We must start import from the newest revision
1857 1857 for rev in sorted(mqrebase, reverse=True):
1858 1858 if rev not in skipped:
1859 1859 name, isgit = mqrebase[rev]
1860 1860 repo.ui.note(
1861 1861 _(b'updating mq patch %s to %d:%s\n')
1862 1862 % (name, state[rev], repo[state[rev]])
1863 1863 )
1864 1864 mq.qimport(
1865 1865 repo,
1866 1866 (),
1867 1867 patchname=name,
1868 1868 git=isgit,
1869 1869 rev=[b"%d" % state[rev]],
1870 1870 )
1871 1871 else:
1872 1872 # Rebased and skipped
1873 1873 skippedpatches.add(mqrebase[rev][0])
1874 1874
1875 1875 # Patches were either applied and rebased and imported in
1876 1876 # order, applied and removed or unapplied. Discard the removed
1877 1877 # ones while preserving the original series order and guards.
1878 1878 newseries = [
1879 1879 s
1880 1880 for s in original_series
1881 1881 if mq.guard_re.split(s, 1)[0] not in skippedpatches
1882 1882 ]
1883 1883 mq.fullseries[:] = newseries
1884 1884 mq.seriesdirty = True
1885 1885 mq.savedirty()
1886 1886
1887 1887
1888 1888 def storecollapsemsg(repo, collapsemsg):
1889 1889 """Store the collapse message to allow recovery"""
1890 1890 collapsemsg = collapsemsg or b''
1891 1891 f = repo.vfs(b"last-message.txt", b"w")
1892 1892 f.write(b"%s\n" % collapsemsg)
1893 1893 f.close()
1894 1894
1895 1895
1896 1896 def clearcollapsemsg(repo):
1897 1897 """Remove collapse message file"""
1898 1898 repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
1899 1899
1900 1900
1901 1901 def restorecollapsemsg(repo, isabort):
1902 1902 """Restore previously stored collapse message"""
1903 1903 try:
1904 1904 f = repo.vfs(b"last-message.txt")
1905 1905 collapsemsg = f.readline().strip()
1906 1906 f.close()
1907 1907 except IOError as err:
1908 1908 if err.errno != errno.ENOENT:
1909 1909 raise
1910 1910 if isabort:
1911 1911 # Oh well, just abort like normal
1912 1912 collapsemsg = b''
1913 1913 else:
1914 1914 raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
1915 1915 return collapsemsg
1916 1916
1917 1917
1918 1918 def clearstatus(repo):
1919 1919 """Remove the status files"""
1920 1920 # Make sure the active transaction won't write the state file
1921 1921 tr = repo.currenttransaction()
1922 1922 if tr:
1923 1923 tr.removefilegenerator(b'rebasestate')
1924 1924 repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
1925 1925
1926 1926
1927 1927 def needupdate(repo, state):
1928 1928 '''check whether we should `update --clean` away from a merge, or if
1929 1929 somehow the working dir got forcibly updated, e.g. by older hg'''
1930 1930 parents = [p.rev() for p in repo[None].parents()]
1931 1931
1932 1932 # Are we in a merge state at all?
1933 1933 if len(parents) < 2:
1934 1934 return False
1935 1935
1936 1936 # We should be standing on the first as-of-yet unrebased commit.
1937 1937 firstunrebased = min(
1938 1938 [old for old, new in pycompat.iteritems(state) if new == nullrev]
1939 1939 )
1940 1940 if firstunrebased in parents:
1941 1941 return True
1942 1942
1943 1943 return False
1944 1944
1945 1945
1946 1946 def sortsource(destmap):
1947 1947 """yield source revisions in an order that we only rebase things once
1948 1948
1949 1949 If source and destination overlap, we should filter out revisions
1950 1950 depending on other revisions which haven't been rebased yet.
1951 1951
1952 1952 Yield a sorted list of revisions each time.
1953 1953
1954 1954 For example, when rebasing A to B, and B to C, this function yields [B], then
1955 1955 [A], indicating B needs to be rebased first.
1956 1956
1957 1957 Raise if there is a cycle so the rebase is impossible.
1958 1958 """
1959 1959 srcset = set(destmap)
1960 1960 while srcset:
1961 1961 srclist = sorted(srcset)
1962 1962 result = []
1963 1963 for r in srclist:
1964 1964 if destmap[r] not in srcset:
1965 1965 result.append(r)
1966 1966 if not result:
1967 1967 raise error.Abort(_(b'source and destination form a cycle'))
1968 1968 srcset -= set(result)
1969 1969 yield result
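
# Usage sketch: sortsource() only consumes a {src: dest} mapping, so its
# batching can be shown with plain ints (a toy reimplementation of the
# loop above, kept self-contained). With A=1, B=2, C=3 and "rebase A onto
# B, B onto C", B's batch comes out before A's; a cycle raises.
def _toy_sortsource(destmap):
    srcset = set(destmap)
    while srcset:
        batch = sorted(r for r in srcset if destmap[r] not in srcset)
        if not batch:
            raise ValueError('source and destination form a cycle')
        srcset -= set(batch)
        yield batch

assert list(_toy_sortsource({1: 2, 2: 3})) == [[2], [1]]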
1970 1970
1971 1971
1972 1972 def buildstate(repo, destmap, collapse):
1973 1973 '''Define which revisions are going to be rebased and where
1974 1974
1975 1975 repo: repo
1976 1976 destmap: {srcrev: destrev}
1977 1977 '''
1978 1978 rebaseset = destmap.keys()
1979 1979 originalwd = repo[b'.'].rev()
1980 1980
1981 1981 # This check isn't strictly necessary, since mq detects commits over an
1982 1982 # applied patch. But it prevents messing up the working directory when
1983 1983 # a partially completed rebase is blocked by mq.
1984 1984 if b'qtip' in repo.tags():
1985 1985 mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
1986 1986 if set(destmap.values()) & mqapplied:
1987 1987 raise error.Abort(_(b'cannot rebase onto an applied mq patch'))
1988 1988
1989 1989 # Get "cycle" error early by exhausting the generator.
1990 1990 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1991 1991 if not sortedsrc:
1992 1992 raise error.Abort(_(b'no matching revisions'))
1993 1993
1994 1994 # Only check the first batch of revisions to rebase not depending on other
1995 1995 # rebaseset. This means the "source is ancestor of destination" check for
1996 1996 # the second (and following) batches of revisions is not done here. We rely on
1997 1997 # "defineparents" to do that check.
1998 1998 roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
1999 1999 if not roots:
2000 2000 raise error.Abort(_(b'no matching revisions'))
2001 2001
2002 2002 def revof(r):
2003 2003 return r.rev()
2004 2004
2005 2005 roots = sorted(roots, key=revof)
2006 2006 state = dict.fromkeys(rebaseset, revtodo)
2007 2007 emptyrebase = len(sortedsrc) == 1
2008 2008 for root in roots:
2009 2009 dest = repo[destmap[root.rev()]]
2010 2010 commonbase = root.ancestor(dest)
2011 2011 if commonbase == root:
2012 2012 raise error.Abort(_(b'source is ancestor of destination'))
2013 2013 if commonbase == dest:
2014 2014 wctx = repo[None]
2015 2015 if dest == wctx.p1():
2016 2016 # when rebasing to '.', it will use the current wd branch name
2017 2017 samebranch = root.branch() == wctx.branch()
2018 2018 else:
2019 2019 samebranch = root.branch() == dest.branch()
2020 2020 if not collapse and samebranch and dest in root.parents():
2021 2021 # mark the revision as done by setting its new revision
2022 2022 # equal to its old (current) revision
2023 2023 state[root.rev()] = root.rev()
2024 2024 repo.ui.debug(b'source is a child of destination\n')
2025 2025 continue
2026 2026
2027 2027 emptyrebase = False
2028 2028 repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
2029 2029 if emptyrebase:
2030 2030 return None
2031 2031 for rev in sorted(state):
2032 2032 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
2033 2033 # if all parents of this revision are done, then so is this revision
2034 2034 if parents and all((state.get(p) == p for p in parents)):
2035 2035 state[rev] = rev
2036 2036 return originalwd, destmap, state
2037 2037
2038 2038
2039 2039 def clearrebased(
2040 2040 ui,
2041 2041 repo,
2042 2042 destmap,
2043 2043 state,
2044 2044 skipped,
2045 2045 collapsedas=None,
2046 2046 keepf=False,
2047 2047 fm=None,
2048 2048 backup=True,
2049 2049 ):
2050 2050 """dispose of rebased revision at the end of the rebase
2051 2051
2052 2052 If `collapsedas` is not None, the rebase was a collapse whose result if the
2053 2053 `collapsedas` node.
2054 2054
2055 2055 If `keepf` is True, the rebase has --keep set and no nodes should be
2056 2056 removed (but bookmarks still need to be moved).
2057 2057
2058 2058 If `backup` is False, no backup will be stored when stripping rebased
2059 2059 revisions.
2060 2060 """
2061 2061 tonode = repo.changelog.node
2062 2062 replacements = {}
2063 2063 moves = {}
2064 2064 stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
2065 2065
2066 2066 collapsednodes = []
2067 2067 for rev, newrev in sorted(state.items()):
2068 2068 if newrev >= 0 and newrev != rev:
2069 2069 oldnode = tonode(rev)
2070 2070 newnode = collapsedas or tonode(newrev)
2071 2071 moves[oldnode] = newnode
2072 2072 succs = None
2073 2073 if rev in skipped:
2074 2074 if stripcleanup or not repo[rev].obsolete():
2075 2075 succs = ()
2076 2076 elif collapsedas:
2077 2077 collapsednodes.append(oldnode)
2078 2078 else:
2079 2079 succs = (newnode,)
2080 2080 if succs is not None:
2081 2081 replacements[(oldnode,)] = succs
2082 2082 if collapsednodes:
2083 2083 replacements[tuple(collapsednodes)] = (collapsedas,)
2084 2084 if fm:
2085 2085 hf = fm.hexfunc
2086 2086 fl = fm.formatlist
2087 2087 fd = fm.formatdict
2088 2088 changes = {}
2089 2089 for oldns, newn in pycompat.iteritems(replacements):
2090 2090 for oldn in oldns:
2091 2091 changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
2092 2092 nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
2093 2093 fm.data(nodechanges=nodechanges)
2094 2094 if keepf:
2095 2095 replacements = {}
2096 2096 scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
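
# Sketch of the replacement bookkeeping above, with byte strings standing
# in for nodes: a collapse maps all collapsed sources to the single
# `collapsedas` node, while a stripped skipped revision gets an empty
# successor tuple, meaning "pruned".
toy_replacements = {}
toy_collapsednodes = [b'old-B', b'old-C']
toy_replacements[tuple(toy_collapsednodes)] = (b'new-BC',)  # collapsedas
toy_replacements[(b'old-D',)] = ()  # skipped, no successor
assert toy_replacements[(b'old-B', b'old-C')] == (b'new-BC',)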
2097 2097
2098 2098
2099 2099 def pullrebase(orig, ui, repo, *args, **opts):
2100 2100 """Call rebase after pull if the latter has been invoked with --rebase"""
2101 2101 if opts.get('rebase'):
2102 2102 if ui.configbool(b'commands', b'rebase.requiredest'):
2103 2103 msg = _(b'rebase destination required by configuration')
2104 2104 hint = _(b'use hg pull followed by hg rebase -d DEST')
2105 2105 raise error.Abort(msg, hint=hint)
2106 2106
2107 2107 with repo.wlock(), repo.lock():
2108 2108 if opts.get('update'):
2109 2109 del opts['update']
2110 2110 ui.debug(
2111 2111 b'--update and --rebase are not compatible, ignoring '
2112 2112 b'the update flag\n'
2113 2113 )
2114 2114
2115 2115 cmdutil.checkunfinished(repo, skipmerge=True)
2116 2116 cmdutil.bailifchanged(
2117 2117 repo,
2118 2118 hint=_(
2119 2119 b'cannot pull with rebase: '
2120 2120 b'please commit or shelve your changes first'
2121 2121 ),
2122 2122 )
2123 2123
2124 2124 revsprepull = len(repo)
2125 2125 origpostincoming = commands.postincoming
2126 2126
2127 2127 def _dummy(*args, **kwargs):
2128 2128 pass
2129 2129
2130 2130 commands.postincoming = _dummy
2131 2131 try:
2132 2132 ret = orig(ui, repo, *args, **opts)
2133 2133 finally:
2134 2134 commands.postincoming = origpostincoming
2135 2135 revspostpull = len(repo)
2136 2136 if revspostpull > revsprepull:
2137 2137 # --rev option from pull conflicts with rebase's own --rev,
2138 2138 # dropping it
2139 2139 if 'rev' in opts:
2140 2140 del opts['rev']
2141 2141 # positional argument from pull conflicts with rebase's own
2142 2142 # --source.
2143 2143 if 'source' in opts:
2144 2144 del opts['source']
2145 2145 # revsprepull is the len of the repo, not revnum of tip.
2146 2146 destspace = list(repo.changelog.revs(start=revsprepull))
2147 2147 opts['_destspace'] = destspace
2148 2148 try:
2149 2149 rebase(ui, repo, **opts)
2150 2150 except error.NoMergeDestAbort:
2151 2151 # we can maybe update instead
2152 2152 rev, _a, _b = destutil.destupdate(repo)
2153 2153 if rev == repo[b'.'].rev():
2154 2154 ui.status(_(b'nothing to rebase\n'))
2155 2155 else:
2156 2156 ui.status(_(b'nothing to rebase - updating instead\n'))
2157 2157 # not passing argument to get the bare update behavior
2158 2158 # with warning and trumpets
2159 2159 commands.update(ui, repo)
2160 2160 else:
2161 2161 if opts.get('tool'):
2162 2162 raise error.Abort(_(b'--tool can only be used with --rebase'))
2163 2163 ret = orig(ui, repo, *args, **opts)
2164 2164
2165 2165 return ret
2166 2166
2167 2167
2168 2168 def _filterobsoleterevs(repo, revs):
2169 2169 """returns a set of the obsolete revisions in revs"""
2170 2170 return set(r for r in revs if repo[r].obsolete())
2171 2171
2172 2172
2173 2173 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
2174 2174 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
2175 2175
2176 2176 `obsoletenotrebased` is a mapping of obsolete => successor for all
2177 2177 obsolete nodes to be rebased given in `rebaseobsrevs`.
2178 2178
2179 2179 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
2180 2180 without a successor in destination.
2181 2181
2182 2182 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
2183 2183 obsolete successors.
2184 2184 """
2185 2185 obsoletenotrebased = {}
2186 2186 obsoletewithoutsuccessorindestination = set()
2187 2187 obsoleteextinctsuccessors = set()
2188 2188
2189 2189 assert repo.filtername is None
2190 2190 cl = repo.changelog
2191 2191 get_rev = cl.index.get_rev
2192 2192 extinctrevs = set(repo.revs(b'extinct()'))
2193 2193 for srcrev in rebaseobsrevs:
2194 2194 srcnode = cl.node(srcrev)
2195 2195 # XXX: more advanced APIs are required to handle split correctly
2196 2196 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
2197 2197 # obsutil.allsuccessors includes node itself
2198 2198 successors.remove(srcnode)
2199 2199 succrevs = {get_rev(s) for s in successors}
2200 2200 succrevs.discard(None)
2201 2201 if succrevs.issubset(extinctrevs):
2202 2202 # all successors are extinct
2203 2203 obsoleteextinctsuccessors.add(srcrev)
2204 2204 if not successors:
2205 2205 # no successor
2206 2206 obsoletenotrebased[srcrev] = None
2207 2207 else:
2208 2208 dstrev = destmap[srcrev]
2209 2209 for succrev in succrevs:
2210 2210 if cl.isancestorrev(succrev, dstrev):
2211 2211 obsoletenotrebased[srcrev] = succrev
2212 2212 break
2213 2213 else:
2214 2214 # If 'srcrev' has a successor in rebase set but none in
2215 2215 # destination (which would be caught above), we shall skip it
2216 2216 # and its descendants to avoid divergence.
2217 2217 if srcrev in extinctrevs or any(s in destmap for s in succrevs):
2218 2218 obsoletewithoutsuccessorindestination.add(srcrev)
2219 2219
2220 2220 return (
2221 2221 obsoletenotrebased,
2222 2222 obsoletewithoutsuccessorindestination,
2223 2223 obsoleteextinctsuccessors,
2224 2224 )
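
# Toy of the classification above (ints for revs, a set for ancestry):
# an obsolete source whose successor is already an ancestor of its
# destination is recorded as "not rebased" and mapped to that successor.
def _toy_classify(srcrev, succrevs, dstrev, isancestor):
    for succrev in succrevs:
        if isancestor(succrev, dstrev):
            return {srcrev: succrev}
    return {}

toy_ancestors = {9: {7, 1}}  # destination 9 has ancestors {7, 1}
assert _toy_classify(4, {7}, 9, lambda a, b: a in toy_ancestors[b]) == {4: 7}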
2225 2225
2226 2226
2227 2227 def abortrebase(ui, repo):
2228 2228 with repo.wlock(), repo.lock():
2229 2229 rbsrt = rebaseruntime(repo, ui)
2230 2230 rbsrt._prepareabortorcontinue(isabort=True)
2231 2231
2232 2232
2233 2233 def continuerebase(ui, repo):
2234 2234 with repo.wlock(), repo.lock():
2235 2235 rbsrt = rebaseruntime(repo, ui)
2236 2236 ms = mergemod.mergestate.read(repo)
2237 2237 mergeutil.checkunresolved(ms)
2238 2238 retcode = rbsrt._prepareabortorcontinue(isabort=False)
2239 2239 if retcode is not None:
2240 2240 return retcode
2241 2241 rbsrt._performrebase(None)
2242 2242 rbsrt._finishrebase()
2243 2243
2244 2244
2245 2245 def summaryhook(ui, repo):
2246 2246 if not repo.vfs.exists(b'rebasestate'):
2247 2247 return
2248 2248 try:
2249 2249 rbsrt = rebaseruntime(repo, ui, {})
2250 2250 rbsrt.restorestatus()
2251 2251 state = rbsrt.state
2252 2252 except error.RepoLookupError:
2253 2253 # i18n: column positioning for "hg summary"
2254 2254 msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
2255 2255 ui.write(msg)
2256 2256 return
2257 2257 numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
2258 2258 # i18n: column positioning for "hg summary"
2259 2259 ui.write(
2260 2260 _(b'rebase: %s, %s (rebase --continue)\n')
2261 2261 % (
2262 2262 ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
2263 2263 ui.label(_(b'%d remaining'), b'rebase.remaining')
2264 2264 % (len(state) - numrebased),
2265 2265 )
2266 2266 )
2267 2267
2268 2268
2269 2269 def uisetup(ui):
2270 2270 # Replace pull with a decorator to provide --rebase option
2271 2271 entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
2272 2272 entry[1].append(
2273 2273 (b'', b'rebase', None, _(b"rebase working directory to branch head"))
2274 2274 )
2275 2275 entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
2276 2276 cmdutil.summaryhooks.add(b'rebase', summaryhook)
2277 2277 statemod.addunfinished(
2278 2278 b'rebase',
2279 2279 fname=b'rebasestate',
2280 2280 stopflag=True,
2281 2281 continueflag=True,
2282 2282 abortfunc=abortrebase,
2283 2283 continuefunc=continuerebase,
2284 2284 )
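
# Hedged sketch of the same wrapping pattern for a hypothetical extension
# (the names below are illustrative, not part of this module):
# wrapcommand() passes the original command function as the first
# argument, exactly as pullrebase() receives `orig` above, and returns
# the command-table entry so new flags can be appended to entry[1].
def uisetup_example(ui):
    def noisypull(orig, ui, repo, *args, **opts):
        ui.status(b'about to pull\n')
        return orig(ui, repo, *args, **opts)

    entry = extensions.wrapcommand(commands.table, b'pull', noisypull)
    entry[1].append((b'', b'noisy', None, b'announce pulls'))
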
@@ -1,3030 +1,3034 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 copies,
32 32 dagop,
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 obsolete as obsmod,
38 38 patch,
39 39 pathutil,
40 40 phases,
41 41 pycompat,
42 42 repoview,
43 43 scmutil,
44 44 sparse,
45 45 subrepo,
46 46 subrepoutil,
47 47 util,
48 48 )
49 49 from .utils import (
50 50 dateutil,
51 51 stringutil,
52 52 )
53 53
54 54 propertycache = util.propertycache
55 55
56 56
57 57 class basectx(object):
58 58 """A basectx object represents the common logic for its children:
59 59 changectx: read-only context that is already present in the repo,
60 60 workingctx: a context that represents the working directory and can
61 61 be committed,
62 62 memctx: a context that represents changes in-memory and can also
63 63 be committed."""
64 64
65 65 def __init__(self, repo):
66 66 self._repo = repo
67 67
68 68 def __bytes__(self):
69 69 return short(self.node())
70 70
71 71 __str__ = encoding.strmethod(__bytes__)
72 72
73 73 def __repr__(self):
74 74 return "<%s %s>" % (type(self).__name__, str(self))
75 75
76 76 def __eq__(self, other):
77 77 try:
78 78 return type(self) == type(other) and self._rev == other._rev
79 79 except AttributeError:
80 80 return False
81 81
82 82 def __ne__(self, other):
83 83 return not (self == other)
84 84
85 85 def __contains__(self, key):
86 86 return key in self._manifest
87 87
88 88 def __getitem__(self, key):
89 89 return self.filectx(key)
90 90
91 91 def __iter__(self):
92 92 return iter(self._manifest)
93 93
94 94 def _buildstatusmanifest(self, status):
95 95 """Builds a manifest that includes the given status results, if this is
96 96 a working copy context. For non-working copy contexts, it just returns
97 97 the normal manifest."""
98 98 return self.manifest()
99 99
100 100 def _matchstatus(self, other, match):
101 101 """This internal method provides a way for child objects to override the
102 102 match operator.
103 103 """
104 104 return match
105 105
106 106 def _buildstatus(
107 107 self, other, s, match, listignored, listclean, listunknown
108 108 ):
109 109 """build a status with respect to another context"""
110 110 # Load earliest manifest first for caching reasons. More specifically,
111 111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 113 # 1000 and cache it so that when you read 1001, we just need to apply a
114 114 # delta to what's in the cache. So that's one full reconstruction + one
115 115 # delta application.
116 116 mf2 = None
117 117 if self.rev() is not None and self.rev() < other.rev():
118 118 mf2 = self._buildstatusmanifest(s)
119 119 mf1 = other._buildstatusmanifest(s)
120 120 if mf2 is None:
121 121 mf2 = self._buildstatusmanifest(s)
122 122
123 123 modified, added = [], []
124 124 removed = []
125 125 clean = []
126 126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 127 deletedset = set(deleted)
128 128 d = mf1.diff(mf2, match=match, clean=listclean)
129 129 for fn, value in pycompat.iteritems(d):
130 130 if fn in deletedset:
131 131 continue
132 132 if value is None:
133 133 clean.append(fn)
134 134 continue
135 135 (node1, flag1), (node2, flag2) = value
136 136 if node1 is None:
137 137 added.append(fn)
138 138 elif node2 is None:
139 139 removed.append(fn)
140 140 elif flag1 != flag2:
141 141 modified.append(fn)
142 142 elif node2 not in wdirfilenodeids:
143 143 # When comparing files between two commits, we save time by
144 144 # not comparing the file contents when the nodeids differ.
145 145 # Note that this means we incorrectly report a reverted change
146 146 # to a file as a modification.
147 147 modified.append(fn)
148 148 elif self[fn].cmp(other[fn]):
149 149 modified.append(fn)
150 150 else:
151 151 clean.append(fn)
152 152
153 153 if removed:
154 154 # need to filter files if they are already reported as removed
155 155 unknown = [
156 156 fn
157 157 for fn in unknown
158 158 if fn not in mf1 and (not match or match(fn))
159 159 ]
160 160 ignored = [
161 161 fn
162 162 for fn in ignored
163 163 if fn not in mf1 and (not match or match(fn))
164 164 ]
165 165 # if they're deleted, don't report them as removed
166 166 removed = [fn for fn in removed if fn not in deletedset]
167 167
168 168 return scmutil.status(
169 169 modified, added, removed, deleted, unknown, ignored, clean
170 170 )
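
# Toy of the classification loop above: mf1.diff(mf2, clean=True) maps
# path -> ((node1, flag1), (node2, flag2)), with a None node for a side
# where the file is absent and a None value for clean files (plain dicts
# here, no real manifests).
toy_diff = {
    b'a': ((None, b''), (b'n2', b'')),    # absent in mf1 -> added
    b'b': ((b'n1', b''), (None, b'')),    # absent in mf2 -> removed
    b'c': ((b'n1', b''), (b'n1', b'x')),  # flag changed -> modified
    b'd': None,                           # unchanged -> clean
}
toy_added = sorted(f for f, v in toy_diff.items() if v and v[0][0] is None)
toy_removed = sorted(f for f, v in toy_diff.items() if v and v[1][0] is None)
assert (toy_added, toy_removed) == ([b'a'], [b'b'])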
171 171
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepoutil.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181
182 182 def node(self):
183 183 return self._node
184 184
185 185 def hex(self):
186 186 return hex(self.node())
187 187
188 188 def manifest(self):
189 189 return self._manifest
190 190
191 191 def manifestctx(self):
192 192 return self._manifestctx
193 193
194 194 def repo(self):
195 195 return self._repo
196 196
197 197 def phasestr(self):
198 198 return phases.phasenames[self.phase()]
199 199
200 200 def mutable(self):
201 201 return self.phase() > phases.public
202 202
203 203 def matchfileset(self, cwd, expr, badfn=None):
204 204 return fileset.match(self, cwd, expr, badfn=badfn)
205 205
206 206 def obsolete(self):
207 207 """True if the changeset is obsolete"""
208 208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209 209
210 210 def extinct(self):
211 211 """True if the changeset is extinct"""
212 212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213 213
214 214 def orphan(self):
215 215 """True if the changeset is not obsolete, but its ancestor is"""
216 216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217 217
218 218 def phasedivergent(self):
219 219 """True if the changeset tries to be a successor of a public changeset
220 220
221 221 Only non-public and non-obsolete changesets may be phase-divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224 224
225 225 def contentdivergent(self):
226 226 """Is a successor of a changeset with multiple possible successor sets
227 227
228 228 Only non-public and non-obsolete changesets may be content-divergent.
229 229 """
230 230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231 231
232 232 def isunstable(self):
233 233 """True if the changeset is either orphan, phase-divergent or
234 234 content-divergent"""
235 235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236 236
237 237 def instabilities(self):
238 238 """return the list of instabilities affecting this changeset.
239 239
240 240 Instabilities are returned as strings. possible values are:
241 241 - orphan,
242 242 - phase-divergent,
243 243 - content-divergent.
244 244 """
245 245 instabilities = []
246 246 if self.orphan():
247 247 instabilities.append(b'orphan')
248 248 if self.phasedivergent():
249 249 instabilities.append(b'phase-divergent')
250 250 if self.contentdivergent():
251 251 instabilities.append(b'content-divergent')
252 252 return instabilities
253 253
254 254 def parents(self):
255 255 """return contexts for each parent changeset"""
256 256 return self._parents
257 257
258 258 def p1(self):
259 259 return self._parents[0]
260 260
261 261 def p2(self):
262 262 parents = self._parents
263 263 if len(parents) == 2:
264 264 return parents[1]
265 265 return self._repo[nullrev]
266 266
267 267 def _fileinfo(self, path):
268 268 if '_manifest' in self.__dict__:
269 269 try:
270 270 return self._manifest[path], self._manifest.flags(path)
271 271 except KeyError:
272 272 raise error.ManifestLookupError(
273 273 self._node, path, _(b'not found in manifest')
274 274 )
275 275 if '_manifestdelta' in self.__dict__ or path in self.files():
276 276 if path in self._manifestdelta:
277 277 return (
278 278 self._manifestdelta[path],
279 279 self._manifestdelta.flags(path),
280 280 )
281 281 mfl = self._repo.manifestlog
282 282 try:
283 283 node, flag = mfl[self._changeset.manifest].find(path)
284 284 except KeyError:
285 285 raise error.ManifestLookupError(
286 286 self._node, path, _(b'not found in manifest')
287 287 )
288 288
289 289 return node, flag
290 290
291 291 def filenode(self, path):
292 292 return self._fileinfo(path)[0]
293 293
294 294 def flags(self, path):
295 295 try:
296 296 return self._fileinfo(path)[1]
297 297 except error.LookupError:
298 298 return b''
299 299
300 300 @propertycache
301 301 def _copies(self):
302 302 return copies.computechangesetcopies(self)
303 303
304 304 def p1copies(self):
305 305 return self._copies[0]
306 306
307 307 def p2copies(self):
308 308 return self._copies[1]
309 309
310 310 def sub(self, path, allowcreate=True):
311 311 '''return a subrepo for the stored revision of path, never wdir()'''
312 312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313 313
314 314 def nullsub(self, path, pctx):
315 315 return subrepo.nullsubrepo(self, path, pctx)
316 316
317 317 def workingsub(self, path):
318 318 '''return a subrepo for the stored revision, or wdir if this is a wdir
319 319 context.
320 320 '''
321 321 return subrepo.subrepo(self, path, allowwdir=True)
322 322
323 323 def match(
324 324 self,
325 325 pats=None,
326 326 include=None,
327 327 exclude=None,
328 328 default=b'glob',
329 329 listsubrepos=False,
330 330 badfn=None,
331 331 cwd=None,
332 332 ):
333 333 r = self._repo
334 334 if not cwd:
335 335 cwd = r.getcwd()
336 336 return matchmod.match(
337 337 r.root,
338 338 cwd,
339 339 pats,
340 340 include,
341 341 exclude,
342 342 default,
343 343 auditor=r.nofsauditor,
344 344 ctx=self,
345 345 listsubrepos=listsubrepos,
346 346 badfn=badfn,
347 347 )
348 348
349 349 def diff(
350 350 self,
351 351 ctx2=None,
352 352 match=None,
353 353 changes=None,
354 354 opts=None,
355 355 losedatafn=None,
356 356 pathfn=None,
357 357 copy=None,
358 358 copysourcematch=None,
359 359 hunksfilterfn=None,
360 360 ):
361 361 """Returns a diff generator for the given contexts and matcher"""
362 362 if ctx2 is None:
363 363 ctx2 = self.p1()
364 364 if ctx2 is not None:
365 365 ctx2 = self._repo[ctx2]
366 366 return patch.diff(
367 367 self._repo,
368 368 ctx2,
369 369 self,
370 370 match=match,
371 371 changes=changes,
372 372 opts=opts,
373 373 losedatafn=losedatafn,
374 374 pathfn=pathfn,
375 375 copy=copy,
376 376 copysourcematch=copysourcematch,
377 377 hunksfilterfn=hunksfilterfn,
378 378 )
379 379
380 380 def dirs(self):
381 381 return self._manifest.dirs()
382 382
383 383 def hasdir(self, dir):
384 384 return self._manifest.hasdir(dir)
385 385
386 386 def status(
387 387 self,
388 388 other=None,
389 389 match=None,
390 390 listignored=False,
391 391 listclean=False,
392 392 listunknown=False,
393 393 listsubrepos=False,
394 394 ):
395 395 """return status of files between two nodes or node and working
396 396 directory.
397 397
398 398 If other is None, compare this node with working directory.
399 399
400 400 returns (modified, added, removed, deleted, unknown, ignored, clean)
401 401 """
402 402
403 403 ctx1 = self
404 404 ctx2 = self._repo[other]
405 405
406 406 # This next code block is, admittedly, fragile logic that tests for
407 407 # reversing the contexts and wouldn't need to exist if it weren't for
408 408 # the fast (and common) code path of comparing the working directory
409 409 # with its first parent.
410 410 #
411 411 # What we're aiming for here is the ability to call:
412 412 #
413 413 # workingctx.status(parentctx)
414 414 #
415 415 # If we always built the manifest for each context and compared those,
416 416 # then we'd be done. But the special case of the above call means we
417 417 # just copy the manifest of the parent.
418 418 reversed = False
419 419 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
420 420 reversed = True
421 421 ctx1, ctx2 = ctx2, ctx1
422 422
423 423 match = self._repo.narrowmatch(match)
424 424 match = ctx2._matchstatus(ctx1, match)
425 425 r = scmutil.status([], [], [], [], [], [], [])
426 426 r = ctx2._buildstatus(
427 427 ctx1, r, match, listignored, listclean, listunknown
428 428 )
429 429
430 430 if reversed:
431 431 # Reverse added and removed. Clear deleted, unknown and ignored as
432 432 # these make no sense to reverse.
433 433 r = scmutil.status(
434 434 r.modified, r.removed, r.added, [], [], [], r.clean
435 435 )
436 436
437 437 if listsubrepos:
438 438 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
439 439 try:
440 440 rev2 = ctx2.subrev(subpath)
441 441 except KeyError:
442 442 # A subrepo that existed in node1 was deleted between
443 443 # node1 and node2 (inclusive). Thus, ctx2's substate
444 444 # won't contain that subpath. The best we can do is ignore it.
445 445 rev2 = None
446 446 submatch = matchmod.subdirmatcher(subpath, match)
447 447 s = sub.status(
448 448 rev2,
449 449 match=submatch,
450 450 ignored=listignored,
451 451 clean=listclean,
452 452 unknown=listunknown,
453 453 listsubrepos=True,
454 454 )
455 455 for k in (
456 456 'modified',
457 457 'added',
458 458 'removed',
459 459 'deleted',
460 460 'unknown',
461 461 'ignored',
462 462 'clean',
463 463 ):
464 464 rfiles, sfiles = getattr(r, k), getattr(s, k)
465 465 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
466 466
467 467 r.modified.sort()
468 468 r.added.sort()
469 469 r.removed.sort()
470 470 r.deleted.sort()
471 471 r.unknown.sort()
472 472 r.ignored.sort()
473 473 r.clean.sort()
474 474
475 475 return r
476 476
477 477
478 478 class changectx(basectx):
479 479 """A changecontext object makes access to data related to a particular
480 480 changeset convenient. It represents a read-only context already present in
481 481 the repo."""
482 482
483 483 def __init__(self, repo, rev, node, maybe_filtered=True):
484 484 super(changectx, self).__init__(repo)
485 485 self._rev = rev
486 486 self._node = node
487 487 # When maybe_filtered is True, the revision might be affected by
488 488 # changelog filtering and operation through the filtered changelog must be used.
489 489 #
490 490 # When maybe_filtered is False, the revision has already been checked
491 491 # against filtering and is not filtered. Operation through the
492 492 # unfiltered changelog might be used in some case.
493 493 self._maybe_filtered = maybe_filtered
494 494
495 495 def __hash__(self):
496 496 try:
497 497 return hash(self._rev)
498 498 except AttributeError:
499 499 return id(self)
500 500
501 501 def __nonzero__(self):
502 502 return self._rev != nullrev
503 503
504 504 __bool__ = __nonzero__
505 505
506 506 @propertycache
507 507 def _changeset(self):
508 508 if self._maybe_filtered:
509 509 repo = self._repo
510 510 else:
511 511 repo = self._repo.unfiltered()
512 512 return repo.changelog.changelogrevision(self.rev())
513 513
514 514 @propertycache
515 515 def _manifest(self):
516 516 return self._manifestctx.read()
517 517
518 518 @property
519 519 def _manifestctx(self):
520 520 return self._repo.manifestlog[self._changeset.manifest]
521 521
522 522 @propertycache
523 523 def _manifestdelta(self):
524 524 return self._manifestctx.readdelta()
525 525
526 526 @propertycache
527 527 def _parents(self):
528 528 repo = self._repo
529 529 if self._maybe_filtered:
530 530 cl = repo.changelog
531 531 else:
532 532 cl = repo.unfiltered().changelog
533 533
534 534 p1, p2 = cl.parentrevs(self._rev)
535 535 if p2 == nullrev:
536 536 return [repo[p1]]
537 537 return [repo[p1], repo[p2]]
538 538
539 539 def changeset(self):
540 540 c = self._changeset
541 541 return (
542 542 c.manifest,
543 543 c.user,
544 544 c.date,
545 545 c.files,
546 546 c.description,
547 547 c.extra,
548 548 )
549 549
550 550 def manifestnode(self):
551 551 return self._changeset.manifest
552 552
553 553 def user(self):
554 554 return self._changeset.user
555 555
556 556 def date(self):
557 557 return self._changeset.date
558 558
559 559 def files(self):
560 560 return self._changeset.files
561 561
562 562 def filesmodified(self):
563 563 modified = set(self.files())
564 564 modified.difference_update(self.filesadded())
565 565 modified.difference_update(self.filesremoved())
566 566 return sorted(modified)
567 567
568 568 def filesadded(self):
569 569 filesadded = self._changeset.filesadded
570 570 compute_on_none = True
571 571 if self._repo.filecopiesmode == b'changeset-sidedata':
572 572 compute_on_none = False
573 573 else:
574 574 source = self._repo.ui.config(b'experimental', b'copies.read-from')
575 575 if source == b'changeset-only':
576 576 compute_on_none = False
577 577 elif source != b'compatibility':
578 578 # filelog mode, ignore any changelog content
579 579 filesadded = None
580 580 if filesadded is None:
581 581 if compute_on_none:
582 582 filesadded = copies.computechangesetfilesadded(self)
583 583 else:
584 584 filesadded = []
585 585 return filesadded
586 586
587 587 def filesremoved(self):
588 588 filesremoved = self._changeset.filesremoved
589 589 compute_on_none = True
590 590 if self._repo.filecopiesmode == b'changeset-sidedata':
591 591 compute_on_none = False
592 592 else:
593 593 source = self._repo.ui.config(b'experimental', b'copies.read-from')
594 594 if source == b'changeset-only':
595 595 compute_on_none = False
596 596 elif source != b'compatibility':
597 597 # filelog mode, ignore any changelog content
598 598 filesremoved = None
599 599 if filesremoved is None:
600 600 if compute_on_none:
601 601 filesremoved = copies.computechangesetfilesremoved(self)
602 602 else:
603 603 filesremoved = []
604 604 return filesremoved
605 605
606 606 @propertycache
607 607 def _copies(self):
608 608 p1copies = self._changeset.p1copies
609 609 p2copies = self._changeset.p2copies
610 610 compute_on_none = True
611 611 if self._repo.filecopiesmode == b'changeset-sidedata':
612 612 compute_on_none = False
613 613 else:
614 614 source = self._repo.ui.config(b'experimental', b'copies.read-from')
615 615 # If config says to get copy metadata only from changeset, then
616 616 # return that, defaulting to {} if there was no copy metadata. In
617 617 # compatibility mode, we return copy data from the changeset if it
618 618 # was recorded there, and otherwise we fall back to getting it from
619 619 # the filelogs (below).
620 620 #
621 621 # If we are in compatibility mode and there is no data in the
622 622 # changeset, we get the copy metadata from the filelogs.
623 623 #
624 624 # otherwise, when config said to read only from filelog, we get the
625 625 # copy metadata from the filelogs.
626 626 if source == b'changeset-only':
627 627 compute_on_none = False
628 628 elif source != b'compatibility':
629 629 # filelog mode, ignore any changelog content
630 630 p1copies = p2copies = None
631 631 if p1copies is None:
632 632 if compute_on_none:
633 633 p1copies, p2copies = super(changectx, self)._copies
634 634 else:
635 635 if p1copies is None:
636 636 p1copies = {}
637 637 if p2copies is None:
638 638 p2copies = {}
639 639 return p1copies, p2copies
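
# Condensed, illustrative restatement of the fallback decision above:
# should copy metadata missing from the changeset be recomputed from the
# filelogs?
def _toy_compute_on_none(filecopiesmode, read_from):
    if filecopiesmode == b'changeset-sidedata':
        return False
    return read_from != b'changeset-only'

assert _toy_compute_on_none(b'changeset-sidedata', b'compatibility') is False
assert _toy_compute_on_none(None, b'changeset-only') is False
assert _toy_compute_on_none(None, b'compatibility') is True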
640 640
641 641 def description(self):
642 642 return self._changeset.description
643 643
644 644 def branch(self):
645 645 return encoding.tolocal(self._changeset.extra.get(b"branch"))
646 646
647 647 def closesbranch(self):
648 648 return b'close' in self._changeset.extra
649 649
650 650 def extra(self):
651 651 """Return a dict of extra information."""
652 652 return self._changeset.extra
653 653
654 654 def tags(self):
655 655 """Return a list of byte tag names"""
656 656 return self._repo.nodetags(self._node)
657 657
658 658 def bookmarks(self):
659 659 """Return a list of byte bookmark names."""
660 660 return self._repo.nodebookmarks(self._node)
661 661
662 662 def phase(self):
663 663 return self._repo._phasecache.phase(self._repo, self._rev)
664 664
665 665 def hidden(self):
666 666 return self._rev in repoview.filterrevs(self._repo, b'visible')
667 667
668 668 def isinmemory(self):
669 669 return False
670 670
671 671 def children(self):
672 672 """return list of changectx contexts for each child changeset.
673 673
674 674 This returns only the immediate child changesets. Use descendants() to
675 675 recursively walk children.
676 676 """
677 677 c = self._repo.changelog.children(self._node)
678 678 return [self._repo[x] for x in c]
679 679
680 680 def ancestors(self):
681 681 for a in self._repo.changelog.ancestors([self._rev]):
682 682 yield self._repo[a]
683 683
684 684 def descendants(self):
685 685 """Recursively yield all children of the changeset.
686 686
687 687 For just the immediate children, use children()
688 688 """
689 689 for d in self._repo.changelog.descendants([self._rev]):
690 690 yield self._repo[d]
691 691
692 692 def filectx(self, path, fileid=None, filelog=None):
693 693 """get a file context from this changeset"""
694 694 if fileid is None:
695 695 fileid = self.filenode(path)
696 696 return filectx(
697 697 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
698 698 )
699 699
700 700 def ancestor(self, c2, warn=False):
701 701 """return the "best" ancestor context of self and c2
702 702
703 703 If there are multiple candidates, it will show a message and check
704 704 merge.preferancestor configuration before falling back to the
705 705 revlog ancestor."""
706 706 # deal with workingctxs
707 707 n2 = c2._node
708 708 if n2 is None:
709 709 n2 = c2._parents[0]._node
710 710 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
711 711 if not cahs:
712 712 anc = nullid
713 713 elif len(cahs) == 1:
714 714 anc = cahs[0]
715 715 else:
716 716 # experimental config: merge.preferancestor
717 717 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
718 718 try:
719 719 ctx = scmutil.revsymbol(self._repo, r)
720 720 except error.RepoLookupError:
721 721 continue
722 722 anc = ctx.node()
723 723 if anc in cahs:
724 724 break
725 725 else:
726 726 anc = self._repo.changelog.ancestor(self._node, n2)
727 727 if warn:
728 728 self._repo.ui.status(
729 729 (
730 730 _(b"note: using %s as ancestor of %s and %s\n")
731 731 % (short(anc), short(self._node), short(n2))
732 732 )
733 733 + b''.join(
734 734 _(
735 735 b" alternatively, use --config "
736 736 b"merge.preferancestor=%s\n"
737 737 )
738 738 % short(n)
739 739 for n in sorted(cahs)
740 740 if n != anc
741 741 )
742 742 )
743 743 return self._repo[anc]
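
# Usage note (sketch): when several common-ancestor heads exist, the pick
# can be pinned through the configuration mentioned in the status message
# above, e.g. `hg merge --config merge.preferancestor=<hash>`.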
744 744
745 745 def isancestorof(self, other):
746 746 """True if this changeset is an ancestor of other"""
747 747 return self._repo.changelog.isancestorrev(self._rev, other._rev)
748 748
749 749 def walk(self, match):
750 750 '''Generates matching file names.'''
751 751
752 752 # Wrap match.bad method to have message with nodeid
753 753 def bad(fn, msg):
754 754 # The manifest doesn't know about subrepos, so don't complain about
755 755 # paths into valid subrepos.
756 756 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
757 757 return
758 758 match.bad(fn, _(b'no such file in rev %s') % self)
759 759
760 760 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
761 761 return self._manifest.walk(m)
762 762
763 763 def matches(self, match):
764 764 return self.walk(match)
765 765
766 766
767 767 class basefilectx(object):
768 768 """A filecontext object represents the common logic for its children:
769 769 filectx: read-only access to a filerevision that is already present
770 770 in the repo,
771 771 workingfilectx: a filecontext that represents files from the working
772 772 directory,
773 773 memfilectx: a filecontext that represents files in-memory,
774 774 """
775 775
776 776 @propertycache
777 777 def _filelog(self):
778 778 return self._repo.file(self._path)
779 779
780 780 @propertycache
781 781 def _changeid(self):
782 782 if '_changectx' in self.__dict__:
783 783 return self._changectx.rev()
784 784 elif '_descendantrev' in self.__dict__:
785 785 # this file context was created from a revision with a known
786 786 # descendant, we can (lazily) correct for linkrev aliases
787 787 return self._adjustlinkrev(self._descendantrev)
788 788 else:
789 789 return self._filelog.linkrev(self._filerev)
790 790
791 791 @propertycache
792 792 def _filenode(self):
793 793 if '_fileid' in self.__dict__:
794 794 return self._filelog.lookup(self._fileid)
795 795 else:
796 796 return self._changectx.filenode(self._path)
797 797
798 798 @propertycache
799 799 def _filerev(self):
800 800 return self._filelog.rev(self._filenode)
801 801
802 802 @propertycache
803 803 def _repopath(self):
804 804 return self._path
805 805
806 806 def __nonzero__(self):
807 807 try:
808 808 self._filenode
809 809 return True
810 810 except error.LookupError:
811 811 # file is missing
812 812 return False
813 813
814 814 __bool__ = __nonzero__
815 815
816 816 def __bytes__(self):
817 817 try:
818 818 return b"%s@%s" % (self.path(), self._changectx)
819 819 except error.LookupError:
820 820 return b"%s@???" % self.path()
821 821
822 822 __str__ = encoding.strmethod(__bytes__)
823 823
824 824 def __repr__(self):
825 825 return "<%s %s>" % (type(self).__name__, str(self))
826 826
827 827 def __hash__(self):
828 828 try:
829 829 return hash((self._path, self._filenode))
830 830 except AttributeError:
831 831 return id(self)
832 832
833 833 def __eq__(self, other):
834 834 try:
835 835 return (
836 836 type(self) == type(other)
837 837 and self._path == other._path
838 838 and self._filenode == other._filenode
839 839 )
840 840 except AttributeError:
841 841 return False
842 842
843 843 def __ne__(self, other):
844 844 return not (self == other)
845 845
846 846 def filerev(self):
847 847 return self._filerev
848 848
849 849 def filenode(self):
850 850 return self._filenode
851 851
852 852 @propertycache
853 853 def _flags(self):
854 854 return self._changectx.flags(self._path)
855 855
856 856 def flags(self):
857 857 return self._flags
858 858
859 859 def filelog(self):
860 860 return self._filelog
861 861
862 862 def rev(self):
863 863 return self._changeid
864 864
865 865 def linkrev(self):
866 866 return self._filelog.linkrev(self._filerev)
867 867
868 868 def node(self):
869 869 return self._changectx.node()
870 870
871 871 def hex(self):
872 872 return self._changectx.hex()
873 873
874 874 def user(self):
875 875 return self._changectx.user()
876 876
877 877 def date(self):
878 878 return self._changectx.date()
879 879
880 880 def files(self):
881 881 return self._changectx.files()
882 882
883 883 def description(self):
884 884 return self._changectx.description()
885 885
886 886 def branch(self):
887 887 return self._changectx.branch()
888 888
889 889 def extra(self):
890 890 return self._changectx.extra()
891 891
892 892 def phase(self):
893 893 return self._changectx.phase()
894 894
895 895 def phasestr(self):
896 896 return self._changectx.phasestr()
897 897
898 898 def obsolete(self):
899 899 return self._changectx.obsolete()
900 900
901 901 def instabilities(self):
902 902 return self._changectx.instabilities()
903 903
904 904 def manifest(self):
905 905 return self._changectx.manifest()
906 906
907 907 def changectx(self):
908 908 return self._changectx
909 909
910 910 def renamed(self):
911 911 return self._copied
912 912
913 913 def copysource(self):
914 914 return self._copied and self._copied[0]
915 915
916 916 def repo(self):
917 917 return self._repo
918 918
919 919 def size(self):
920 920 return len(self.data())
921 921
922 922 def path(self):
923 923 return self._path
924 924
925 925 def isbinary(self):
926 926 try:
927 927 return stringutil.binary(self.data())
928 928 except IOError:
929 929 return False
930 930
931 931 def isexec(self):
932 932 return b'x' in self.flags()
933 933
934 934 def islink(self):
935 935 return b'l' in self.flags()
936 936
937 937 def isabsent(self):
938 938 """whether this filectx represents a file not in self._changectx
939 939
940 940 This is mainly for merge code to detect change/delete conflicts. This is
941 941 expected to be True for all subclasses of basectx."""
942 942 return False
943 943
944 944 _customcmp = False
945 945
946 946 def cmp(self, fctx):
947 947 """compare with other file context
948 948
949 949 returns True if different than fctx.
950 950 """
951 951 if fctx._customcmp:
952 952 return fctx.cmp(self)
953 953
954 954 if self._filenode is None:
955 955 raise error.ProgrammingError(
956 956 b'filectx.cmp() must be reimplemented if not backed by revlog'
957 957 )
958 958
959 959 if fctx._filenode is None:
960 960 if self._repo._encodefilterpats:
961 961 # can't rely on size() because wdir content may be decoded
962 962 return self._filelog.cmp(self._filenode, fctx.data())
963 963 if self.size() - 4 == fctx.size():
964 964 # size() can match:
965 965 # if file data starts with '\1\n', empty metadata block is
966 966 # prepended, which adds 4 bytes to filelog.size().
967 967 return self._filelog.cmp(self._filenode, fctx.data())
968 968 if self.size() == fctx.size():
969 969 # size() matches: need to compare content
970 970 return self._filelog.cmp(self._filenode, fctx.data())
971 971
972 972 # size() differs
973 973 return True
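
# Sketch of the size()-4 heuristic above: filelog data beginning with
# '\1\n' gets an empty metadata envelope ('\1\n\1\n', 4 bytes) prepended
# when stored, so the stored size exceeds the raw size by exactly 4.
toy_raw = b'\x01\nhello'
toy_stored = b'\x01\n\x01\n' + toy_raw
assert len(toy_stored) - 4 == len(toy_raw)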
974 974
975 975 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
976 976 """return the first ancestor of <srcrev> introducing <fnode>
977 977
978 978 If the linkrev of the file revision does not point to an ancestor of
979 979 srcrev, we'll walk down the ancestors until we find one introducing
980 980 this file revision.
981 981
982 982 :srcrev: the changeset revision we search ancestors from
983 983 :inclusive: if true, the src revision will also be checked
984 984 :stoprev: an optional revision to stop the walk at. If no introduction
985 985 of this file content could be found before this floor
986 986 revision, the function will return "None" and stop its
987 987 iteration.
988 988 """
989 989 repo = self._repo
990 990 cl = repo.unfiltered().changelog
991 991 mfl = repo.manifestlog
992 992 # fetch the linkrev
993 993 lkr = self.linkrev()
994 994 if srcrev == lkr:
995 995 return lkr
996 996 # hack to reuse ancestor computation when searching for renames
997 997 memberanc = getattr(self, '_ancestrycontext', None)
998 998 iteranc = None
999 999 if srcrev is None:
1000 1000 # wctx case, used by workingfilectx during mergecopy
1001 1001 revs = [p.rev() for p in self._repo[None].parents()]
1002 1002 inclusive = True # we skipped the real (revless) source
1003 1003 else:
1004 1004 revs = [srcrev]
1005 1005 if memberanc is None:
1006 1006 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1007 1007 # check if this linkrev is an ancestor of srcrev
1008 1008 if lkr not in memberanc:
1009 1009 if iteranc is None:
1010 1010 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1011 1011 fnode = self._filenode
1012 1012 path = self._path
1013 1013 for a in iteranc:
1014 1014 if stoprev is not None and a < stoprev:
1015 1015 return None
1016 1016 ac = cl.read(a) # get changeset data (we avoid object creation)
1017 1017 if path in ac[3]: # checking the 'files' field.
1018 1018 # The file has been touched, check if the content is
1019 1019 # similar to the one we search for.
1020 1020 if fnode == mfl[ac[0]].readfast().get(path):
1021 1021 return a
1022 1022 # In theory, we should never get out of that loop without a result.
1023 1023 # But if the manifest uses a buggy file revision (not a child of the
1024 1024 # one it replaces), we could. Such a buggy situation will likely
1025 1025 # result in a crash somewhere else at some point.
1026 1026 return lkr
1027 1027
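# Walk sketch (pure illustration; the callables below are hypothetical): the
# loop above is essentially
#
#   for a in ancestors_of(srcrev):            # newest first
#       if stoprev is not None and a < stoprev:
#           return None                       # hit the floor: give up
#       if path in files_of(a) and filenode_at(a, path) == fnode:
#           return a                          # first introduction found
#   return lkr                                # buggy-history fallback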
1028 1028 def isintroducedafter(self, changelogrev):
1029 1029 """True if a filectx has been introduced after a given floor revision
1030 1030 """
1031 1031 if self.linkrev() >= changelogrev:
1032 1032 return True
1033 1033 introrev = self._introrev(stoprev=changelogrev)
1034 1034 if introrev is None:
1035 1035 return False
1036 1036 return introrev >= changelogrev
1037 1037
1038 1038 def introrev(self):
1039 1039 """return the rev of the changeset which introduced this file revision
1040 1040
1041 1041 This method is different from linkrev because it takes into account the
1042 1042 changeset the filectx was created from. It ensures the returned
1043 1043 revision is one of its ancestors. This prevents bugs from
1044 1044 'linkrev-shadowing' when a file revision is used by multiple
1045 1045 changesets.
1046 1046 """
1047 1047 return self._introrev()
1048 1048
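# Usage sketch (file name hypothetical):
#
#   fctx = repo[b'tip'][b'README']
#   fctx.linkrev()   # may point at a non-ancestor (linkrev-shadowing)
#   fctx.introrev()  # adjusted: always an ancestor of fctx's changeset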
1049 1049 def _introrev(self, stoprev=None):
1050 1050 """
1051 1051 Same as `introrev`, but with an extra argument to limit the changelog
1052 1052 iteration range in some internal use cases.
1053 1053
1054 1054 If `stoprev` is set, the `introrev` will not be searched past that
1055 1055 `stoprev` revision and "None" might be returned. This is useful to
1056 1056 limit the iteration range.
1057 1057 """
1058 1058 toprev = None
1059 1059 attrs = vars(self)
1060 1060 if '_changeid' in attrs:
1061 1061 # We have a cached value already
1062 1062 toprev = self._changeid
1063 1063 elif '_changectx' in attrs:
1064 1064 # We know which changelog entry we are coming from
1065 1065 toprev = self._changectx.rev()
1066 1066
1067 1067 if toprev is not None:
1068 1068 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1069 1069 elif '_descendantrev' in attrs:
1070 1070 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1071 1071 # be nice and cache the result of the computation
1072 1072 if introrev is not None:
1073 1073 self._changeid = introrev
1074 1074 return introrev
1075 1075 else:
1076 1076 return self.linkrev()
1077 1077
1078 1078 def introfilectx(self):
1079 1079 """Return filectx having identical contents, but pointing to the
1080 1080 changeset revision where this filectx was introduced"""
1081 1081 introrev = self.introrev()
1082 1082 if self.rev() == introrev:
1083 1083 return self
1084 1084 return self.filectx(self.filenode(), changeid=introrev)
1085 1085
1086 1086 def _parentfilectx(self, path, fileid, filelog):
1087 1087 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1088 1088 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1089 1089 if '_changeid' in vars(self) or '_changectx' in vars(self):
1090 1090 # If self is associated with a changeset (probably explicitly
1091 1091 # fed), ensure the created filectx is associated with a
1092 1092 # changeset that is an ancestor of self.changectx.
1093 1093 # This lets us later use _adjustlinkrev to get a correct link.
1094 1094 fctx._descendantrev = self.rev()
1095 1095 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1096 1096 elif '_descendantrev' in vars(self):
1097 1097 # Otherwise propagate _descendantrev if we have one associated.
1098 1098 fctx._descendantrev = self._descendantrev
1099 1099 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1100 1100 return fctx
1101 1101
1102 1102 def parents(self):
1103 1103 _path = self._path
1104 1104 fl = self._filelog
1105 1105 parents = self._filelog.parents(self._filenode)
1106 1106 pl = [(_path, node, fl) for node in parents if node != nullid]
1107 1107
1108 1108 r = fl.renamed(self._filenode)
1109 1109 if r:
1110 1110 # - In the simple rename case, both parents are nullid and pl is empty.
1111 1111 # - In case of merge, only one of the parents is nullid and should
1112 1112 # be replaced with the rename information. This parent is -always-
1113 1113 # the first one.
1114 1114 #
1115 1115 # As nullid parents have always been filtered out by the previous list
1116 1116 # comprehension, inserting at index 0 will always result in replacing
1117 1117 # the first nullid parent with the rename information.
1118 1118 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1119 1119
1120 1120 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1121 1121
1122 1122 def p1(self):
1123 1123 return self.parents()[0]
1124 1124
1125 1125 def p2(self):
1126 1126 p = self.parents()
1127 1127 if len(p) == 2:
1128 1128 return p[1]
1129 1129 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1130 1130
1131 1131 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1132 1132 """Returns a list of annotateline objects for each line in the file
1133 1133
1134 1134 - line.fctx is the filectx of the node where that line was last changed
1135 1135 - line.lineno is the line number at the first appearance in the managed
1136 1136 file
1137 1137 - line.text is the data on that line (including newline character)
1138 1138 """
1139 1139 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1140 1140
1141 1141 def parents(f):
1142 1142 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1143 1143 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1144 1144 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1145 1145 # isn't an ancestor of the srcrev.
1146 1146 f._changeid
1147 1147 pl = f.parents()
1148 1148
1149 1149 # Don't return renamed parents if we aren't following.
1150 1150 if not follow:
1151 1151 pl = [p for p in pl if p.path() == f.path()]
1152 1152
1153 1153 # renamed filectx won't have a filelog yet, so set it
1154 1154 # from the cache to save time
1155 1155 for p in pl:
1156 1156 if '_filelog' not in p.__dict__:
1157 1157 p._filelog = getlog(p.path())
1158 1158
1159 1159 return pl
1160 1160
1161 1161 # use linkrev to find the first changeset where self appeared
1162 1162 base = self.introfilectx()
1163 1163 if getattr(base, '_ancestrycontext', None) is None:
1164 1164 cl = self._repo.changelog
1165 1165 if base.rev() is None:
1166 1166 # wctx is not inclusive, but works because _ancestrycontext
1167 1167 # is used to test filelog revisions
1168 1168 ac = cl.ancestors(
1169 1169 [p.rev() for p in base.parents()], inclusive=True
1170 1170 )
1171 1171 else:
1172 1172 ac = cl.ancestors([base.rev()], inclusive=True)
1173 1173 base._ancestrycontext = ac
1174 1174
1175 1175 return dagop.annotate(
1176 1176 base, parents, skiprevs=skiprevs, diffopts=diffopts
1177 1177 )
1178 1178
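# Usage sketch (file name hypothetical; ``ui`` assumed in scope):
#
#   for line in repo[b'tip'][b'README'].annotate(follow=True):
#       ui.write(b'%d:%d: %s' % (line.fctx.rev(), line.lineno, line.text))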
1179 1179 def ancestors(self, followfirst=False):
1180 1180 visit = {}
1181 1181 c = self
1182 1182 if followfirst:
1183 1183 cut = 1
1184 1184 else:
1185 1185 cut = None
1186 1186
1187 1187 while True:
1188 1188 for parent in c.parents()[:cut]:
1189 1189 visit[(parent.linkrev(), parent.filenode())] = parent
1190 1190 if not visit:
1191 1191 break
1192 1192 c = visit.pop(max(visit))
1193 1193 yield c
1194 1194
1195 1195 def decodeddata(self):
1196 1196 """Returns `data()` after running repository decoding filters.
1197 1197
1198 1198 This is often equivalent to how the data would be expressed on disk.
1199 1199 """
1200 1200 return self._repo.wwritedata(self.path(), self.data())
1201 1201
1202 1202
1203 1203 class filectx(basefilectx):
1204 1204 """A filecontext object makes access to data related to a particular
1205 1205 filerevision convenient."""
1206 1206
1207 1207 def __init__(
1208 1208 self,
1209 1209 repo,
1210 1210 path,
1211 1211 changeid=None,
1212 1212 fileid=None,
1213 1213 filelog=None,
1214 1214 changectx=None,
1215 1215 ):
1216 1216 """changeid must be a revision number, if specified.
1217 1217 fileid can be a file revision or node."""
1218 1218 self._repo = repo
1219 1219 self._path = path
1220 1220
1221 1221 assert (
1222 1222 changeid is not None or fileid is not None or changectx is not None
1223 1223 ), (
1224 1224 b"bad args: changeid=%r, fileid=%r, changectx=%r"
1225 1225 % (changeid, fileid, changectx,)
1226 1226 )
1227 1227
1228 1228 if filelog is not None:
1229 1229 self._filelog = filelog
1230 1230
1231 1231 if changeid is not None:
1232 1232 self._changeid = changeid
1233 1233 if changectx is not None:
1234 1234 self._changectx = changectx
1235 1235 if fileid is not None:
1236 1236 self._fileid = fileid
1237 1237
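# Construction sketch: the common route is via a changectx, but building a
# filectx directly is handy when the filelog is already at hand (the file
# name below is hypothetical):
#
#   via_ctx = repo[b'tip'][b'README']
#   direct = filectx(repo, b'README', changeid=via_ctx.rev())
#   assert via_ctx.filenode() == direct.filenode()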
1238 1238 @propertycache
1239 1239 def _changectx(self):
1240 1240 try:
1241 1241 return self._repo[self._changeid]
1242 1242 except error.FilteredRepoLookupError:
1243 1243 # Linkrev may point to any revision in the repository. When the
1244 1244 # repository is filtered this may lead to `filectx` trying to build
1245 1245 # `changectx` for a filtered revision. In such a case we fall back to
1246 1246 # creating `changectx` on the unfiltered version of the repository.
1247 1247 # This fallback should not be an issue because `changectx` from
1248 1248 # `filectx` are not used in complex operations that care about
1249 1249 # filtering.
1250 1250 #
1251 1251 # This fallback is a cheap and dirty fix that prevents several
1252 1252 # crashes. It does not ensure the behavior is correct. However, the
1253 1253 # behavior was not correct before filtering either, and "incorrect
1254 1254 # behavior" is seen as better than "crash".
1255 1255 #
1256 1256 # Linkrevs have several serious problems with filtering that are
1257 1257 # complicated to solve. Proper handling of the issue here should be
1258 1258 # considered when solutions to the linkrev issue are on the table.
1259 1259 return self._repo.unfiltered()[self._changeid]
1260 1260
1261 1261 def filectx(self, fileid, changeid=None):
1262 1262 '''opens an arbitrary revision of the file without
1263 1263 opening a new filelog'''
1264 1264 return filectx(
1265 1265 self._repo,
1266 1266 self._path,
1267 1267 fileid=fileid,
1268 1268 filelog=self._filelog,
1269 1269 changeid=changeid,
1270 1270 )
1271 1271
1272 1272 def rawdata(self):
1273 1273 return self._filelog.rawdata(self._filenode)
1274 1274
1275 1275 def rawflags(self):
1276 1276 """low-level revlog flags"""
1277 1277 return self._filelog.flags(self._filerev)
1278 1278
1279 1279 def data(self):
1280 1280 try:
1281 1281 return self._filelog.read(self._filenode)
1282 1282 except error.CensoredNodeError:
1283 1283 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1284 1284 return b""
1285 1285 raise error.Abort(
1286 1286 _(b"censored node: %s") % short(self._filenode),
1287 1287 hint=_(b"set censor.policy to ignore errors"),
1288 1288 )
1289 1289
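# Configuration sketch: reading a censored file revision aborts unless the
# policy below is set, in which case data() returns an empty string:
#
#   [censor]
#   policy = ignore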
1290 1290 def size(self):
1291 1291 return self._filelog.size(self._filerev)
1292 1292
1293 1293 @propertycache
1294 1294 def _copied(self):
1295 1295 """check if file was actually renamed in this changeset revision
1296 1296
1297 1297 If a rename is logged in the file revision, we report the copy for the
1298 1298 changeset only if the file revision's linkrev points back to the changeset
1299 1299 in question or both changeset parents contain different file revisions.
1300 1300 """
1301 1301
1302 1302 renamed = self._filelog.renamed(self._filenode)
1303 1303 if not renamed:
1304 1304 return None
1305 1305
1306 1306 if self.rev() == self.linkrev():
1307 1307 return renamed
1308 1308
1309 1309 name = self.path()
1310 1310 fnode = self._filenode
1311 1311 for p in self._changectx.parents():
1312 1312 try:
1313 1313 if fnode == p.filenode(name):
1314 1314 return None
1315 1315 except error.LookupError:
1316 1316 pass
1317 1317 return renamed
1318 1318
1319 1319 def children(self):
1320 1320 # hard for renames
1321 1321 c = self._filelog.children(self._filenode)
1322 1322 return [
1323 1323 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1324 1324 for x in c
1325 1325 ]
1326 1326
1327 1327
1328 1328 class committablectx(basectx):
1329 1329 """A committablectx object provides common functionality for a context that
1330 1330 wants the ability to commit, e.g. workingctx or memctx."""
1331 1331
1332 1332 def __init__(
1333 1333 self,
1334 1334 repo,
1335 1335 text=b"",
1336 1336 user=None,
1337 1337 date=None,
1338 1338 extra=None,
1339 1339 changes=None,
1340 1340 branch=None,
1341 1341 ):
1342 1342 super(committablectx, self).__init__(repo)
1343 1343 self._rev = None
1344 1344 self._node = None
1345 1345 self._text = text
1346 1346 if date:
1347 1347 self._date = dateutil.parsedate(date)
1348 1348 if user:
1349 1349 self._user = user
1350 1350 if changes:
1351 1351 self._status = changes
1352 1352
1353 1353 self._extra = {}
1354 1354 if extra:
1355 1355 self._extra = extra.copy()
1356 1356 if branch is not None:
1357 1357 self._extra[b'branch'] = encoding.fromlocal(branch)
1358 1358 if not self._extra.get(b'branch'):
1359 1359 self._extra[b'branch'] = b'default'
1360 1360
1361 1361 def __bytes__(self):
1362 1362 return bytes(self._parents[0]) + b"+"
1363 1363
1364 1364 __str__ = encoding.strmethod(__bytes__)
1365 1365
1366 1366 def __nonzero__(self):
1367 1367 return True
1368 1368
1369 1369 __bool__ = __nonzero__
1370 1370
1371 1371 @propertycache
1372 1372 def _status(self):
1373 1373 return self._repo.status()
1374 1374
1375 1375 @propertycache
1376 1376 def _user(self):
1377 1377 return self._repo.ui.username()
1378 1378
1379 1379 @propertycache
1380 1380 def _date(self):
1381 1381 ui = self._repo.ui
1382 1382 date = ui.configdate(b'devel', b'default-date')
1383 1383 if date is None:
1384 1384 date = dateutil.makedate()
1385 1385 return date
1386 1386
1387 1387 def subrev(self, subpath):
1388 1388 return None
1389 1389
1390 1390 def manifestnode(self):
1391 1391 return None
1392 1392
1393 1393 def user(self):
1394 1394 return self._user or self._repo.ui.username()
1395 1395
1396 1396 def date(self):
1397 1397 return self._date
1398 1398
1399 1399 def description(self):
1400 1400 return self._text
1401 1401
1402 1402 def files(self):
1403 1403 return sorted(
1404 1404 self._status.modified + self._status.added + self._status.removed
1405 1405 )
1406 1406
1407 1407 def modified(self):
1408 1408 return self._status.modified
1409 1409
1410 1410 def added(self):
1411 1411 return self._status.added
1412 1412
1413 1413 def removed(self):
1414 1414 return self._status.removed
1415 1415
1416 1416 def deleted(self):
1417 1417 return self._status.deleted
1418 1418
1419 1419 filesmodified = modified
1420 1420 filesadded = added
1421 1421 filesremoved = removed
1422 1422
1423 1423 def branch(self):
1424 1424 return encoding.tolocal(self._extra[b'branch'])
1425 1425
1426 1426 def closesbranch(self):
1427 1427 return b'close' in self._extra
1428 1428
1429 1429 def extra(self):
1430 1430 return self._extra
1431 1431
1432 1432 def isinmemory(self):
1433 1433 return False
1434 1434
1435 1435 def tags(self):
1436 1436 return []
1437 1437
1438 1438 def bookmarks(self):
1439 1439 b = []
1440 1440 for p in self.parents():
1441 1441 b.extend(p.bookmarks())
1442 1442 return b
1443 1443
1444 1444 def phase(self):
1445 1445 phase = phases.newcommitphase(self._repo.ui)
1446 1446 for p in self.parents():
1447 1447 phase = max(phase, p.phase())
1448 1448 return phase
1449 1449
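# Rule sketch: the pending commit's phase is the configured new-commit phase,
# raised to the highest (least public) phase among its parents; e.g. with the
# config below, even a commit on a draft parent is created secret:
#
#   [phases]
#   new-commit = secret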
1450 1450 def hidden(self):
1451 1451 return False
1452 1452
1453 1453 def children(self):
1454 1454 return []
1455 1455
1456 1456 def ancestor(self, c2):
1457 1457 """return the "best" ancestor context of self and c2"""
1458 1458 return self._parents[0].ancestor(c2) # punt on two parents for now
1459 1459
1460 1460 def ancestors(self):
1461 1461 for p in self._parents:
1462 1462 yield p
1463 1463 for a in self._repo.changelog.ancestors(
1464 1464 [p.rev() for p in self._parents]
1465 1465 ):
1466 1466 yield self._repo[a]
1467 1467
1468 1468 def markcommitted(self, node):
1469 1469 """Perform post-commit cleanup necessary after committing this ctx
1470 1470
1471 1471 Specifically, this updates backing stores this working context
1472 1472 wraps to reflect the fact that the changes reflected by this
1473 1473 workingctx have been committed. For example, it marks
1474 1474 modified and added files as normal in the dirstate.
1475 1475
1476 1476 """
1477 1477
1478 1478 def dirty(self, missing=False, merge=True, branch=True):
1479 1479 return False
1480 1480
1481 1481
1482 1482 class workingctx(committablectx):
1483 1483 """A workingctx object makes access to data related to
1484 1484 the current working directory convenient.
1485 1485 date - any valid date string or (unixtime, offset), or None.
1486 1486 user - username string, or None.
1487 1487 extra - a dictionary of extra values, or None.
1488 1488 changes - a list of file lists as returned by localrepo.status()
1489 1489 or None to use the repository status.
1490 1490 """
1491 1491
1492 1492 def __init__(
1493 1493 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1494 1494 ):
1495 1495 branch = None
1496 1496 if not extra or b'branch' not in extra:
1497 1497 try:
1498 1498 branch = repo.dirstate.branch()
1499 1499 except UnicodeDecodeError:
1500 1500 raise error.Abort(_(b'branch name not in UTF-8!'))
1501 1501 super(workingctx, self).__init__(
1502 1502 repo, text, user, date, extra, changes, branch=branch
1503 1503 )
1504 1504
1505 1505 def __iter__(self):
1506 1506 d = self._repo.dirstate
1507 1507 for f in d:
1508 1508 if d[f] != b'r':
1509 1509 yield f
1510 1510
1511 1511 def __contains__(self, key):
1512 1512 return self._repo.dirstate[key] not in b"?r"
1513 1513
1514 1514 def hex(self):
1515 1515 return wdirhex
1516 1516
1517 1517 @propertycache
1518 1518 def _parents(self):
1519 1519 p = self._repo.dirstate.parents()
1520 1520 if p[1] == nullid:
1521 1521 p = p[:-1]
1522 1522 # use unfiltered repo to delay/avoid loading obsmarkers
1523 1523 unfi = self._repo.unfiltered()
1524 1524 return [
1525 1525 changectx(
1526 1526 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1527 1527 )
1528 1528 for n in p
1529 1529 ]
1530 1530
1531 1531 def _fileinfo(self, path):
1532 1532 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1533 1533 self._manifest
1534 1534 return super(workingctx, self)._fileinfo(path)
1535 1535
1536 1536 def _buildflagfunc(self):
1537 1537 # Create a fallback function for getting file flags when the
1538 1538 # filesystem doesn't support them
1539 1539
1540 1540 copiesget = self._repo.dirstate.copies().get
1541 1541 parents = self.parents()
1542 1542 if len(parents) < 2:
1543 1543 # when we have one parent, it's easy: copy from parent
1544 1544 man = parents[0].manifest()
1545 1545
1546 1546 def func(f):
1547 1547 f = copiesget(f, f)
1548 1548 return man.flags(f)
1549 1549
1550 1550 else:
1551 1551 # merges are tricky: we try to reconstruct the unstored
1552 1552 # result from the merge (issue1802)
1553 1553 p1, p2 = parents
1554 1554 pa = p1.ancestor(p2)
1555 1555 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1556 1556
1557 1557 def func(f):
1558 1558 f = copiesget(f, f) # may be wrong for merges with copies
1559 1559 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1560 1560 if fl1 == fl2:
1561 1561 return fl1
1562 1562 if fl1 == fla:
1563 1563 return fl2
1564 1564 if fl2 == fla:
1565 1565 return fl1
1566 1566 return b'' # punt for conflicts
1567 1567
1568 1568 return func
1569 1569
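# Resolution sketch for the merge case above, with parent flags fl1, fl2 and
# ancestor flag fla:
#
#   fl1 == fl2   -> fl1   (parents agree)
#   fl1 == fla   -> fl2   (only p2 changed the flag)
#   fl2 == fla   -> fl1   (only p1 changed the flag)
#   otherwise    -> b''   (conflicting flag changes; punt)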
1570 1570 @propertycache
1571 1571 def _flagfunc(self):
1572 1572 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1573 1573
1574 1574 def flags(self, path):
1575 1575 if '_manifest' in self.__dict__:
1576 1576 try:
1577 1577 return self._manifest.flags(path)
1578 1578 except KeyError:
1579 1579 return b''
1580 1580
1581 1581 try:
1582 1582 return self._flagfunc(path)
1583 1583 except OSError:
1584 1584 return b''
1585 1585
1586 1586 def filectx(self, path, filelog=None):
1587 1587 """get a file context from the working directory"""
1588 1588 return workingfilectx(
1589 1589 self._repo, path, workingctx=self, filelog=filelog
1590 1590 )
1591 1591
1592 1592 def dirty(self, missing=False, merge=True, branch=True):
1593 1593 """check whether a working directory is modified"""
1594 1594 # check subrepos first
1595 1595 for s in sorted(self.substate):
1596 1596 if self.sub(s).dirty(missing=missing):
1597 1597 return True
1598 1598 # check current working dir
1599 1599 return (
1600 1600 (merge and self.p2())
1601 1601 or (branch and self.branch() != self.p1().branch())
1602 1602 or self.modified()
1603 1603 or self.added()
1604 1604 or self.removed()
1605 1605 or (missing and self.deleted())
1606 1606 )
1607 1607
1608 1608 def add(self, list, prefix=b""):
1609 1609 with self._repo.wlock():
1610 1610 ui, ds = self._repo.ui, self._repo.dirstate
1611 1611 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1612 1612 rejected = []
1613 1613 lstat = self._repo.wvfs.lstat
1614 1614 for f in list:
1615 1615 # ds.pathto() returns an absolute file when this is invoked from
1616 1616 # the keyword extension. That gets flagged as non-portable on
1617 1617 # Windows, since it contains the drive letter and colon.
1618 1618 scmutil.checkportable(ui, os.path.join(prefix, f))
1619 1619 try:
1620 1620 st = lstat(f)
1621 1621 except OSError:
1622 1622 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1623 1623 rejected.append(f)
1624 1624 continue
1625 1625 limit = ui.configbytes(b'ui', b'large-file-limit')
1626 1626 if limit != 0 and st.st_size > limit:
1627 1627 ui.warn(
1628 1628 _(
1629 1629 b"%s: up to %d MB of RAM may be required "
1630 1630 b"to manage this file\n"
1631 1631 b"(use 'hg revert %s' to cancel the "
1632 1632 b"pending addition)\n"
1633 1633 )
1634 1634 % (f, 3 * st.st_size // 1000000, uipath(f))
1635 1635 )
1636 1636 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1637 1637 ui.warn(
1638 1638 _(
1639 1639 b"%s not added: only files and symlinks "
1640 1640 b"supported currently\n"
1641 1641 )
1642 1642 % uipath(f)
1643 1643 )
1644 1644 rejected.append(f)
1645 1645 elif ds[f] in b'amn':
1646 1646 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1647 1647 elif ds[f] == b'r':
1648 1648 ds.normallookup(f)
1649 1649 else:
1650 1650 ds.add(f)
1651 1651 return rejected
1652 1652
1653 1653 def forget(self, files, prefix=b""):
1654 1654 with self._repo.wlock():
1655 1655 ds = self._repo.dirstate
1656 1656 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1657 1657 rejected = []
1658 1658 for f in files:
1659 1659 if f not in ds:
1660 1660 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1661 1661 rejected.append(f)
1662 1662 elif ds[f] != b'a':
1663 1663 ds.remove(f)
1664 1664 else:
1665 1665 ds.drop(f)
1666 1666 return rejected
1667 1667
1668 1668 def copy(self, source, dest):
1669 1669 try:
1670 1670 st = self._repo.wvfs.lstat(dest)
1671 1671 except OSError as err:
1672 1672 if err.errno != errno.ENOENT:
1673 1673 raise
1674 1674 self._repo.ui.warn(
1675 1675 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1676 1676 )
1677 1677 return
1678 1678 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1679 1679 self._repo.ui.warn(
1680 1680 _(b"copy failed: %s is not a file or a symbolic link\n")
1681 1681 % self._repo.dirstate.pathto(dest)
1682 1682 )
1683 1683 else:
1684 1684 with self._repo.wlock():
1685 1685 ds = self._repo.dirstate
1686 1686 if ds[dest] in b'?':
1687 1687 ds.add(dest)
1688 1688 elif ds[dest] in b'r':
1689 1689 ds.normallookup(dest)
1690 1690 ds.copy(source, dest)
1691 1691
1692 1692 def match(
1693 1693 self,
1694 1694 pats=None,
1695 1695 include=None,
1696 1696 exclude=None,
1697 1697 default=b'glob',
1698 1698 listsubrepos=False,
1699 1699 badfn=None,
1700 1700 cwd=None,
1701 1701 ):
1702 1702 r = self._repo
1703 1703 if not cwd:
1704 1704 cwd = r.getcwd()
1705 1705
1706 1706 # Only a case-insensitive filesystem needs magic to translate user input
1707 1707 # to actual case in the filesystem.
1708 1708 icasefs = not util.fscasesensitive(r.root)
1709 1709 return matchmod.match(
1710 1710 r.root,
1711 1711 cwd,
1712 1712 pats,
1713 1713 include,
1714 1714 exclude,
1715 1715 default,
1716 1716 auditor=r.auditor,
1717 1717 ctx=self,
1718 1718 listsubrepos=listsubrepos,
1719 1719 badfn=badfn,
1720 1720 icasefs=icasefs,
1721 1721 )
1722 1722
1723 1723 def _filtersuspectsymlink(self, files):
1724 1724 if not files or self._repo.dirstate._checklink:
1725 1725 return files
1726 1726
1727 1727 # Symlink placeholders may get non-symlink-like contents
1728 1728 # via user error or dereferencing by NFS or Samba servers,
1729 1729 # so we filter out any placeholders that don't look like a
1730 1730 # symlink
1731 1731 sane = []
1732 1732 for f in files:
1733 1733 if self.flags(f) == b'l':
1734 1734 d = self[f].data()
1735 1735 if (
1736 1736 d == b''
1737 1737 or len(d) >= 1024
1738 1738 or b'\n' in d
1739 1739 or stringutil.binary(d)
1740 1740 ):
1741 1741 self._repo.ui.debug(
1742 1742 b'ignoring suspect symlink placeholder "%s"\n' % f
1743 1743 )
1744 1744 continue
1745 1745 sane.append(f)
1746 1746 return sane
1747 1747
1748 1748 def _checklookup(self, files):
1749 1749 # check for any possibly clean files
1750 1750 if not files:
1751 1751 return [], [], []
1752 1752
1753 1753 modified = []
1754 1754 deleted = []
1755 1755 fixup = []
1756 1756 pctx = self._parents[0]
1757 1757 # do a full compare of any files that might have changed
1758 1758 for f in sorted(files):
1759 1759 try:
1760 1760 # This will return True for a file that got replaced by a
1761 1761 # directory in the interim, but fixing that is pretty hard.
1762 1762 if (
1763 1763 f not in pctx
1764 1764 or self.flags(f) != pctx.flags(f)
1765 1765 or pctx[f].cmp(self[f])
1766 1766 ):
1767 1767 modified.append(f)
1768 1768 else:
1769 1769 fixup.append(f)
1770 1770 except (IOError, OSError):
1771 1771 # A file became inaccessible in between? Mark it as deleted,
1772 1772 # matching dirstate behavior (issue5584).
1773 1773 # The dirstate has more complex behavior around whether a
1774 1774 # missing file matches a directory, etc, but we don't need to
1775 1775 # bother with that: if f has made it to this point, we're sure
1776 1776 # it's in the dirstate.
1777 1777 deleted.append(f)
1778 1778
1779 1779 return modified, deleted, fixup
1780 1780
1781 1781 def _poststatusfixup(self, status, fixup):
1782 1782 """update dirstate for files that are actually clean"""
1783 1783 poststatus = self._repo.postdsstatus()
1784 1784 if fixup or poststatus:
1785 1785 try:
1786 1786 oldid = self._repo.dirstate.identity()
1787 1787
1788 1788 # updating the dirstate is optional
1789 1789 # so we don't wait on the lock
1790 1790 # wlock can invalidate the dirstate, so cache normal _after_
1791 1791 # taking the lock
1792 1792 with self._repo.wlock(False):
1793 1793 if self._repo.dirstate.identity() == oldid:
1794 1794 if fixup:
1795 1795 normal = self._repo.dirstate.normal
1796 1796 for f in fixup:
1797 1797 normal(f)
1798 1798 # write changes out explicitly, because nesting
1799 1799 # wlock at runtime may prevent 'wlock.release()'
1800 1800 # after this block from doing so for subsequent
1801 1801 # changing files
1802 1802 tr = self._repo.currenttransaction()
1803 1803 self._repo.dirstate.write(tr)
1804 1804
1805 1805 if poststatus:
1806 1806 for ps in poststatus:
1807 1807 ps(self, status)
1808 1808 else:
1809 1809 # in this case, writing changes out breaks
1810 1810 # consistency, because .hg/dirstate was
1811 1811 # already changed simultaneously after last
1812 1812 # caching (see also issue5584 for detail)
1813 1813 self._repo.ui.debug(
1814 1814 b'skip updating dirstate: identity mismatch\n'
1815 1815 )
1816 1816 except error.LockError:
1817 1817 pass
1818 1818 finally:
1819 1819 # Even if the wlock couldn't be grabbed, clear out the list.
1820 1820 self._repo.clearpostdsstatus()
1821 1821
1822 1822 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1823 1823 '''Gets the status from the dirstate -- internal use only.'''
1824 1824 subrepos = []
1825 1825 if b'.hgsub' in self:
1826 1826 subrepos = sorted(self.substate)
1827 1827 cmp, s = self._repo.dirstate.status(
1828 1828 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1829 1829 )
1830 1830
1831 1831 # check for any possibly clean files
1832 1832 fixup = []
1833 1833 if cmp:
1834 1834 modified2, deleted2, fixup = self._checklookup(cmp)
1835 1835 s.modified.extend(modified2)
1836 1836 s.deleted.extend(deleted2)
1837 1837
1838 1838 if fixup and clean:
1839 1839 s.clean.extend(fixup)
1840 1840
1841 1841 self._poststatusfixup(s, fixup)
1842 1842
1843 1843 if match.always():
1844 1844 # cache for performance
1845 1845 if s.unknown or s.ignored or s.clean:
1846 1846 # "_status" is cached with list*=False in the normal route
1847 1847 self._status = scmutil.status(
1848 1848 s.modified, s.added, s.removed, s.deleted, [], [], []
1849 1849 )
1850 1850 else:
1851 1851 self._status = s
1852 1852
1853 1853 return s
1854 1854
1855 1855 @propertycache
1856 1856 def _copies(self):
1857 1857 p1copies = {}
1858 1858 p2copies = {}
1859 1859 parents = self._repo.dirstate.parents()
1860 1860 p1manifest = self._repo[parents[0]].manifest()
1861 1861 p2manifest = self._repo[parents[1]].manifest()
1862 1862 changedset = set(self.added()) | set(self.modified())
1863 1863 narrowmatch = self._repo.narrowmatch()
1864 1864 for dst, src in self._repo.dirstate.copies().items():
1865 1865 if dst not in changedset or not narrowmatch(dst):
1866 1866 continue
1867 1867 if src in p1manifest:
1868 1868 p1copies[dst] = src
1869 1869 elif src in p2manifest:
1870 1870 p2copies[dst] = src
1871 1871 return p1copies, p2copies
1872 1872
1873 1873 @propertycache
1874 1874 def _manifest(self):
1875 1875 """generate a manifest corresponding to the values in self._status
1876 1876
1877 1877 This reuses the file nodeids from the parent, but we use special node
1878 1878 identifiers for added and modified files. This is used by manifest
1879 1879 merge to see that files are different and by update logic to avoid
1880 1880 deleting newly added files.
1881 1881 """
1882 1882 return self._buildstatusmanifest(self._status)
1883 1883
1884 1884 def _buildstatusmanifest(self, status):
1885 1885 """Builds a manifest that includes the given status results."""
1886 1886 parents = self.parents()
1887 1887
1888 1888 man = parents[0].manifest().copy()
1889 1889
1890 1890 ff = self._flagfunc
1891 1891 for i, l in (
1892 1892 (addednodeid, status.added),
1893 1893 (modifiednodeid, status.modified),
1894 1894 ):
1895 1895 for f in l:
1896 1896 man[f] = i
1897 1897 try:
1898 1898 man.setflag(f, ff(f))
1899 1899 except OSError:
1900 1900 pass
1901 1901
1902 1902 for f in status.deleted + status.removed:
1903 1903 if f in man:
1904 1904 del man[f]
1905 1905
1906 1906 return man
1907 1907
1908 1908 def _buildstatus(
1909 1909 self, other, s, match, listignored, listclean, listunknown
1910 1910 ):
1911 1911 """build a status with respect to another context
1912 1912
1913 1913 This includes logic for maintaining the fast path of status when
1914 1914 comparing the working directory against its parent, which is to skip
1915 1915 building a new manifest if self (working directory) is not comparing
1916 1916 against its parent (repo['.']).
1917 1917 """
1918 1918 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1919 1919 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1920 1920 # might have accidentally ended up with the entire contents of the file
1921 1921 # they are supposed to be linking to.
1922 1922 s.modified[:] = self._filtersuspectsymlink(s.modified)
1923 1923 if other != self._repo[b'.']:
1924 1924 s = super(workingctx, self)._buildstatus(
1925 1925 other, s, match, listignored, listclean, listunknown
1926 1926 )
1927 1927 return s
1928 1928
1929 1929 def _matchstatus(self, other, match):
1930 1930 """override the match method with a filter for directory patterns
1931 1931
1932 1932 We use inheritance to customize the match.bad method only in cases of
1933 1933 workingctx since it belongs only to the working directory when
1934 1934 comparing against the parent changeset.
1935 1935
1936 1936 If we aren't comparing against the working directory's parent, then we
1937 1937 just use the default match object sent to us.
1938 1938 """
1939 1939 if other != self._repo[b'.']:
1940 1940
1941 1941 def bad(f, msg):
1942 1942 # 'f' may be a directory pattern from 'match.files()',
1943 1943 # so 'f not in ctx1' is not enough
1944 1944 if f not in other and not other.hasdir(f):
1945 1945 self._repo.ui.warn(
1946 1946 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1947 1947 )
1948 1948
1949 1949 match.bad = bad
1950 1950 return match
1951 1951
1952 1952 def walk(self, match):
1953 1953 '''Generates matching file names.'''
1954 1954 return sorted(
1955 1955 self._repo.dirstate.walk(
1956 1956 self._repo.narrowmatch(match),
1957 1957 subrepos=sorted(self.substate),
1958 1958 unknown=True,
1959 1959 ignored=False,
1960 1960 )
1961 1961 )
1962 1962
1963 1963 def matches(self, match):
1964 1964 match = self._repo.narrowmatch(match)
1965 1965 ds = self._repo.dirstate
1966 1966 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1967 1967
1968 1968 def markcommitted(self, node):
1969 1969 with self._repo.dirstate.parentchange():
1970 1970 for f in self.modified() + self.added():
1971 1971 self._repo.dirstate.normal(f)
1972 1972 for f in self.removed():
1973 1973 self._repo.dirstate.drop(f)
1974 1974 self._repo.dirstate.setparents(node)
1975 1975
1976 1976 # write changes out explicitly, because nesting wlock at
1977 1977 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1978 1978 # from immediately doing so for subsequent changing files
1979 1979 self._repo.dirstate.write(self._repo.currenttransaction())
1980 1980
1981 1981 sparse.aftercommit(self._repo, node)
1982 1982
1983 1983
1984 1984 class committablefilectx(basefilectx):
1985 1985 """A committablefilectx provides common functionality for a file context
1986 1986 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1987 1987
1988 1988 def __init__(self, repo, path, filelog=None, ctx=None):
1989 1989 self._repo = repo
1990 1990 self._path = path
1991 1991 self._changeid = None
1992 1992 self._filerev = self._filenode = None
1993 1993
1994 1994 if filelog is not None:
1995 1995 self._filelog = filelog
1996 1996 if ctx:
1997 1997 self._changectx = ctx
1998 1998
1999 1999 def __nonzero__(self):
2000 2000 return True
2001 2001
2002 2002 __bool__ = __nonzero__
2003 2003
2004 2004 def linkrev(self):
2005 2005 # linked to self._changectx no matter if file is modified or not
2006 2006 return self.rev()
2007 2007
2008 2008 def renamed(self):
2009 2009 path = self.copysource()
2010 2010 if not path:
2011 2011 return None
2012 2012 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2013 2013
2014 2014 def parents(self):
2015 2015 '''return parent filectxs, following copies if necessary'''
2016 2016
2017 2017 def filenode(ctx, path):
2018 2018 return ctx._manifest.get(path, nullid)
2019 2019
2020 2020 path = self._path
2021 2021 fl = self._filelog
2022 2022 pcl = self._changectx._parents
2023 2023 renamed = self.renamed()
2024 2024
2025 2025 if renamed:
2026 2026 pl = [renamed + (None,)]
2027 2027 else:
2028 2028 pl = [(path, filenode(pcl[0], path), fl)]
2029 2029
2030 2030 for pc in pcl[1:]:
2031 2031 pl.append((path, filenode(pc, path), fl))
2032 2032
2033 2033 return [
2034 2034 self._parentfilectx(p, fileid=n, filelog=l)
2035 2035 for p, n, l in pl
2036 2036 if n != nullid
2037 2037 ]
2038 2038
2039 2039 def children(self):
2040 2040 return []
2041 2041
2042 2042
2043 2043 class workingfilectx(committablefilectx):
2044 2044 """A workingfilectx object makes access to data related to a particular
2045 2045 file in the working directory convenient."""
2046 2046
2047 2047 def __init__(self, repo, path, filelog=None, workingctx=None):
2048 2048 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2049 2049
2050 2050 @propertycache
2051 2051 def _changectx(self):
2052 2052 return workingctx(self._repo)
2053 2053
2054 2054 def data(self):
2055 2055 return self._repo.wread(self._path)
2056 2056
2057 2057 def copysource(self):
2058 2058 return self._repo.dirstate.copied(self._path)
2059 2059
2060 2060 def size(self):
2061 2061 return self._repo.wvfs.lstat(self._path).st_size
2062 2062
2063 2063 def lstat(self):
2064 2064 return self._repo.wvfs.lstat(self._path)
2065 2065
2066 2066 def date(self):
2067 2067 t, tz = self._changectx.date()
2068 2068 try:
2069 2069 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2070 2070 except OSError as err:
2071 2071 if err.errno != errno.ENOENT:
2072 2072 raise
2073 2073 return (t, tz)
2074 2074
2075 2075 def exists(self):
2076 2076 return self._repo.wvfs.exists(self._path)
2077 2077
2078 2078 def lexists(self):
2079 2079 return self._repo.wvfs.lexists(self._path)
2080 2080
2081 2081 def audit(self):
2082 2082 return self._repo.wvfs.audit(self._path)
2083 2083
2084 2084 def cmp(self, fctx):
2085 2085 """compare with other file context
2086 2086
2087 2087 returns True if different than fctx.
2088 2088 """
2089 2089 # fctx should be a filectx (not a workingfilectx)
2090 2090 # invert comparison to reuse the same code path
2091 2091 return fctx.cmp(self)
2092 2092
2093 2093 def remove(self, ignoremissing=False):
2094 2094 """wraps unlink for a repo's working directory"""
2095 2095 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2096 2096 self._repo.wvfs.unlinkpath(
2097 2097 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2098 2098 )
2099 2099
2100 2100 def write(self, data, flags, backgroundclose=False, **kwargs):
2101 2101 """wraps repo.wwrite"""
2102 2102 return self._repo.wwrite(
2103 2103 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2104 2104 )
2105 2105
2106 2106 def markcopied(self, src):
2107 2107 """marks this file a copy of `src`"""
2108 2108 self._repo.dirstate.copy(src, self._path)
2109 2109
2110 2110 def clearunknown(self):
2111 2111 """Removes conflicting items in the working directory so that
2112 2112 ``write()`` can be called successfully.
2113 2113 """
2114 2114 wvfs = self._repo.wvfs
2115 2115 f = self._path
2116 2116 wvfs.audit(f)
2117 2117 if self._repo.ui.configbool(
2118 2118 b'experimental', b'merge.checkpathconflicts'
2119 2119 ):
2120 2120 # remove files under the directory as they should already be
2121 2121 # warned and backed up
2122 2122 if wvfs.isdir(f) and not wvfs.islink(f):
2123 2123 wvfs.rmtree(f, forcibly=True)
2124 2124 for p in reversed(list(pathutil.finddirs(f))):
2125 2125 if wvfs.isfileorlink(p):
2126 2126 wvfs.unlink(p)
2127 2127 break
2128 2128 else:
2129 2129 # don't remove files if path conflicts are not processed
2130 2130 if wvfs.isdir(f) and not wvfs.islink(f):
2131 2131 wvfs.removedirs(f)
2132 2132
2133 2133 def setflags(self, l, x):
2134 2134 self._repo.wvfs.setflags(self._path, l, x)
2135 2135
2136 2136
2137 2137 class overlayworkingctx(committablectx):
2138 2138 """Wraps another mutable context with a write-back cache that can be
2139 2139 converted into a commit context.
2140 2140
2141 2141 self._cache[path] maps to a dict with keys: {
2142 2142 'exists': bool?
2143 2143 'date': date?
2144 2144 'data': str?
2145 2145 'flags': str?
2146 2146 'copied': str? (path or None)
2147 2147 }
2148 2148 If `exists` is True, `flags` and `date` must be non-None. If `exists`
2149 2149 is False, the file was deleted.
2150 2150 """
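# Illustrative cache entry (values hypothetical): a dirty, existing file with
# plain flags and no copy source would be recorded roughly as
#
#   self._cache[b'foo.txt'] = {
#       b'exists': True,
#       b'data': b'new contents\n',
#       b'date': dateutil.makedate(),
#       b'flags': b'',
#       b'copied': None,
#   }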
2151 2151
2152 2152 def __init__(self, repo):
2153 2153 super(overlayworkingctx, self).__init__(repo)
2154 2154 self.clean()
2155 2155
2156 2156 def setbase(self, wrappedctx):
2157 2157 self._wrappedctx = wrappedctx
2158 2158 self._parents = [wrappedctx]
2159 2159 # Drop old manifest cache as it is now out of date.
2160 2160 # This is necessary when, e.g., rebasing several nodes with one
2161 2161 # ``overlayworkingctx`` (e.g. with --collapse).
2162 2162 util.clearcachedproperty(self, b'_manifest')
2163 2163
2164 def setparents(self, p1node, p2node=nullid):
2165 assert p1node == self._wrappedctx.node()
2166 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2167
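# Usage sketch (revisions hypothetical): mirroring dirstate.setparents() when
# merging in memory, so that a later tomemctx() defaults to both parents:
#
#   wctx = overlayworkingctx(repo)
#   wctx.setbase(repo[destrev])
#   wctx.setparents(repo[destrev].node(), repo[otherrev].node())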
2164 2168 def data(self, path):
2165 2169 if self.isdirty(path):
2166 2170 if self._cache[path][b'exists']:
2167 2171 if self._cache[path][b'data'] is not None:
2168 2172 return self._cache[path][b'data']
2169 2173 else:
2170 2174 # Must fallback here, too, because we only set flags.
2171 2175 return self._wrappedctx[path].data()
2172 2176 else:
2173 2177 raise error.ProgrammingError(
2174 2178 b"No such file or directory: %s" % path
2175 2179 )
2176 2180 else:
2177 2181 return self._wrappedctx[path].data()
2178 2182
2179 2183 @propertycache
2180 2184 def _manifest(self):
2181 2185 parents = self.parents()
2182 2186 man = parents[0].manifest().copy()
2183 2187
2184 2188 flag = self._flagfunc
2185 2189 for path in self.added():
2186 2190 man[path] = addednodeid
2187 2191 man.setflag(path, flag(path))
2188 2192 for path in self.modified():
2189 2193 man[path] = modifiednodeid
2190 2194 man.setflag(path, flag(path))
2191 2195 for path in self.removed():
2192 2196 del man[path]
2193 2197 return man
2194 2198
2195 2199 @propertycache
2196 2200 def _flagfunc(self):
2197 2201 def f(path):
2198 2202 return self._cache[path][b'flags']
2199 2203
2200 2204 return f
2201 2205
2202 2206 def files(self):
2203 2207 return sorted(self.added() + self.modified() + self.removed())
2204 2208
2205 2209 def modified(self):
2206 2210 return [
2207 2211 f
2208 2212 for f in self._cache.keys()
2209 2213 if self._cache[f][b'exists'] and self._existsinparent(f)
2210 2214 ]
2211 2215
2212 2216 def added(self):
2213 2217 return [
2214 2218 f
2215 2219 for f in self._cache.keys()
2216 2220 if self._cache[f][b'exists'] and not self._existsinparent(f)
2217 2221 ]
2218 2222
2219 2223 def removed(self):
2220 2224 return [
2221 2225 f
2222 2226 for f in self._cache.keys()
2223 2227 if not self._cache[f][b'exists'] and self._existsinparent(f)
2224 2228 ]
2225 2229
2226 2230 def p1copies(self):
2227 2231 copies = {}
2228 2232 narrowmatch = self._repo.narrowmatch()
2229 2233 for f in self._cache.keys():
2230 2234 if not narrowmatch(f):
2231 2235 continue
2232 2236 copies.pop(f, None) # delete if it exists
2233 2237 source = self._cache[f][b'copied']
2234 2238 if source:
2235 2239 copies[f] = source
2236 2240 return copies
2237 2241
2238 2242 def p2copies(self):
2239 2243 copies = {}
2240 2244 narrowmatch = self._repo.narrowmatch()
2241 2245 for f in self._cache.keys():
2242 2246 if not narrowmatch(f):
2243 2247 continue
2244 2248 copies.pop(f, None) # delete if it exists
2245 2249 source = self._cache[f][b'copied']
2246 2250 if source:
2247 2251 copies[f] = source
2248 2252 return copies
2249 2253
2250 2254 def isinmemory(self):
2251 2255 return True
2252 2256
2253 2257 def filedate(self, path):
2254 2258 if self.isdirty(path):
2255 2259 return self._cache[path][b'date']
2256 2260 else:
2257 2261 return self._wrappedctx[path].date()
2258 2262
2259 2263 def markcopied(self, path, origin):
2260 2264 self._markdirty(
2261 2265 path,
2262 2266 exists=True,
2263 2267 date=self.filedate(path),
2264 2268 flags=self.flags(path),
2265 2269 copied=origin,
2266 2270 )
2267 2271
2268 2272 def copydata(self, path):
2269 2273 if self.isdirty(path):
2270 2274 return self._cache[path][b'copied']
2271 2275 else:
2272 2276 return None
2273 2277
2274 2278 def flags(self, path):
2275 2279 if self.isdirty(path):
2276 2280 if self._cache[path][b'exists']:
2277 2281 return self._cache[path][b'flags']
2278 2282 else:
2279 2283 raise error.ProgrammingError(
2280 2284 b"No such file or directory: %s" % self._path
2281 2285 )
2282 2286 else:
2283 2287 return self._wrappedctx[path].flags()
2284 2288
2285 2289 def __contains__(self, key):
2286 2290 if key in self._cache:
2287 2291 return self._cache[key][b'exists']
2288 2292 return key in self.p1()
2289 2293
2290 2294 def _existsinparent(self, path):
2291 2295 try:
2292 2296 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2293 2297 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2294 2298 # with an ``exists()`` function.
2295 2299 self._wrappedctx[path]
2296 2300 return True
2297 2301 except error.ManifestLookupError:
2298 2302 return False
2299 2303
2300 2304 def _auditconflicts(self, path):
2301 2305 """Replicates conflict checks done by wvfs.write().
2302 2306
2303 2307 Since we never write to the filesystem and never call `applyupdates` in
2304 2308 IMM, we'll never check that a path is actually writable -- e.g., because
2305 2309 it adds `a/foo`, but `a` is actually a file in the other commit.
2306 2310 """
2307 2311
2308 2312 def fail(path, component):
2309 2313 # p1() is the base and we're receiving "writes" for p2()'s
2310 2314 # files.
2311 2315 if b'l' in self.p1()[component].flags():
2312 2316 raise error.Abort(
2313 2317 b"error: %s conflicts with symlink %s "
2314 2318 b"in %d." % (path, component, self.p1().rev())
2315 2319 )
2316 2320 else:
2317 2321 raise error.Abort(
2318 2322 b"error: '%s' conflicts with file '%s' in "
2319 2323 b"%d." % (path, component, self.p1().rev())
2320 2324 )
2321 2325
2322 2326 # Test that each new directory to be created to write this path from p2
2323 2327 # is not a file in p1.
2324 2328 components = path.split(b'/')
2325 2329 for i in pycompat.xrange(len(components)):
2326 2330 component = b"/".join(components[0:i])
2327 2331 if component in self:
2328 2332 fail(path, component)
2329 2333
2330 2334 # Test the other direction -- that this path from p2 isn't a directory
2331 2335 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2332 2336 match = self.match([path], default=b'path')
2333 2337 matches = self.p1().manifest().matches(match)
2334 2338 mfiles = matches.keys()
2335 2339 if len(mfiles) > 0:
2336 2340 if len(mfiles) == 1 and mfiles[0] == path:
2337 2341 return
2338 2342 # omit the files which are deleted in current IMM wctx
2339 2343 mfiles = [m for m in mfiles if m in self]
2340 2344 if not mfiles:
2341 2345 return
2342 2346 raise error.Abort(
2343 2347 b"error: file '%s' cannot be written because "
2344 2348 b" '%s/' is a directory in %s (containing %d "
2345 2349 b"entries: %s)"
2346 2350 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2347 2351 )
2348 2352
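# Prefix sketch (pure illustration of the loop above): for b'a/b/c', the
# ancestor components checked against the overlay are b'a' and b'a/b'; if
# either exists as a file, writing the path must fail:
#
#   components = b'a/b/c'.split(b'/')
#   prefixes = [b'/'.join(components[:i]) for i in range(1, len(components))]
#   assert prefixes == [b'a', b'a/b']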
2349 2353 def write(self, path, data, flags=b'', **kwargs):
2350 2354 if data is None:
2351 2355 raise error.ProgrammingError(b"data must be non-None")
2352 2356 self._auditconflicts(path)
2353 2357 self._markdirty(
2354 2358 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2355 2359 )
2356 2360
2357 2361 def setflags(self, path, l, x):
2358 2362 flag = b''
2359 2363 if l:
2360 2364 flag = b'l'
2361 2365 elif x:
2362 2366 flag = b'x'
2363 2367 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2364 2368
2365 2369 def remove(self, path):
2366 2370 self._markdirty(path, exists=False)
2367 2371
2368 2372 def exists(self, path):
2369 2373 """exists behaves like `lexists`, but needs to follow symlinks and
2370 2374 return False if they are broken.
2371 2375 """
2372 2376 if self.isdirty(path):
2373 2377 # If this path exists and is a symlink, "follow" it by calling
2374 2378 # exists on the destination path.
2375 2379 if (
2376 2380 self._cache[path][b'exists']
2377 2381 and b'l' in self._cache[path][b'flags']
2378 2382 ):
2379 2383 return self.exists(self._cache[path][b'data'].strip())
2380 2384 else:
2381 2385 return self._cache[path][b'exists']
2382 2386
2383 2387 return self._existsinparent(path)
2384 2388
2385 2389 def lexists(self, path):
2386 2390 """lexists returns True if the path exists"""
2387 2391 if self.isdirty(path):
2388 2392 return self._cache[path][b'exists']
2389 2393
2390 2394 return self._existsinparent(path)
2391 2395
2392 2396 def size(self, path):
2393 2397 if self.isdirty(path):
2394 2398 if self._cache[path][b'exists']:
2395 2399 return len(self._cache[path][b'data'])
2396 2400 else:
2397 2401 raise error.ProgrammingError(
2398 2402 b"No such file or directory: %s" % self._path
2399 2403 )
2400 2404 return self._wrappedctx[path].size()
2401 2405
2402 2406 def tomemctx(
2403 2407 self,
2404 2408 text,
2405 2409 branch=None,
2406 2410 extra=None,
2407 2411 date=None,
2408 2412 parents=None,
2409 2413 user=None,
2410 2414 editor=None,
2411 2415 ):
2412 2416 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2413 2417 committed.
2414 2418
2415 2419 ``text`` is the commit message.
2416 2420 ``parents`` (optional) are rev numbers.
2417 2421 """
2418 # Default parents to the wrapped contexts' if not passed.
2422 # Default parents to the wrapped context if not passed.
2419 2423 if parents is None:
2420 parents = self._wrappedctx.parents()
2424 parents = self.parents()
2421 2425 if len(parents) == 1:
2422 2426 parents = (parents[0], None)
2423 2427
2424 2428 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2425 2429 if parents[1] is None:
2426 2430 parents = (self._repo[parents[0]], None)
2427 2431 else:
2428 2432 parents = (self._repo[parents[0]], self._repo[parents[1]])
2429 2433
2430 2434 files = self.files()
2431 2435
2432 2436 def getfile(repo, memctx, path):
2433 2437 if self._cache[path][b'exists']:
2434 2438 return memfilectx(
2435 2439 repo,
2436 2440 memctx,
2437 2441 path,
2438 2442 self._cache[path][b'data'],
2439 2443 b'l' in self._cache[path][b'flags'],
2440 2444 b'x' in self._cache[path][b'flags'],
2441 2445 self._cache[path][b'copied'],
2442 2446 )
2443 2447 else:
2444 2448 # Returning None, but including the path in `files`, is
2445 2449 # necessary for memctx to register a deletion.
2446 2450 return None
2447 2451
2448 2452 if branch is None:
2449 2453 branch = self._wrappedctx.branch()
2450 2454
2451 2455 return memctx(
2452 2456 self._repo,
2453 2457 parents,
2454 2458 text,
2455 2459 files,
2456 2460 getfile,
2457 2461 date=date,
2458 2462 extra=extra,
2459 2463 user=user,
2460 2464 branch=branch,
2461 2465 editor=editor,
2462 2466 )
2463 2467
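# Usage sketch (names hypothetical): commit an in-memory overlay on top of
# its base, roughly what in-memory rebase does:
#
#   wctx = overlayworkingctx(repo)
#   wctx.setbase(repo[b'.'])
#   wctx.write(b'foo.txt', b'contents\n')
#   mctx = wctx.tomemctx(b'commit message', user=b'user@example.com')
#   newnode = repo.commitctx(mctx)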
2464 2468 def isdirty(self, path):
2465 2469 return path in self._cache
2466 2470
2467 2471 def isempty(self):
2468 2472 # We need to discard any keys that are actually clean before the empty
2469 2473 # commit check.
2470 2474 self._compact()
2471 2475 return len(self._cache) == 0
2472 2476
2473 2477 def clean(self):
2474 2478 self._cache = {}
2475 2479
2476 2480 def _compact(self):
2477 2481 """Removes keys from the cache that are actually clean, by comparing
2478 2482 them with the underlying context.
2479 2483
2480 2484 This can occur during the merge process, e.g. by passing --tool :local
2481 2485 to resolve a conflict.
2482 2486 """
2483 2487 keys = []
2484 2488 # This won't be perfect, but can help performance significantly when
2485 2489 # using things like remotefilelog.
2486 2490 scmutil.prefetchfiles(
2487 2491 self.repo(),
2488 2492 [self.p1().rev()],
2489 2493 scmutil.matchfiles(self.repo(), self._cache.keys()),
2490 2494 )
2491 2495
2492 2496 for path in self._cache.keys():
2493 2497 cache = self._cache[path]
2494 2498 try:
2495 2499 underlying = self._wrappedctx[path]
2496 2500 if (
2497 2501 underlying.data() == cache[b'data']
2498 2502 and underlying.flags() == cache[b'flags']
2499 2503 ):
2500 2504 keys.append(path)
2501 2505 except error.ManifestLookupError:
2502 2506 # Path not in the underlying manifest (created).
2503 2507 continue
2504 2508
2505 2509 for path in keys:
2506 2510 del self._cache[path]
2507 2511 return keys
2508 2512
2509 2513 def _markdirty(
2510 2514 self, path, exists, data=None, date=None, flags=b'', copied=None
2511 2515 ):
2512 2516 # data not provided, let's see if we already have some; if not, let's
2513 2517 # grab it from our underlying context, so that we always have data if
2514 2518 # the file is marked as existing.
2515 2519 if exists and data is None:
2516 2520 oldentry = self._cache.get(path) or {}
2517 2521 data = oldentry.get(b'data')
2518 2522 if data is None:
2519 2523 data = self._wrappedctx[path].data()
2520 2524
2521 2525 self._cache[path] = {
2522 2526 b'exists': exists,
2523 2527 b'data': data,
2524 2528 b'date': date,
2525 2529 b'flags': flags,
2526 2530 b'copied': copied,
2527 2531 }
2528 2532
2529 2533 def filectx(self, path, filelog=None):
2530 2534 return overlayworkingfilectx(
2531 2535 self._repo, path, parent=self, filelog=filelog
2532 2536 )
2533 2537
2534 2538
2535 2539 class overlayworkingfilectx(committablefilectx):
2536 2540 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2537 2541 cache, which can be flushed through later by calling ``flush()``."""
2538 2542
2539 2543 def __init__(self, repo, path, filelog=None, parent=None):
2540 2544 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2541 2545 self._repo = repo
2542 2546 self._parent = parent
2543 2547 self._path = path
2544 2548
2545 2549 def cmp(self, fctx):
2546 2550 return self.data() != fctx.data()
2547 2551
2548 2552 def changectx(self):
2549 2553 return self._parent
2550 2554
2551 2555 def data(self):
2552 2556 return self._parent.data(self._path)
2553 2557
2554 2558 def date(self):
2555 2559 return self._parent.filedate(self._path)
2556 2560
2557 2561 def exists(self):
2558 2562 return self.lexists()
2559 2563
2560 2564 def lexists(self):
2561 2565 return self._parent.exists(self._path)
2562 2566
2563 2567 def copysource(self):
2564 2568 return self._parent.copydata(self._path)
2565 2569
2566 2570 def size(self):
2567 2571 return self._parent.size(self._path)
2568 2572
2569 2573 def markcopied(self, origin):
2570 2574 self._parent.markcopied(self._path, origin)
2571 2575
2572 2576 def audit(self):
2573 2577 pass
2574 2578
2575 2579 def flags(self):
2576 2580 return self._parent.flags(self._path)
2577 2581
2578 2582 def setflags(self, islink, isexec):
2579 2583 return self._parent.setflags(self._path, islink, isexec)
2580 2584
2581 2585 def write(self, data, flags, backgroundclose=False, **kwargs):
2582 2586 return self._parent.write(self._path, data, flags, **kwargs)
2583 2587
2584 2588 def remove(self, ignoremissing=False):
2585 2589 return self._parent.remove(self._path)
2586 2590
2587 2591 def clearunknown(self):
2588 2592 pass
2589 2593
2590 2594
2591 2595 class workingcommitctx(workingctx):
2592 2596 """A workingcommitctx object makes access to data related to
2593 2597 the revision being committed convenient.
2594 2598
2595 2599 This hides changes in the working directory, if they aren't
2596 2600 committed in this context.
2597 2601 """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

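# A minimal sketch (not part of Mercurial) of what workingcommitctx enables:
# only the files named in the status passed at construction show up as
# changed, so a partial commit can hide other dirty files. ``repo`` is
# assumed to be an open localrepo with a modified, tracked ``a.txt``.
def _example_workingcommitctx(repo):
    status = scmutil.status([b'a.txt'], [], [], [], [], [], [])
    ctx = workingcommitctx(repo, status, text=b'commit only a.txt')
    # Other dirty working-directory files appear clean through ``ctx``.
    return ctx
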

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

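# A small sketch of the wrapper above in use: the wrapped function runs at
# most once per path, which matters when a commit consults the same file
# context repeatedly (the names below are illustrative only).
def _example_cachingfilectxfn(repo, mctx):
    def expensivefilectxfn(repo, memctx, path):
        # imagine a costly data retrieval here
        return memfilectx(repo, memctx, path, b'data for ' + path)

    cached = makecachingfilectxfn(expensivefilectxfn)
    first = cached(repo, mctx, b'f.txt')
    second = cached(repo, mctx, b'f.txt')
    assert first is second  # built once, then served from the cache
    return cached
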

def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )

    return getfilectx

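# Illustrative sketch: memfilefromctx() lets an existing changectx serve as
# the file store for a new in-memory commit, e.g. to re-create a revision
# with a different description (``repo`` and the use of b'.' are assumptions;
# files removed in the revision would still need a filectxfn returning None).
def _example_recreate(repo):
    origin = repo[b'.']
    return memctx(
        repo,
        parents=(origin.p1().node(), None),
        text=b'recreated commit',
        files=origin.files(),
        filectxfn=memfilefromctx(origin),
    )
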

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx

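# A hypothetical stand-in for a patchstore, sketching the interface consumed
# above: any object whose ``getfile(path)`` returns ``(data, (islink, isexec),
# copysource)``, or ``(None, None, None)`` for a removed file, will do.
class _dictpatchstore(object):
    def __init__(self, files):
        # files: path -> (data, (islink, isexec), copysource)
        self._files = files

    def getfile(self, path):
        return self._files.get(path, (None, None, None))


# e.g. memfilefrompatch(_dictpatchstore({b'f': (b'x\n', (False, False), None)}))
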

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None in recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user is the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
2807 2811 # "1 < len(self._parents)" can't be used for checking
2808 2812 # existence of the 2nd parent, because "memctx._parents" is
2809 2813 # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

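# A minimal sketch (not part of Mercurial) of driving memctx directly; the
# names are illustrative, and ``repo`` is assumed to be a localrepo whose
# caller holds the locks that localrepo.commitctx() expects.
def _example_memcommit(repo):
    def filectxfn(repo, memctx, path):
        if path == b'removed.txt':
            return None  # signals removal, as described in the class docstring
        return memfilectx(repo, memctx, path, b'new contents\n')

    ctx = memctx(
        repo,
        parents=(repo[b'.'].node(), None),  # None stands in for the null parent
        text=b'example in-memory commit',
        files=[b'added.txt', b'removed.txt'],
        filectxfn=filectxfn,
    )
    return ctx.commit()  # the node of the newly created changeset
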

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = b'l'
        elif isexec:
            self._flags = b'x'
        else:
            self._flags = b''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data


class metadataonlyctx(committablectx):
2881 2885 """Like memctx but it's reusing the manifest of different commit.
2882 2886 Intended to be used by lightweight operations that are creating
2883 2887 metadata-only changes.
2884 2888
2885 2889 Revision information is supplied at initialization time. 'repo' is the
2886 2890 current localrepo, 'ctx' is original revision which manifest we're reuisng
2887 2891 'parents' is a sequence of two parent revisions identifiers (pass None for
2888 2892 every missing parent), 'text' is the commit.
2889 2893
2890 2894 user receives the committer name and defaults to current repository
2891 2895 username, date is the commit date in any format supported by
2892 2896 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2893 2897 metadata or is left empty.
2894 2898 """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
2964 2968 """Calculate exact status from ``files`` specified in the ``origctx``
2965 2969 and parents manifests.
2966 2970 """
2967 2971 man1 = self.p1().manifest()
2968 2972 p2 = self._parents[1]
2969 2973 # "1 < len(self._parents)" can't be used for checking
2970 2974 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2971 2975 # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

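# Sketch (illustrative, not part of Mercurial) of a metadata-only rewrite:
# re-commit an existing revision under a new user while reusing its manifest.
# Obsoleting or stripping the predecessor is assumed to happen elsewhere.
def _example_reauthor(repo, rev, newuser):
    origin = repo[rev]
    ctx = metadataonlyctx(
        repo,
        origin,
        parents=[p.node() for p in origin.parents()],
        user=newuser,
    )
    return ctx.commit()
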

class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = b'l' in self.flags() or b'l' in fctx.flags()
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
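

# Sketch of using arbitraryfilectx on a plain file outside any checkout,
# e.g. comparing a temporary file against a tracked one (the paths and the
# helper name are illustrative only).
def _example_comparetemp(repo, tmppath, trackedpath):
    afctx = arbitraryfilectx(tmppath, repo=repo)
    wfctx = repo[None][trackedpath]  # workingfilectx for a tracked file
    return afctx.cmp(wfctx)  # True if the contents differ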