overlayworkingctx: rename misleadingly named `isempty()` method...
Manuel Jacob
r45647:83f75f1e default
@@ -1,2250 +1,2252 @@
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 https://mercurial-scm.org/wiki/RebaseExtension
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 import errno
20 20 import os
21 21
22 22 from mercurial.i18n import _
23 23 from mercurial.node import (
24 24 nullrev,
25 25 short,
26 26 )
27 27 from mercurial.pycompat import open
28 28 from mercurial import (
29 29 bookmarks,
30 30 cmdutil,
31 31 commands,
32 32 copies,
33 33 destutil,
34 34 dirstateguard,
35 35 error,
36 36 extensions,
37 37 hg,
38 38 merge as mergemod,
39 39 mergestate as mergestatemod,
40 40 mergeutil,
41 41 node as nodemod,
42 42 obsolete,
43 43 obsutil,
44 44 patch,
45 45 phases,
46 46 pycompat,
47 47 registrar,
48 48 repair,
49 49 revset,
50 50 revsetlang,
51 51 rewriteutil,
52 52 scmutil,
53 53 smartset,
54 54 state as statemod,
55 55 util,
56 56 )
57 57
58 58 # The following constants are used throughout the rebase module. The ordering of
59 59 # their values must be maintained.
60 60
61 61 # Indicates that a revision needs to be rebased
62 62 revtodo = -1
63 63 revtodostr = b'-1'
64 64
65 65 # legacy revstates no longer needed in current code
66 66 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
67 67 legacystates = {b'-2', b'-3', b'-4', b'-5'}
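# For example, an old client could have recorded b'-3' (revignored) as the
# "new rev" of a source revision; _read() below simply skips state entries
# whose new rev falls in this set.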
68 68
69 69 cmdtable = {}
70 70 command = registrar.command(cmdtable)
71 71 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
72 72 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
73 73 # be specifying the version(s) of Mercurial they are tested with, or
74 74 # leave the attribute unspecified.
75 75 testedwith = b'ships-with-hg-core'
76 76
77 77
78 78 def _nothingtorebase():
79 79 return 1
80 80
81 81
82 82 def _savegraft(ctx, extra):
83 83 s = ctx.extra().get(b'source', None)
84 84 if s is not None:
85 85 extra[b'source'] = s
86 86 s = ctx.extra().get(b'intermediate-source', None)
87 87 if s is not None:
88 88 extra[b'intermediate-source'] = s
89 89
90 90
91 91 def _savebranch(ctx, extra):
92 92 extra[b'branch'] = ctx.branch()
93 93
94 94
95 95 def _destrebase(repo, sourceset, destspace=None):
96 96 """small wrapper around destmerge to pass the right extra args
97 97
98 98 Please wrap destutil.destmerge instead."""
99 99 return destutil.destmerge(
100 100 repo,
101 101 action=b'rebase',
102 102 sourceset=sourceset,
103 103 onheadcheck=False,
104 104 destspace=destspace,
105 105 )
106 106
107 107
108 108 revsetpredicate = registrar.revsetpredicate()
109 109
110 110
111 111 @revsetpredicate(b'_destrebase')
112 112 def _revsetdestrebase(repo, subset, x):
113 113 # ``_rebasedefaultdest()``
114 114
115 115 # default destination for rebase.
116 116 # XXX: Currently private because I expect the signature to change.
117 117 # XXX: - bailing out in case of ambiguity vs returning all data.
118 118 # i18n: "_rebasedefaultdest" is a keyword
119 119 sourceset = None
120 120 if x is not None:
121 121 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
122 122 return subset & smartset.baseset([_destrebase(repo, sourceset)])
123 123
124 124
125 125 @revsetpredicate(b'_destautoorphanrebase')
126 126 def _revsetdestautoorphanrebase(repo, subset, x):
127 127 # ``_destautoorphanrebase()``
128 128
129 129 # automatic rebase destination for a single orphan revision.
130 130 unfi = repo.unfiltered()
131 131 obsoleted = unfi.revs(b'obsolete()')
132 132
133 133 src = revset.getset(repo, subset, x).first()
134 134
135 135 # Empty src or already obsoleted - Do not return a destination
136 136 if not src or src in obsoleted:
137 137 return smartset.baseset()
138 138 dests = destutil.orphanpossibledestination(repo, src)
139 139 if len(dests) > 1:
140 140 raise error.Abort(
141 141 _(b"ambiguous automatic rebase: %r could end up on any of %r")
142 142 % (src, dests)
143 143 )
144 144 # We have zero or one destination, so we can just return here.
145 145 return smartset.baseset(dests)
146 146
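# This internal predicate backs the experimental --auto-orphans flag, which
# rewrites opts so that --dest becomes b'_destautoorphanrebase(SRC)' (see
# rebase() below). A sketch of the user-facing invocation:
#   hg rebase --auto-orphans 'orphan()'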
147 147
148 148 def _ctxdesc(ctx):
149 149 """short description for a context"""
150 150 desc = b'%d:%s "%s"' % (
151 151 ctx.rev(),
152 152 ctx,
153 153 ctx.description().split(b'\n', 1)[0],
154 154 )
155 155 repo = ctx.repo()
156 156 names = []
157 157 for nsname, ns in pycompat.iteritems(repo.names):
158 158 if nsname == b'branches':
159 159 continue
160 160 names.extend(ns.names(repo, ctx.node()))
161 161 if names:
162 162 desc += b' (%s)' % b' '.join(names)
163 163 return desc
164 164
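# A sketch of _ctxdesc() output for a hypothetical changeset 42 carrying a
# bookmark (branch names are skipped above; other namespace names such as
# bookmarks and tags land in the trailing parentheses):
#   b'42:f3a1b2c4d5e6 "fix frobnication of widgets" (feature-bookmark)'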
165 165
166 166 class rebaseruntime(object):
167 167 """This class is a container for rebase runtime state"""
168 168
169 169 def __init__(self, repo, ui, inmemory=False, opts=None):
170 170 if opts is None:
171 171 opts = {}
172 172
173 173 # prepared: whether we have rebasestate prepared or not. Currently it
174 174 # decides whether "self.repo" is unfiltered or not.
175 175 # The rebasestate has explicit hash to hash instructions not depending
176 176 # on visibility. If rebasestate exists (in-memory or on-disk), use
177 177 # unfiltered repo to avoid visibility issues.
178 178 # Before knowing rebasestate (i.e. when starting a new rebase (not
179 179 # --continue or --abort)), the original repo should be used so
180 180 # visibility-dependent revsets are correct.
181 181 self.prepared = False
182 182 self.resume = False
183 183 self._repo = repo
184 184
185 185 self.ui = ui
186 186 self.opts = opts
187 187 self.originalwd = None
188 188 self.external = nullrev
189 189 # Mapping between the old revision id and either the new rebased
190 190 # revision or what needs to be done with the old revision. The state
191 191 # dict contains most of the rebase progress state.
192 192 self.state = {}
193 193 self.activebookmark = None
194 194 self.destmap = {}
195 195 self.skipped = set()
196 196
197 197 self.collapsef = opts.get(b'collapse', False)
198 198 self.collapsemsg = cmdutil.logmessage(ui, opts)
199 199 self.date = opts.get(b'date', None)
200 200
201 201 e = opts.get(b'extrafn') # internal, used by e.g. hgsubversion
202 202 self.extrafns = [_savegraft]
203 203 if e:
204 204 self.extrafns = [e]
205 205
206 206 self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
207 207 self.keepf = opts.get(b'keep', False)
208 208 self.keepbranchesf = opts.get(b'keepbranches', False)
209 209 self.obsoletenotrebased = {}
210 210 self.obsoletewithoutsuccessorindestination = set()
211 211 self.inmemory = inmemory
212 212 self.stateobj = statemod.cmdstate(repo, b'rebasestate')
213 213
214 214 @property
215 215 def repo(self):
216 216 if self.prepared:
217 217 return self._repo.unfiltered()
218 218 else:
219 219 return self._repo
220 220
221 221 def storestatus(self, tr=None):
222 222 """Store the current status to allow recovery"""
223 223 if tr:
224 224 tr.addfilegenerator(
225 225 b'rebasestate',
226 226 (b'rebasestate',),
227 227 self._writestatus,
228 228 location=b'plain',
229 229 )
230 230 else:
231 231 with self.repo.vfs(b"rebasestate", b"w") as f:
232 232 self._writestatus(f)
233 233
234 234 def _writestatus(self, f):
235 235 repo = self.repo
236 236 assert repo.filtername is None
237 237 f.write(repo[self.originalwd].hex() + b'\n')
238 238 # was "dest". We now write dest per src root below.
239 239 f.write(b'\n')
240 240 f.write(repo[self.external].hex() + b'\n')
241 241 f.write(b'%d\n' % int(self.collapsef))
242 242 f.write(b'%d\n' % int(self.keepf))
243 243 f.write(b'%d\n' % int(self.keepbranchesf))
244 244 f.write(b'%s\n' % (self.activebookmark or b''))
245 245 destmap = self.destmap
246 246 for d, v in pycompat.iteritems(self.state):
247 247 oldrev = repo[d].hex()
248 248 if v >= 0:
249 249 newrev = repo[v].hex()
250 250 else:
251 251 newrev = b"%d" % v
252 252 destnode = repo[destmap[d]].hex()
253 253 f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
254 254 repo.ui.debug(b'rebase status stored\n')
255 255
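# For reference, a sketch of the .hg/rebasestate layout produced by
# _writestatus() above (hashes elided):
#   line 1: originalwd hash
#   line 2: empty (legacy single-dest slot; dest is now stored per source)
#   line 3: external parent hash
#   lines 4-6: collapse, keep, keepbranches flags as 0/1
#   line 7: active bookmark name (may be empty)
#   rest: oldrev:newrev:destnode triples, one per source revision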
256 256 def restorestatus(self):
257 257 """Restore a previously stored status"""
258 258 if not self.stateobj.exists():
259 259 cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))
260 260
261 261 data = self._read()
262 262 self.repo.ui.debug(b'rebase status resumed\n')
263 263
264 264 self.originalwd = data[b'originalwd']
265 265 self.destmap = data[b'destmap']
266 266 self.state = data[b'state']
267 267 self.skipped = data[b'skipped']
268 268 self.collapsef = data[b'collapse']
269 269 self.keepf = data[b'keep']
270 270 self.keepbranchesf = data[b'keepbranches']
271 271 self.external = data[b'external']
272 272 self.activebookmark = data[b'activebookmark']
273 273
274 274 def _read(self):
275 275 self.prepared = True
276 276 repo = self.repo
277 277 assert repo.filtername is None
278 278 data = {
279 279 b'keepbranches': None,
280 280 b'collapse': None,
281 281 b'activebookmark': None,
282 282 b'external': nullrev,
283 283 b'keep': None,
284 284 b'originalwd': None,
285 285 }
286 286 legacydest = None
287 287 state = {}
288 288 destmap = {}
289 289
290 290 if True:  # no-op guard kept to preserve the block's indentation
291 291 f = repo.vfs(b"rebasestate")
292 292 for i, l in enumerate(f.read().splitlines()):
293 293 if i == 0:
294 294 data[b'originalwd'] = repo[l].rev()
295 295 elif i == 1:
296 296 # this line should be empty in newer versions, but legacy
297 297 # clients may still use it
298 298 if l:
299 299 legacydest = repo[l].rev()
300 300 elif i == 2:
301 301 data[b'external'] = repo[l].rev()
302 302 elif i == 3:
303 303 data[b'collapse'] = bool(int(l))
304 304 elif i == 4:
305 305 data[b'keep'] = bool(int(l))
306 306 elif i == 5:
307 307 data[b'keepbranches'] = bool(int(l))
308 308 elif i == 6 and not (len(l) == 81 and b':' in l):
309 309 # line 6 is a recent addition, so for backwards
310 310 # compatibility check that the line doesn't look like the
311 311 # oldrev:newrev lines
312 312 data[b'activebookmark'] = l
313 313 else:
314 314 args = l.split(b':')
315 315 oldrev = repo[args[0]].rev()
316 316 newrev = args[1]
317 317 if newrev in legacystates:
318 318 continue
319 319 if len(args) > 2:
320 320 destrev = repo[args[2]].rev()
321 321 else:
322 322 destrev = legacydest
323 323 destmap[oldrev] = destrev
324 324 if newrev == revtodostr:
325 325 state[oldrev] = revtodo
326 326 # Legacy compat special case
327 327 else:
328 328 state[oldrev] = repo[newrev].rev()
329 329
330 330 if data[b'keepbranches'] is None:
331 331 raise error.Abort(_(b'.hg/rebasestate is incomplete'))
332 332
333 333 data[b'destmap'] = destmap
334 334 data[b'state'] = state
335 335 skipped = set()
336 336 # recompute the set of skipped revs
337 337 if not data[b'collapse']:
338 338 seen = set(destmap.values())
339 339 for old, new in sorted(state.items()):
340 340 if new != revtodo and new in seen:
341 341 skipped.add(old)
342 342 seen.add(new)
343 343 data[b'skipped'] = skipped
344 344 repo.ui.debug(
345 345 b'computed skipped revs: %s\n'
346 346 % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
347 347 )
348 348
349 349 return data
350 350
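# A sketch of the dict returned by _read(), using hypothetical revs 10-16:
#   {b'originalwd': 10, b'external': -1, b'collapse': False, b'keep': False,
#    b'keepbranches': False, b'activebookmark': None,
#    b'destmap': {11: 15, 12: 15}, b'state': {11: 16, 12: -1},
#    b'skipped': set()}
# where -1 (revtodo) marks rev 12 as not yet rebased.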
351 351 def _handleskippingobsolete(self, obsoleterevs, destmap):
352 352 """Compute structures necessary for skipping obsolete revisions
353 353
354 354 obsoleterevs: iterable of all obsolete revisions in rebaseset
355 355 destmap: {srcrev: destrev} destination revisions
356 356 """
357 357 self.obsoletenotrebased = {}
358 358 if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
359 359 return
360 360 obsoleteset = set(obsoleterevs)
361 361 (
362 362 self.obsoletenotrebased,
363 363 self.obsoletewithoutsuccessorindestination,
364 364 obsoleteextinctsuccessors,
365 365 ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
366 366 skippedset = set(self.obsoletenotrebased)
367 367 skippedset.update(self.obsoletewithoutsuccessorindestination)
368 368 skippedset.update(obsoleteextinctsuccessors)
369 369 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
370 370
371 371 def _prepareabortorcontinue(
372 372 self, isabort, backup=True, suppwarns=False, dryrun=False, confirm=False
373 373 ):
374 374 self.resume = True
375 375 try:
376 376 self.restorestatus()
377 377 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
378 378 except error.RepoLookupError:
379 379 if isabort:
380 380 clearstatus(self.repo)
381 381 clearcollapsemsg(self.repo)
382 382 self.repo.ui.warn(
383 383 _(
384 384 b'rebase aborted (no revision is removed,'
385 385 b' only broken state is cleared)\n'
386 386 )
387 387 )
388 388 return 0
389 389 else:
390 390 msg = _(b'cannot continue inconsistent rebase')
391 391 hint = _(b'use "hg rebase --abort" to clear broken state')
392 392 raise error.Abort(msg, hint=hint)
393 393
394 394 if isabort:
395 395 backup = backup and self.backupf
396 396 return self._abort(
397 397 backup=backup,
398 398 suppwarns=suppwarns,
399 399 dryrun=dryrun,
400 400 confirm=confirm,
401 401 )
402 402
403 403 def _preparenewrebase(self, destmap):
404 404 if not destmap:
405 405 return _nothingtorebase()
406 406
407 407 rebaseset = destmap.keys()
408 408 if not self.keepf:
409 409 try:
410 410 rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
411 411 except error.Abort as e:
412 412 if e.hint is None:
413 413 e.hint = _(b'use --keep to keep original changesets')
414 414 raise e
415 415
416 416 result = buildstate(self.repo, destmap, self.collapsef)
417 417
418 418 if not result:
419 419 # Empty state built, nothing to rebase
420 420 self.ui.status(_(b'nothing to rebase\n'))
421 421 return _nothingtorebase()
422 422
423 423 (self.originalwd, self.destmap, self.state) = result
424 424 if self.collapsef:
425 425 dests = set(self.destmap.values())
426 426 if len(dests) != 1:
427 427 raise error.Abort(
428 428 _(b'--collapse does not work with multiple destinations')
429 429 )
430 430 destrev = next(iter(dests))
431 431 destancestors = self.repo.changelog.ancestors(
432 432 [destrev], inclusive=True
433 433 )
434 434 self.external = externalparent(self.repo, self.state, destancestors)
435 435
436 436 for destrev in sorted(set(destmap.values())):
437 437 dest = self.repo[destrev]
438 438 if dest.closesbranch() and not self.keepbranchesf:
439 439 self.ui.status(_(b'reopening closed branch head %s\n') % dest)
440 440
441 441 self.prepared = True
442 442
443 443 def _assignworkingcopy(self):
444 444 if self.inmemory:
445 445 from mercurial.context import overlayworkingctx
446 446
447 447 self.wctx = overlayworkingctx(self.repo)
448 448 self.repo.ui.debug(b"rebasing in-memory\n")
449 449 else:
450 450 self.wctx = self.repo[None]
451 451 self.repo.ui.debug(b"rebasing on disk\n")
452 452 self.repo.ui.log(
453 453 b"rebase",
454 454 b"using in-memory rebase: %r\n",
455 455 self.inmemory,
456 456 rebase_imm_used=self.inmemory,
457 457 )
458 458
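# Note: the in-memory path above hands commits to an overlayworkingctx,
# whose pending-change check is the nofilechanges() accessor (renamed in
# this changeset from the misleadingly named isempty()); commitmemorynode()
# below relies on it to skip emptied-out revisions.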
459 459 def _performrebase(self, tr):
460 460 self._assignworkingcopy()
461 461 repo, ui = self.repo, self.ui
462 462 if self.keepbranchesf:
463 463 # insert _savebranch at the start of extrafns so if
464 464 # there's a user-provided extrafn it can clobber branch if
465 465 # desired
466 466 self.extrafns.insert(0, _savebranch)
467 467 if self.collapsef:
468 468 branches = set()
469 469 for rev in self.state:
470 470 branches.add(repo[rev].branch())
471 471 if len(branches) > 1:
472 472 raise error.Abort(
473 473 _(b'cannot collapse multiple named branches')
474 474 )
475 475
476 476 # Calculate self.obsoletenotrebased
477 477 obsrevs = _filterobsoleterevs(self.repo, self.state)
478 478 self._handleskippingobsolete(obsrevs, self.destmap)
479 479
480 480 # Keep track of the active bookmarks in order to reset them later
481 481 self.activebookmark = self.activebookmark or repo._activebookmark
482 482 if self.activebookmark:
483 483 bookmarks.deactivate(repo)
484 484
485 485 # Store the state before we begin so users can run 'hg rebase --abort'
486 486 # if we fail before the transaction closes.
487 487 self.storestatus()
488 488 if tr:
489 489 # When using single transaction, store state when transaction
490 490 # commits.
491 491 self.storestatus(tr)
492 492
493 493 cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
494 494 p = repo.ui.makeprogress(
495 495 _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
496 496 )
497 497
498 498 def progress(ctx):
499 499 p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))
500 500
501 501 allowdivergence = self.ui.configbool(
502 502 b'experimental', b'evolution.allowdivergence'
503 503 )
504 504 for subset in sortsource(self.destmap):
505 505 sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
506 506 if not allowdivergence:
507 507 sortedrevs -= self.repo.revs(
508 508 b'descendants(%ld) and not %ld',
509 509 self.obsoletewithoutsuccessorindestination,
510 510 self.obsoletewithoutsuccessorindestination,
511 511 )
512 512 for rev in sortedrevs:
513 513 self._rebasenode(tr, rev, allowdivergence, progress)
514 514 p.complete()
515 515 ui.note(_(b'rebase merging completed\n'))
516 516
517 517 def _concludenode(self, rev, p1, editor, commitmsg=None):
518 518 '''Commit the wd changes with parents p1 and p2.
519 519
520 520 Reuse commit info from rev but also store useful information in extra.
521 521 Return node of committed revision.'''
522 522 repo = self.repo
523 523 ctx = repo[rev]
524 524 if commitmsg is None:
525 525 commitmsg = ctx.description()
526 526 date = self.date
527 527 if date is None:
528 528 date = ctx.date()
529 529 extra = {b'rebase_source': ctx.hex()}
530 530 for c in self.extrafns:
531 531 c(ctx, extra)
532 532 destphase = max(ctx.phase(), phases.draft)
533 533 overrides = {(b'phases', b'new-commit'): destphase}
534 534 with repo.ui.configoverride(overrides, b'rebase'):
535 535 if self.inmemory:
536 536 newnode = commitmemorynode(
537 537 repo,
538 538 wctx=self.wctx,
539 539 extra=extra,
540 540 commitmsg=commitmsg,
541 541 editor=editor,
542 542 user=ctx.user(),
543 543 date=date,
544 544 )
545 545 mergestatemod.mergestate.clean(repo)
546 546 else:
547 547 newnode = commitnode(
548 548 repo,
549 549 extra=extra,
550 550 commitmsg=commitmsg,
551 551 editor=editor,
552 552 user=ctx.user(),
553 553 date=date,
554 554 )
555 555
556 556 return newnode
557 557
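# A sketch of the ``extra`` dict assembled by _concludenode() for a
# hypothetical source changeset (hex shortened):
#   {b'rebase_source': b'83f75f1e...', b'branch': b'default'}
# b'branch' appears only when _savebranch was inserted (--keepbranches);
# _savegraft likewise copies graft markers when present.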
558 558 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
559 559 repo, ui, opts = self.repo, self.ui, self.opts
560 560 dest = self.destmap[rev]
561 561 ctx = repo[rev]
562 562 desc = _ctxdesc(ctx)
563 563 if self.state[rev] == rev:
564 564 ui.status(_(b'already rebased %s\n') % desc)
565 565 elif (
566 566 not allowdivergence
567 567 and rev in self.obsoletewithoutsuccessorindestination
568 568 ):
569 569 msg = (
570 570 _(
571 571 b'note: not rebasing %s and its descendants as '
572 572 b'this would cause divergence\n'
573 573 )
574 574 % desc
575 575 )
576 576 repo.ui.status(msg)
577 577 self.skipped.add(rev)
578 578 elif rev in self.obsoletenotrebased:
579 579 succ = self.obsoletenotrebased[rev]
580 580 if succ is None:
581 581 msg = _(b'note: not rebasing %s, it has no successor\n') % desc
582 582 else:
583 583 succdesc = _ctxdesc(repo[succ])
584 584 msg = _(
585 585 b'note: not rebasing %s, already in destination as %s\n'
586 586 ) % (desc, succdesc)
587 587 repo.ui.status(msg)
588 588 # Make clearrebased aware that state[rev] is not a true successor
589 589 self.skipped.add(rev)
590 590 # Record rev as moved to its desired destination in self.state.
591 591 # This helps bookmark and working parent movement.
592 592 dest = max(
593 593 adjustdest(repo, rev, self.destmap, self.state, self.skipped)
594 594 )
595 595 self.state[rev] = dest
596 596 elif self.state[rev] == revtodo:
597 597 ui.status(_(b'rebasing %s\n') % desc)
598 598 progressfn(ctx)
599 599 p1, p2, base = defineparents(
600 600 repo,
601 601 rev,
602 602 self.destmap,
603 603 self.state,
604 604 self.skipped,
605 605 self.obsoletenotrebased,
606 606 )
607 607 if self.resume and self.wctx.p1().rev() == p1:
608 608 repo.ui.debug(b'resuming interrupted rebase\n')
609 609 self.resume = False
610 610 else:
611 611 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
612 612 with ui.configoverride(overrides, b'rebase'):
613 613 stats = rebasenode(
614 614 repo,
615 615 rev,
616 616 p1,
617 617 p2,
618 618 base,
619 619 self.collapsef,
620 620 dest,
621 621 wctx=self.wctx,
622 622 )
623 623 if stats.unresolvedcount > 0:
624 624 if self.inmemory:
625 625 raise error.InMemoryMergeConflictsError()
626 626 else:
627 627 raise error.InterventionRequired(
628 628 _(
629 629 b'unresolved conflicts (see hg '
630 630 b'resolve, then hg rebase --continue)'
631 631 )
632 632 )
633 633 if not self.collapsef:
634 634 merging = p2 != nullrev
635 635 editform = cmdutil.mergeeditform(merging, b'rebase')
636 636 editor = cmdutil.getcommiteditor(
637 637 editform=editform, **pycompat.strkwargs(opts)
638 638 )
639 639 # We need to set parents again here just in case we're continuing
640 640 # a rebase started with an old hg version (before 9c9cfecd4600),
641 641 # because those old versions would have left us with two dirstate
642 642 # parents, and we don't want to create a merge commit here (unless
643 643 # we're rebasing a merge commit).
644 644 self.wctx.setparents(repo[p1].node(), repo[p2].node())
645 645 newnode = self._concludenode(rev, p1, editor)
646 646 else:
647 647 # Skip commit if we are collapsing
648 648 newnode = None
649 649 # Update the state
650 650 if newnode is not None:
651 651 self.state[rev] = repo[newnode].rev()
652 652 ui.debug(b'rebased as %s\n' % short(newnode))
653 653 else:
654 654 if not self.collapsef:
655 655 ui.warn(
656 656 _(
657 657 b'note: not rebasing %s, its destination already '
658 658 b'has all its changes\n'
659 659 )
660 660 % desc
661 661 )
662 662 self.skipped.add(rev)
663 663 self.state[rev] = p1
664 664 ui.debug(b'next revision set to %d\n' % p1)
665 665 else:
666 666 ui.status(
667 667 _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
668 668 )
669 669 if not tr:
670 670 # When not using single transaction, store state after each
671 671 # commit is completely done. On InterventionRequired, we thus
672 672 # won't store the status. Instead, we'll hit the "len(parents) == 2"
673 673 # case and realize that the commit was in progress.
674 674 self.storestatus()
675 675
676 676 def _finishrebase(self):
677 677 repo, ui, opts = self.repo, self.ui, self.opts
678 678 fm = ui.formatter(b'rebase', opts)
679 679 fm.startitem()
680 680 if self.collapsef:
681 681 p1, p2, _base = defineparents(
682 682 repo,
683 683 min(self.state),
684 684 self.destmap,
685 685 self.state,
686 686 self.skipped,
687 687 self.obsoletenotrebased,
688 688 )
689 689 editopt = opts.get(b'edit')
690 690 editform = b'rebase.collapse'
691 691 if self.collapsemsg:
692 692 commitmsg = self.collapsemsg
693 693 else:
694 694 commitmsg = b'Collapsed revision'
695 695 for rebased in sorted(self.state):
696 696 if rebased not in self.skipped:
697 697 commitmsg += b'\n* %s' % repo[rebased].description()
698 698 editopt = True
699 699 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
700 700 revtoreuse = max(self.state)
701 701
702 702 self.wctx.setparents(repo[p1].node(), repo[self.external].node())
703 703 newnode = self._concludenode(
704 704 revtoreuse, p1, editor, commitmsg=commitmsg
705 705 )
706 706
707 707 if newnode is not None:
708 708 newrev = repo[newnode].rev()
709 709 for oldrev in self.state:
710 710 self.state[oldrev] = newrev
711 711
712 712 if b'qtip' in repo.tags():
713 713 updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
714 714
715 715 # restore original working directory
716 716 # (we do this before stripping)
717 717 newwd = self.state.get(self.originalwd, self.originalwd)
718 718 if newwd < 0:
719 719 # original directory is a parent of rebase set root or ignored
720 720 newwd = self.originalwd
721 721 if newwd not in [c.rev() for c in repo[None].parents()]:
722 722 ui.note(_(b"update back to initial working directory parent\n"))
723 723 hg.updaterepo(repo, newwd, overwrite=False)
724 724
725 725 collapsedas = None
726 726 if self.collapsef and not self.keepf:
727 727 collapsedas = newnode
728 728 clearrebased(
729 729 ui,
730 730 repo,
731 731 self.destmap,
732 732 self.state,
733 733 self.skipped,
734 734 collapsedas,
735 735 self.keepf,
736 736 fm=fm,
737 737 backup=self.backupf,
738 738 )
739 739
740 740 clearstatus(repo)
741 741 clearcollapsemsg(repo)
742 742
743 743 ui.note(_(b"rebase completed\n"))
744 744 util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
745 745 if self.skipped:
746 746 skippedlen = len(self.skipped)
747 747 ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
748 748 fm.end()
749 749
750 750 if (
751 751 self.activebookmark
752 752 and self.activebookmark in repo._bookmarks
753 753 and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
754 754 ):
755 755 bookmarks.activate(repo, self.activebookmark)
756 756
757 757 def _abort(self, backup=True, suppwarns=False, dryrun=False, confirm=False):
758 758 '''Restore the repository to its original state.'''
759 759
760 760 repo = self.repo
761 761 try:
762 762 # If the first commits in the rebased set get skipped during the
763 763 # rebase, their values within the state mapping will be the dest
764 764 # rev id. The rebased list must not contain the dest rev
765 765 # (issue4896)
766 766 rebased = [
767 767 s
768 768 for r, s in self.state.items()
769 769 if s >= 0 and s != r and s != self.destmap[r]
770 770 ]
771 771 immutable = [d for d in rebased if not repo[d].mutable()]
772 772 cleanup = True
773 773 if immutable:
774 774 repo.ui.warn(
775 775 _(b"warning: can't clean up public changesets %s\n")
776 776 % b', '.join(bytes(repo[r]) for r in immutable),
777 777 hint=_(b"see 'hg help phases' for details"),
778 778 )
779 779 cleanup = False
780 780
781 781 descendants = set()
782 782 if rebased:
783 783 descendants = set(repo.changelog.descendants(rebased))
784 784 if descendants - set(rebased):
785 785 repo.ui.warn(
786 786 _(
787 787 b"warning: new changesets detected on "
788 788 b"destination branch, can't strip\n"
789 789 )
790 790 )
791 791 cleanup = False
792 792
793 793 if cleanup:
794 794 if rebased:
795 795 strippoints = [
796 796 c.node() for c in repo.set(b'roots(%ld)', rebased)
797 797 ]
798 798
799 799 updateifonnodes = set(rebased)
800 800 updateifonnodes.update(self.destmap.values())
801 801
802 802 if not dryrun and not confirm:
803 803 updateifonnodes.add(self.originalwd)
804 804
805 805 shouldupdate = repo[b'.'].rev() in updateifonnodes
806 806
807 807 # Update away from the rebase if necessary
808 808 if shouldupdate:
809 809 mergemod.clean_update(repo[self.originalwd])
810 810
811 811 # Strip from the first rebased revision
812 812 if rebased:
813 813 repair.strip(repo.ui, repo, strippoints, backup=backup)
814 814
815 815 if self.activebookmark and self.activebookmark in repo._bookmarks:
816 816 bookmarks.activate(repo, self.activebookmark)
817 817
818 818 finally:
819 819 clearstatus(repo)
820 820 clearcollapsemsg(repo)
821 821 if not suppwarns:
822 822 repo.ui.warn(_(b'rebase aborted\n'))
823 823 return 0
824 824
825 825
826 826 @command(
827 827 b'rebase',
828 828 [
829 829 (
830 830 b's',
831 831 b'source',
832 832 [],
833 833 _(b'rebase the specified changesets and their descendants'),
834 834 _(b'REV'),
835 835 ),
836 836 (
837 837 b'b',
838 838 b'base',
839 839 [],
840 840 _(b'rebase everything from branching point of specified changeset'),
841 841 _(b'REV'),
842 842 ),
843 843 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
844 844 (
845 845 b'd',
846 846 b'dest',
847 847 b'',
848 848 _(b'rebase onto the specified changeset'),
849 849 _(b'REV'),
850 850 ),
851 851 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
852 852 (
853 853 b'm',
854 854 b'message',
855 855 b'',
856 856 _(b'use text as collapse commit message'),
857 857 _(b'TEXT'),
858 858 ),
859 859 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
860 860 (
861 861 b'l',
862 862 b'logfile',
863 863 b'',
864 864 _(b'read collapse commit message from file'),
865 865 _(b'FILE'),
866 866 ),
867 867 (b'k', b'keep', False, _(b'keep original changesets')),
868 868 (b'', b'keepbranches', False, _(b'keep original branch names')),
869 869 (b'D', b'detach', False, _(b'(DEPRECATED)')),
870 870 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
871 871 (b't', b'tool', b'', _(b'specify merge tool')),
872 872 (b'', b'stop', False, _(b'stop interrupted rebase')),
873 873 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
874 874 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
875 875 (
876 876 b'',
877 877 b'auto-orphans',
878 878 b'',
879 879 _(
880 880 b'automatically rebase orphan revisions '
881 881 b'in the specified revset (EXPERIMENTAL)'
882 882 ),
883 883 ),
884 884 ]
885 885 + cmdutil.dryrunopts
886 886 + cmdutil.formatteropts
887 887 + cmdutil.confirmopts,
888 888 _(b'[[-s REV]... | [-b REV]... | [-r REV]...] [-d REV] [OPTION]...'),
889 889 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
890 890 )
891 891 def rebase(ui, repo, **opts):
892 892 """move changeset (and descendants) to a different branch
893 893
894 894 Rebase uses repeated merging to graft changesets from one part of
895 895 history (the source) onto another (the destination). This can be
896 896 useful for linearizing *local* changes relative to a master
897 897 development tree.
898 898
899 899 Published commits cannot be rebased (see :hg:`help phases`).
900 900 To copy commits, see :hg:`help graft`.
901 901
902 902 If you don't specify a destination changeset (``-d/--dest``), rebase
903 903 will use the same logic as :hg:`merge` to pick a destination. If
904 904 the current branch contains exactly one other head, the other head
905 905 is merged with by default. Otherwise, an explicit revision with
906 906 which to merge must be provided. (The destination changeset is not
907 907 modified by rebasing, but new changesets are added as its
908 908 descendants.)
909 909
910 910 Here are the ways to select changesets:
911 911
912 912 1. Explicitly select them using ``--rev``.
913 913
914 914 2. Use ``--source`` to select a root changeset and include all of its
915 915 descendants.
916 916
917 917 3. Use ``--base`` to select a changeset; rebase will find ancestors
918 918 and their descendants which are not also ancestors of the destination.
919 919
920 920 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
921 921 rebase will use ``--base .`` as above.
922 922
923 923 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
924 924 can be used in ``--dest``. The destination is then calculated once per
925 925 source revision, with ``SRC`` substituted by that single source revision
926 926 and ``ALLSRC`` substituted by all source revisions.
927 927
928 928 Rebase will destroy original changesets unless you use ``--keep``.
929 929 It will also move your bookmarks (even if you use ``--keep``).
930 930
931 931 Some changesets may be dropped if they do not contribute changes
932 932 (e.g. merges from the destination branch).
933 933
934 934 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
935 935 a named branch with two heads. You will need to explicitly specify source
936 936 and/or destination.
937 937
938 938 If you need to use a tool to automate merge/conflict decisions, you
939 939 can specify one with ``--tool``, see :hg:`help merge-tools`.
940 940 As a caveat: the tool will not be used to mediate when a file was
941 941 deleted; there is no hook presently available for this.
942 942
943 943 If a rebase is interrupted to manually resolve a conflict, it can be
944 944 continued with --continue/-c, aborted with --abort/-a, or stopped with
945 945 --stop.
946 946
947 947 .. container:: verbose
948 948
949 949 Examples:
950 950
951 951 - move "local changes" (current commit back to branching point)
952 952 to the current branch tip after a pull::
953 953
954 954 hg rebase
955 955
956 956 - move a single changeset to the stable branch::
957 957
958 958 hg rebase -r 5f493448 -d stable
959 959
960 960 - splice a commit and all its descendants onto another part of history::
961 961
962 962 hg rebase --source c0c3 --dest 4cf9
963 963
964 964 - rebase everything on a branch marked by a bookmark onto the
965 965 default branch::
966 966
967 967 hg rebase --base myfeature --dest default
968 968
969 969 - collapse a sequence of changes into a single commit::
970 970
971 971 hg rebase --collapse -r 1520:1525 -d .
972 972
973 973 - move a named branch while preserving its name::
974 974
975 975 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
976 976
977 977 - stabilize orphaned changesets so history looks linear::
978 978
979 979 hg rebase -r 'orphan()-obsolete()'\
980 980 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
981 981 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
982 982
983 983 Configuration Options:
984 984
985 985 You can make rebase require a destination if you set the following config
986 986 option::
987 987
988 988 [commands]
989 989 rebase.requiredest = True
990 990
991 991 By default, rebase will close the transaction after each commit. For
992 992 performance purposes, you can configure rebase to use a single transaction
993 993 across the entire rebase. WARNING: This setting introduces a significant
994 994 risk of losing the work you've done in a rebase if the rebase aborts
995 995 unexpectedly::
996 996
997 997 [rebase]
998 998 singletransaction = True
999 999
1000 1000 By default, rebase writes to the working copy, but you can configure it to
1001 1001 run in-memory for better performance. When the rebase is not moving the
1002 1002 parent(s) of the working copy (AKA the "currently checked out changesets"),
1003 1003 this may also allow it to run even if the working copy is dirty::
1004 1004
1005 1005 [rebase]
1006 1006 experimental.inmemory = True
1007 1007
1008 1008 Return Values:
1009 1009
1010 1010 Returns 0 on success, 1 if nothing to rebase or there are
1011 1011 unresolved conflicts.
1012 1012
1013 1013 """
1014 1014 opts = pycompat.byteskwargs(opts)
1015 1015 inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
1016 1016 action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
1017 1017 if action:
1018 1018 cmdutil.check_incompatible_arguments(
1019 1019 opts, action, [b'confirm', b'dry_run']
1020 1020 )
1021 1021 cmdutil.check_incompatible_arguments(
1022 1022 opts, action, [b'rev', b'source', b'base', b'dest']
1023 1023 )
1024 1024 cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
1025 1025 cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
1026 1026
1027 1027 if action or repo.currenttransaction() is not None:
1028 1028 # in-memory rebase is not compatible with resuming rebases, nor with
1029 1029 # running inside an existing transaction, since the restart logic
1030 1030 # could fail the entire transaction.
1031 1031 inmemory = False
1032 1032
1033 1033 if opts.get(b'auto_orphans'):
1034 1034 disallowed_opts = set(opts) - {b'auto_orphans'}
1035 1035 cmdutil.check_incompatible_arguments(
1036 1036 opts, b'auto_orphans', disallowed_opts
1037 1037 )
1038 1038
1039 1039 userrevs = list(repo.revs(opts.get(b'auto_orphans')))
1040 1040 opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
1041 1041 opts[b'dest'] = b'_destautoorphanrebase(SRC)'
1042 1042
1043 1043 if opts.get(b'dry_run') or opts.get(b'confirm'):
1044 1044 return _dryrunrebase(ui, repo, action, opts)
1045 1045 elif action == b'stop':
1046 1046 rbsrt = rebaseruntime(repo, ui)
1047 1047 with repo.wlock(), repo.lock():
1048 1048 rbsrt.restorestatus()
1049 1049 if rbsrt.collapsef:
1050 1050 raise error.Abort(_(b"cannot stop in --collapse session"))
1051 1051 allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
1052 1052 if not (rbsrt.keepf or allowunstable):
1053 1053 raise error.Abort(
1054 1054 _(
1055 1055 b"cannot remove original changesets with"
1056 1056 b" unrebased descendants"
1057 1057 ),
1058 1058 hint=_(
1059 1059 b'either enable obsmarkers to allow unstable '
1060 1060 b'revisions or use --keep to keep original '
1061 1061 b'changesets'
1062 1062 ),
1063 1063 )
1064 1064 # update to the current working revision
1065 1065 # to clear interrupted merge
1066 1066 hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
1067 1067 rbsrt._finishrebase()
1068 1068 return 0
1069 1069 elif inmemory:
1070 1070 try:
1071 1071 # in-memory merge doesn't support conflicts, so if we hit any, abort
1072 1072 # and re-run as an on-disk merge.
1073 1073 overrides = {(b'rebase', b'singletransaction'): True}
1074 1074 with ui.configoverride(overrides, b'rebase'):
1075 1075 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
1076 1076 except error.InMemoryMergeConflictsError:
1077 1077 ui.warn(
1078 1078 _(
1079 1079 b'hit merge conflicts; re-running rebase without in-memory'
1080 1080 b' merge\n'
1081 1081 )
1082 1082 )
1083 1083 # TODO: Make in-memory merge not use the on-disk merge state, so
1084 1084 # we don't have to clean it here
1085 1085 mergestatemod.mergestate.clean(repo)
1086 1086 clearstatus(repo)
1087 1087 clearcollapsemsg(repo)
1088 1088 return _dorebase(ui, repo, action, opts, inmemory=False)
1089 1089 else:
1090 1090 return _dorebase(ui, repo, action, opts)
1091 1091
1092 1092
1093 1093 def _dryrunrebase(ui, repo, action, opts):
1094 1094 rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
1095 1095 confirm = opts.get(b'confirm')
1096 1096 if confirm:
1097 1097 ui.status(_(b'starting in-memory rebase\n'))
1098 1098 else:
1099 1099 ui.status(
1100 1100 _(b'starting dry-run rebase; repository will not be changed\n')
1101 1101 )
1102 1102 with repo.wlock(), repo.lock():
1103 1103 needsabort = True
1104 1104 try:
1105 1105 overrides = {(b'rebase', b'singletransaction'): True}
1106 1106 with ui.configoverride(overrides, b'rebase'):
1107 1107 _origrebase(
1108 1108 ui,
1109 1109 repo,
1110 1110 action,
1111 1111 opts,
1112 1112 rbsrt,
1113 1113 inmemory=True,
1114 1114 leaveunfinished=True,
1115 1115 )
1116 1116 except error.InMemoryMergeConflictsError:
1117 1117 ui.status(_(b'hit a merge conflict\n'))
1118 1118 return 1
1119 1119 except error.Abort:
1120 1120 needsabort = False
1121 1121 raise
1122 1122 else:
1123 1123 if confirm:
1124 1124 ui.status(_(b'rebase completed successfully\n'))
1125 1125 if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
1126 1126 # finish unfinished rebase
1127 1127 rbsrt._finishrebase()
1128 1128 else:
1129 1129 rbsrt._prepareabortorcontinue(
1130 1130 isabort=True,
1131 1131 backup=False,
1132 1132 suppwarns=True,
1133 1133 confirm=confirm,
1134 1134 )
1135 1135 needsabort = False
1136 1136 else:
1137 1137 ui.status(
1138 1138 _(
1139 1139 b'dry-run rebase completed successfully; run without'
1140 1140 b' -n/--dry-run to perform this rebase\n'
1141 1141 )
1142 1142 )
1143 1143 return 0
1144 1144 finally:
1145 1145 if needsabort:
1146 1146 # no need to store backup in case of dryrun
1147 1147 rbsrt._prepareabortorcontinue(
1148 1148 isabort=True,
1149 1149 backup=False,
1150 1150 suppwarns=True,
1151 1151 dryrun=opts.get(b'dry_run'),
1152 1152 )
1153 1153
1154 1154
1155 1155 def _dorebase(ui, repo, action, opts, inmemory=False):
1156 1156 rbsrt = rebaseruntime(repo, ui, inmemory, opts)
1157 1157 return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)
1158 1158
1159 1159
1160 1160 def _origrebase(
1161 1161 ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
1162 1162 ):
1163 1163 assert action != b'stop'
1164 1164 with repo.wlock(), repo.lock():
1165 1165 if opts.get(b'interactive'):
1166 1166 try:
1167 1167 if extensions.find(b'histedit'):
1168 1168 enablehistedit = b''
1169 1169 except KeyError:
1170 1170 enablehistedit = b" --config extensions.histedit="
1171 1171 help = b"hg%s help -e histedit" % enablehistedit
1172 1172 msg = (
1173 1173 _(
1174 1174 b"interactive history editing is supported by the "
1175 1175 b"'histedit' extension (see \"%s\")"
1176 1176 )
1177 1177 % help
1178 1178 )
1179 1179 raise error.Abort(msg)
1180 1180
1181 1181 if rbsrt.collapsemsg and not rbsrt.collapsef:
1182 1182 raise error.Abort(_(b'message can only be specified with collapse'))
1183 1183
1184 1184 if action:
1185 1185 if rbsrt.collapsef:
1186 1186 raise error.Abort(
1187 1187 _(b'cannot use collapse with continue or abort')
1188 1188 )
1189 1189 if action == b'abort' and opts.get(b'tool', False):
1190 1190 ui.warn(_(b'tool option will be ignored\n'))
1191 1191 if action == b'continue':
1192 1192 ms = mergestatemod.mergestate.read(repo)
1193 1193 mergeutil.checkunresolved(ms)
1194 1194
1195 1195 retcode = rbsrt._prepareabortorcontinue(
1196 1196 isabort=(action == b'abort')
1197 1197 )
1198 1198 if retcode is not None:
1199 1199 return retcode
1200 1200 else:
1201 1201 # search default destination in this space
1202 1202 # used in the 'hg pull --rebase' case, see issue 5214.
1203 1203 destspace = opts.get(b'_destspace')
1204 1204 destmap = _definedestmap(
1205 1205 ui,
1206 1206 repo,
1207 1207 inmemory,
1208 1208 opts.get(b'dest', None),
1209 1209 opts.get(b'source', []),
1210 1210 opts.get(b'base', []),
1211 1211 opts.get(b'rev', []),
1212 1212 destspace=destspace,
1213 1213 )
1214 1214 retcode = rbsrt._preparenewrebase(destmap)
1215 1215 if retcode is not None:
1216 1216 return retcode
1217 1217 storecollapsemsg(repo, rbsrt.collapsemsg)
1218 1218
1219 1219 tr = None
1220 1220
1221 1221 singletr = ui.configbool(b'rebase', b'singletransaction')
1222 1222 if singletr:
1223 1223 tr = repo.transaction(b'rebase')
1224 1224
1225 1225 # If `rebase.singletransaction` is enabled, wrap the entire operation in
1226 1226 # one transaction here. Otherwise, transactions are obtained when
1227 1227 # committing each node, which is slower but allows partial success.
1228 1228 with util.acceptintervention(tr):
1229 1229 # Same logic for the dirstate guard, except we don't create one when
1230 1230 # rebasing in-memory (it's not needed).
1231 1231 dsguard = None
1232 1232 if singletr and not inmemory:
1233 1233 dsguard = dirstateguard.dirstateguard(repo, b'rebase')
1234 1234 with util.acceptintervention(dsguard):
1235 1235 rbsrt._performrebase(tr)
1236 1236 if not leaveunfinished:
1237 1237 rbsrt._finishrebase()
1238 1238
1239 1239
1240 1240 def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace):
1241 1241 """use revisions argument to define destmap {srcrev: destrev}"""
1242 1242 if revf is None:
1243 1243 revf = []
1244 1244
1245 1245 # destspace is here to work around issues with `hg pull --rebase` see
1246 1246 # issue5214 for details
1247 1247
1248 1248 cmdutil.checkunfinished(repo)
1249 1249 if not inmemory:
1250 1250 cmdutil.bailifchanged(repo)
1251 1251
1252 1252 if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
1253 1253 raise error.Abort(
1254 1254 _(b'you must specify a destination'),
1255 1255 hint=_(b'use: hg rebase -d REV'),
1256 1256 )
1257 1257
1258 1258 dest = None
1259 1259
1260 1260 if revf:
1261 1261 rebaseset = scmutil.revrange(repo, revf)
1262 1262 if not rebaseset:
1263 1263 ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
1264 1264 return None
1265 1265 elif srcf:
1266 1266 src = scmutil.revrange(repo, srcf)
1267 1267 if not src:
1268 1268 ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
1269 1269 return None
1270 1270 # `+ (%ld)` to work around `wdir()::` being empty
1271 1271 rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src)
1272 1272 else:
1273 1273 base = scmutil.revrange(repo, basef or [b'.'])
1274 1274 if not base:
1275 1275 ui.status(
1276 1276 _(b'empty "base" revision set - ' b"can't compute rebase set\n")
1277 1277 )
1278 1278 return None
1279 1279 if destf:
1280 1280 # --base does not support multiple destinations
1281 1281 dest = scmutil.revsingle(repo, destf)
1282 1282 else:
1283 1283 dest = repo[_destrebase(repo, base, destspace=destspace)]
1284 1284 destf = bytes(dest)
1285 1285
1286 1286 roots = [] # selected children of branching points
1287 1287 bpbase = {} # {branchingpoint: [origbase]}
1288 1288 for b in base: # group bases by branching points
1289 1289 bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
1290 1290 bpbase[bp] = bpbase.get(bp, []) + [b]
1291 1291 if None in bpbase:
1292 1292 # emulate the old behavior, showing "nothing to rebase" (a better
1293 1293 # behavior may be to abort with a "cannot find branching point" error)
1294 1294 bpbase.clear()
1295 1295 for bp, bs in pycompat.iteritems(bpbase): # calculate roots
1296 1296 roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))
1297 1297
1298 1298 rebaseset = repo.revs(b'%ld::', roots)
1299 1299
1300 1300 if not rebaseset:
1301 1301 # transform to list because smartsets are not comparable to
1302 1302 # lists. This should be improved to honor laziness of
1303 1303 # smartset.
1304 1304 if list(base) == [dest.rev()]:
1305 1305 if basef:
1306 1306 ui.status(
1307 1307 _(
1308 1308 b'nothing to rebase - %s is both "base"'
1309 1309 b' and destination\n'
1310 1310 )
1311 1311 % dest
1312 1312 )
1313 1313 else:
1314 1314 ui.status(
1315 1315 _(
1316 1316 b'nothing to rebase - working directory '
1317 1317 b'parent is also destination\n'
1318 1318 )
1319 1319 )
1320 1320 elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
1321 1321 if basef:
1322 1322 ui.status(
1323 1323 _(
1324 1324 b'nothing to rebase - "base" %s is '
1325 1325 b'already an ancestor of destination '
1326 1326 b'%s\n'
1327 1327 )
1328 1328 % (b'+'.join(bytes(repo[r]) for r in base), dest)
1329 1329 )
1330 1330 else:
1331 1331 ui.status(
1332 1332 _(
1333 1333 b'nothing to rebase - working '
1334 1334 b'directory parent is already an '
1335 1335 b'ancestor of destination %s\n'
1336 1336 )
1337 1337 % dest
1338 1338 )
1339 1339 else: # can it happen?
1340 1340 ui.status(
1341 1341 _(b'nothing to rebase from %s to %s\n')
1342 1342 % (b'+'.join(bytes(repo[r]) for r in base), dest)
1343 1343 )
1344 1344 return None
1345 1345
1346 1346 if nodemod.wdirrev in rebaseset:
1347 1347 raise error.Abort(_(b'cannot rebase the working copy'))
1348 1348 rebasingwcp = repo[b'.'].rev() in rebaseset
1349 1349 ui.log(
1350 1350 b"rebase",
1351 1351 b"rebasing working copy parent: %r\n",
1352 1352 rebasingwcp,
1353 1353 rebase_rebasing_wcp=rebasingwcp,
1354 1354 )
1355 1355 if inmemory and rebasingwcp:
1356 1356 # Check these since we did not before.
1357 1357 cmdutil.checkunfinished(repo)
1358 1358 cmdutil.bailifchanged(repo)
1359 1359
1360 1360 if not destf:
1361 1361 dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
1362 1362 destf = bytes(dest)
1363 1363
1364 1364 allsrc = revsetlang.formatspec(b'%ld', rebaseset)
1365 1365 alias = {b'ALLSRC': allsrc}
1366 1366
1367 1367 if dest is None:
1368 1368 try:
1369 1369 # fast path: try to resolve dest without SRC alias
1370 1370 dest = scmutil.revsingle(repo, destf, localalias=alias)
1371 1371 except error.RepoLookupError:
1372 1372 # multi-dest path: resolve dest for each SRC separately
1373 1373 destmap = {}
1374 1374 for r in rebaseset:
1375 1375 alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
1376 1376 # use repo.anyrevs instead of scmutil.revsingle because we
1377 1377 # don't want to abort if destset is empty.
1378 1378 destset = repo.anyrevs([destf], user=True, localalias=alias)
1379 1379 size = len(destset)
1380 1380 if size == 1:
1381 1381 destmap[r] = destset.first()
1382 1382 elif size == 0:
1383 1383 ui.note(_(b'skipping %s - empty destination\n') % repo[r])
1384 1384 else:
1385 1385 raise error.Abort(
1386 1386 _(b'rebase destination for %s is not unique') % repo[r]
1387 1387 )
1388 1388
1389 1389 if dest is not None:
1390 1390 # single-dest case: assign dest to each rev in rebaseset
1391 1391 destrev = dest.rev()
1392 1392 destmap = {r: destrev for r in rebaseset} # {srcrev: destrev}
1393 1393
1394 1394 if not destmap:
1395 1395 ui.status(_(b'nothing to rebase - empty destination\n'))
1396 1396 return None
1397 1397
1398 1398 return destmap
1399 1399
1400 1400
1401 1401 def externalparent(repo, state, destancestors):
1402 1402 """Return the revision that should be used as the second parent
1403 1403 when the revisions in state are collapsed on top of destancestors.
1404 1404 Abort if there is more than one parent.
1405 1405 """
1406 1406 parents = set()
1407 1407 source = min(state)
1408 1408 for rev in state:
1409 1409 if rev == source:
1410 1410 continue
1411 1411 for p in repo[rev].parents():
1412 1412 if p.rev() not in state and p.rev() not in destancestors:
1413 1413 parents.add(p.rev())
1414 1414 if not parents:
1415 1415 return nullrev
1416 1416 if len(parents) == 1:
1417 1417 return parents.pop()
1418 1418 raise error.Abort(
1419 1419 _(
1420 1420 b'unable to collapse on top of %d, there is more '
1421 1421 b'than one external parent: %s'
1422 1422 )
1423 1423 % (max(destancestors), b', '.join(b"%d" % p for p in sorted(parents)))
1424 1424 )
1425 1425
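# A hedged sketch of the "external parent": when collapsing B+C onto D,
# where the merge C has a second parent E outside both the rebase set and
# D's ancestors, E becomes the collapsed commit's second parent; two or
# more distinct external parents trigger the Abort above.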
1426 1426
1427 1427 def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
1428 1428 '''Commit the memory changes with parents p1 and p2.
1429 1429 Return node of committed revision.'''
1430 # Replicates the empty check in ``repo.commit``.
1431 if wctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
1430 # FIXME: make empty commit check consistent with ``repo.commit``
1431 if wctx.nofilechanges() and not repo.ui.configbool(
1432 b'ui', b'allowemptycommit'
1433 ):
1432 1434 return None
1433 1435
1434 1436 # By convention, ``extra['branch']`` (set by extrafn) clobbers
1435 1437 # ``branch`` (used when passing ``--keepbranches``).
1436 1438 branch = None
1437 1439 if b'branch' in extra:
1438 1440 branch = extra[b'branch']
1439 1441
1440 1442 memctx = wctx.tomemctx(
1441 1443 commitmsg,
1442 1444 date=date,
1443 1445 extra=extra,
1444 1446 user=user,
1445 1447 branch=branch,
1446 1448 editor=editor,
1447 1449 )
1448 1450 commitres = repo.commitctx(memctx)
1449 1451 wctx.clean() # Might be reused
1450 1452 return commitres
1451 1453
1452 1454
1453 1455 def commitnode(repo, editor, extra, user, date, commitmsg):
1454 1456 '''Commit the wd changes with parents p1 and p2.
1455 1457 Return node of committed revision.'''
1456 1458 dsguard = util.nullcontextmanager()
1457 1459 if not repo.ui.configbool(b'rebase', b'singletransaction'):
1458 1460 dsguard = dirstateguard.dirstateguard(repo, b'rebase')
1459 1461 with dsguard:
1460 1462 # Commit might fail if unresolved files exist
1461 1463 newnode = repo.commit(
1462 1464 text=commitmsg, user=user, date=date, extra=extra, editor=editor
1463 1465 )
1464 1466
1465 1467 repo.dirstate.setbranch(repo[newnode].branch())
1466 1468 return newnode
1467 1469
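# Unlike the in-memory path, commitnode() wraps the commit in a
# dirstateguard (unless rebase.singletransaction is set), so a failed
# commit rolls the dirstate back; a sketch of the config that skips the
# per-commit guard in favor of one transaction:
#   [rebase]
#   singletransaction = True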
1468 1470
1469 1471 def rebasenode(repo, rev, p1, p2, base, collapse, dest, wctx):
1470 1472 """Rebase a single revision rev on top of p1 using base as merge ancestor"""
1471 1473 # Merge phase
1472 1474 # Update to destination and merge it with local
1473 1475 p1ctx = repo[p1]
1474 1476 if wctx.isinmemory():
1475 1477 wctx.setbase(p1ctx)
1476 1478 else:
1477 1479 if repo[b'.'].rev() != p1:
1478 1480 repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
1479 1481 mergemod.clean_update(p1ctx)
1480 1482 else:
1481 1483 repo.ui.debug(b" already in destination\n")
1482 1484 # This is, alas, necessary to invalidate workingctx's manifest cache,
1483 1485 # as well as other data we litter on it in other places.
1484 1486 wctx = repo[None]
1485 1487 repo.dirstate.write(repo.currenttransaction())
1486 1488 ctx = repo[rev]
1487 1489 repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx))
1488 1490 if base is not None:
1489 1491 repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))
1490 1492
1491 1493 # See explanation in merge.graft()
1492 1494 mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
1493 1495 stats = mergemod.update(
1494 1496 repo,
1495 1497 rev,
1496 1498 branchmerge=True,
1497 1499 force=True,
1498 1500 ancestor=base,
1499 1501 mergeancestor=mergeancestor,
1500 1502 labels=[b'dest', b'source'],
1501 1503 wc=wctx,
1502 1504 )
1503 1505 wctx.setparents(p1ctx.node(), repo[p2].node())
1504 1506 if collapse:
1505 1507 copies.graftcopies(wctx, ctx, repo[dest])
1506 1508 else:
1507 1509 # If we're not using --collapse, we need to
1508 1510 # duplicate copies between the revision we're
1509 1511 # rebasing and its first parent.
1510 1512 copies.graftcopies(wctx, ctx, ctx.p1())
1511 1513 return stats
1512 1514
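# Because mergemod.update() above is passed labels=[b'dest', b'source'],
# conflict markers during a rebase read, as a sketch:
#   <<<<<<< dest
#   (destination side)
#   =======
#   (rebased side)
#   >>>>>>> source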
1513 1515
1514 1516 def adjustdest(repo, rev, destmap, state, skipped):
1515 1517 r"""adjust rebase destination given the current rebase state
1516 1518
1517 1519 rev is what is being rebased. Return a list of two revs, which are the
1518 1520 adjusted destinations for rev's p1 and p2, respectively. If a parent is
1519 1521 nullrev, return dest without adjustment for it.
1520 1522
1521 1523 For example, when rebasing B+E to F and C to G, rebase will first move B
1522 1524 to B1, and E's destination will be adjusted from F to B1.
1523 1525
1524 1526 B1 <- written during rebasing B
1525 1527 |
1526 1528 F <- original destination of B, E
1527 1529 |
1528 1530 | E <- rev, which is being rebased
1529 1531 | |
1530 1532 | D <- prev, one parent of rev being checked
1531 1533 | |
1532 1534 | x <- skipped, ex. no successor or successor in (::dest)
1533 1535 | |
1534 1536 | C <- rebased as C', different destination
1535 1537 | |
1536 1538 | B <- rebased as B1 C'
1537 1539 |/ |
1538 1540 A G <- destination of C, different
1539 1541
1540 1542 Another example involves a merge changeset: for rebase -r C+G+H -d K,
1541 1543 rebase will first move C to C1 and G to G1, and when it checks H, the
1542 1544 adjusted destinations will be [C1, G1].
1543 1545
1544 1546 H C1 G1
1545 1547 /| | /
1546 1548 F G |/
1547 1549 K | | -> K
1548 1550 | C D |
1549 1551 | |/ |
1550 1552 | B | ...
1551 1553 |/ |/
1552 1554 A A
1553 1555
1554 1556 Besides, adjust dest according to existing rebase information. For example,
1555 1557
1556 1558 B C D B needs to be rebased on top of C, C needs to be rebased on top
1557 1559 \|/ of D. We will rebase C first.
1558 1560 A
1559 1561
1560 1562 C' After rebasing C, when considering B's destination, use C'
1561 1563 | instead of the original C.
1562 1564 B D
1563 1565 \ /
1564 1566 A
1565 1567 """
1566 1568 # pick already rebased revs with same dest from state as interesting source
1567 1569 dest = destmap[rev]
1568 1570 source = [
1569 1571 s
1570 1572 for s, d in state.items()
1571 1573 if d > 0 and destmap[s] == dest and s not in skipped
1572 1574 ]
1573 1575
1574 1576 result = []
1575 1577 for prev in repo.changelog.parentrevs(rev):
1576 1578 adjusted = dest
1577 1579 if prev != nullrev:
1578 1580 candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
1579 1581 if candidate is not None:
1580 1582 adjusted = state[candidate]
1581 1583 if adjusted == dest and dest in state:
1582 1584 adjusted = state[dest]
1583 1585 if adjusted == revtodo:
1584 1586 # sortsource should produce an order that makes this impossible
1585 1587 raise error.ProgrammingError(
1586 1588 b'rev %d should be rebased already at this time' % dest
1587 1589 )
1588 1590 result.append(adjusted)
1589 1591 return result
1590 1592
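# A worked sketch matching the first docstring diagram: once B is rebased
# to B1, adjusting rev E (parents D and null) returns [B1, F]; the p1 slot
# follows the already-rebased B found via max(source and ::D), while the
# null p2 slot keeps the unadjusted dest F.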
1591 1593
1592 1594 def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
1593 1595 """
1594 1596 Abort if rebase will create divergence or rebase is noop because of markers
1595 1597
1596 1598 `rebaseobsrevs`: set of obsolete revision in source
1597 1599 `rebaseobsskipped`: set of revisions from source skipped because they have
1598 1600 successors in destination or no non-obsolete successor.
1599 1601 """
1600 1602 # Obsolete node with successors not in dest leads to divergence
1601 1603 divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
1602 1604 divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
1603 1605
1604 1606 if divergencebasecandidates and not divergenceok:
1605 1607 divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
1606 1608 msg = _(b"this rebase will cause divergences from: %s")
1607 1609 h = _(
1608 1610 b"to force the rebase please set "
1609 1611 b"experimental.evolution.allowdivergence=True"
1610 1612 )
1611 1613 raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1612 1614
1613 1615
1614 1616 def successorrevs(unfi, rev):
1615 1617 """yield revision numbers for successors of rev"""
1616 1618 assert unfi.filtername is None
1617 1619 get_rev = unfi.changelog.index.get_rev
1618 1620 for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
1619 1621 r = get_rev(s)
1620 1622 if r is not None:
1621 1623 yield r
1622 1624
1623 1625
1624 1626 def defineparents(repo, rev, destmap, state, skipped, obsskipped):
1625 1627 """Return new parents and optionally a merge base for rev being rebased
1626 1628
1627 1629 The destination specified by "dest" cannot always be used directly because
1628 1630 previously rebase result could affect destination. For example,
1629 1631
1630 1632 D E rebase -r C+D+E -d B
1631 1633 |/ C will be rebased to C'
1632 1634 B C D's new destination will be C' instead of B
1633 1635 |/ E's new destination will be C' instead of B
1634 1636 A
1635 1637
1636 1638 The new parents of a merge changeset are slightly more complicated. See the comment
1637 1639 block below.
1638 1640 """
1639 1641 # use unfiltered changelog since successorrevs may return filtered nodes
1640 1642 assert repo.filtername is None
1641 1643 cl = repo.changelog
1642 1644 isancestor = cl.isancestorrev
1643 1645
1644 1646 dest = destmap[rev]
1645 1647 oldps = repo.changelog.parentrevs(rev) # old parents
1646 1648 newps = [nullrev, nullrev] # new parents
1647 1649 dests = adjustdest(repo, rev, destmap, state, skipped)
1648 1650 bases = list(oldps) # merge base candidates, initially just old parents
1649 1651
1650 1652 if all(r == nullrev for r in oldps[1:]):
1651 1653 # For non-merge changeset, just move p to adjusted dest as requested.
1652 1654 newps[0] = dests[0]
1653 1655 else:
1654 1656 # For merge changeset, if we move p to dests[i] unconditionally, both
1655 1657 # parents may change and the end result looks like "the merge loses a
1656 1658 # parent", which is a surprise. This is a limit because "--dest" only
1657 1659 # accepts one dest per src.
1658 1660 #
1659 1661 # Therefore, only move p with reasonable conditions (in this order):
1660 1662 # 1. use dest, if dest is a descendant of (p or one of p's successors)
1661 1663 # 2. use p's rebased result, if p is rebased (state[p] > 0)
1662 1664 #
1663 1665 # Comparing with adjustdest, the logic here does some additional work:
1664 1666 # 1. decide which parents will not be moved towards dest
1665 1667 # 2. if the above decision is "no", should a parent still be moved
1666 1668 # because it was rebased?
1667 1669 #
1668 1670 # For example:
1669 1671 #
1670 1672 # C # "rebase -r C -d D" is an error since none of the parents
1671 1673 # /| # can be moved. "rebase -r B+C -d D" will move C's parent
1672 1674 # A B D # B (using rule "2."), since B will be rebased.
1673 1675 #
1674 1676 # The loop tries not to rely on the fact that a Mercurial node has
1675 1677 # at most 2 parents.
1676 1678 for i, p in enumerate(oldps):
1677 1679 np = p # new parent
1678 1680 if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
1679 1681 np = dests[i]
1680 1682 elif p in state and state[p] > 0:
1681 1683 np = state[p]
1682 1684
1683 1685 # If one parent becomes an ancestor of the other, drop the ancestor
1684 1686 for j, x in enumerate(newps[:i]):
1685 1687 if x == nullrev:
1686 1688 continue
1687 1689 if isancestor(np, x): # CASE-1
1688 1690 np = nullrev
1689 1691 elif isancestor(x, np): # CASE-2
1690 1692 newps[j] = np
1691 1693 np = nullrev
1692 1694 # New parents forming an ancestor relationship does not
1693 1695 # mean the old parents have a similar relationship. Do not
1694 1696 # set bases[x] to nullrev.
1695 1697 bases[j], bases[i] = bases[i], bases[j]
1696 1698
1697 1699 newps[i] = np
1698 1700
1699 1701 # "rebasenode" updates to new p1, and the old p1 will be used as merge
1700 1702 # base. If only p2 changes, merging using unchanged p1 as merge base is
1701 1703 # suboptimal. Therefore swap parents to make the merge sane.
1702 1704 if newps[1] != nullrev and oldps[0] == newps[0]:
1703 1705 assert len(newps) == 2 and len(oldps) == 2
1704 1706 newps.reverse()
1705 1707 bases.reverse()
1706 1708
1707 1709 # No parent change might be an error because we fail to make rev a
1708 1710 # descendant of the requested dest. This can happen, for example:
1709 1711 #
1710 1712 # C # rebase -r C -d D
1711 1713 # /| # None of A and B will be changed to D and rebase fails.
1712 1714 # A B D
1713 1715 if set(newps) == set(oldps) and dest not in newps:
1714 1716 raise error.Abort(
1715 1717 _(
1716 1718 b'cannot rebase %d:%s without '
1717 1719 b'moving at least one of its parents'
1718 1720 )
1719 1721 % (rev, repo[rev])
1720 1722 )
1721 1723
1722 1724 # Source should not be ancestor of dest. The check here guarantees it's
1723 1725 # impossible. With multi-dest, the initial check does not cover complex
1724 1726 # cases since we don't have abstractions to dry-run rebase cheaply.
1725 1727 if any(p != nullrev and isancestor(rev, p) for p in newps):
1726 1728 raise error.Abort(_(b'source is ancestor of destination'))
1727 1729
1728 1730 # Check if the merge will contain unwanted changes. That may happen if
1729 1731 # there are multiple special (non-changelog ancestor) merge bases, which
1730 1732 # cannot be handled well by the 3-way merge algorithm. For example:
1731 1733 #
1732 1734 # F
1733 1735 # /|
1734 1736 # D E # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
1735 1737 # | | # as merge base, the difference between D and F will include
1736 1738 # B C # C, so the rebased F will contain C surprisingly. If "E" was
1737 1739 # |/ # chosen, the rebased F will contain B.
1738 1740 # A Z
1739 1741 #
1740 1742 # But our merge base candidates (D and E in above case) could still be
1741 1743 # better than the default (ancestor(F, Z) == null). Therefore still
1742 1744 # pick one (so choose p1 above).
1743 1745 if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
1744 1746 unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i]
1745 1747 for i, base in enumerate(bases):
1746 1748 if base == nullrev or base in newps:
1747 1749 continue
1748 1750 # Revisions in the side (not chosen as merge base) branch that
1749 1751 # might contain "surprising" contents
1750 1752 other_bases = set(bases) - {base}
1751 1753 siderevs = list(
1752 1754 repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
1753 1755 )
1754 1756
1755 1757 # If those revisions are covered by rebaseset, the result is good.
1756 1758 # A merge in rebaseset would be considered to cover its ancestors.
1757 1759 if siderevs:
1758 1760 rebaseset = [
1759 1761 r for r, d in state.items() if d > 0 and r not in obsskipped
1760 1762 ]
1761 1763 merges = [
1762 1764 r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
1763 1765 ]
1764 1766 unwanted[i] = list(
1765 1767 repo.revs(
1766 1768 b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
1767 1769 )
1768 1770 )
1769 1771
1770 1772 if any(revs is not None for revs in unwanted):
1771 1773 # Choose a merge base that has a minimal number of unwanted revs.
1772 1774 l, i = min(
1773 1775 (len(revs), i)
1774 1776 for i, revs in enumerate(unwanted)
1775 1777 if revs is not None
1776 1778 )
1777 1779
1778 1780 # The merge will include unwanted revisions. Abort now. Revisit this if
1779 1781 # we have a more advanced merge algorithm that handles multiple bases.
1780 1782 if l > 0:
1781 1783 unwanteddesc = _(b' or ').join(
1782 1784 (
1783 1785 b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
1784 1786 for revs in unwanted
1785 1787 if revs is not None
1786 1788 )
1787 1789 )
1788 1790 raise error.Abort(
1789 1791 _(b'rebasing %d:%s will include unwanted changes from %s')
1790 1792 % (rev, repo[rev], unwanteddesc)
1791 1793 )
1792 1794
1793 1795 # newps[0] should match merge base if possible. Currently, if newps[i]
1794 1796 # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
1795 1797 # the other's ancestor. In that case, it's fine to not swap newps here.
1796 1798 # (see CASE-1 and CASE-2 above)
1797 1799 if i != 0:
1798 1800 if newps[i] != nullrev:
1799 1801 newps[0], newps[i] = newps[i], newps[0]
1800 1802 bases[0], bases[i] = bases[i], bases[0]
1801 1803
1802 1804 # "rebasenode" updates to new p1, use the corresponding merge base.
1803 1805 base = bases[0]
1804 1806
1805 1807 repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))
1806 1808
1807 1809 return newps[0], newps[1], base
1808 1810
1809 1811
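The parent swap near the end of the merge branch is small enough to isolate. A minimal sketch with toy integer revs; swap_for_sane_merge is a hypothetical name, not a function of this module.

nullrev = -1

def swap_for_sane_merge(oldps, newps, bases):
    # If p1 is unchanged but p2 moved, swap so "rebasenode" updates to
    # the moved parent and merges against a meaningful base.
    if newps[1] != nullrev and oldps[0] == newps[0]:
        newps = list(reversed(newps))
        bases = list(reversed(bases))
    return newps, bases

# p1 stayed at rev 2, p2 moved from rev 3 to rev 5: swap both lists.
assert swap_for_sane_merge([2, 3], [2, 5], [2, 3]) == ([5, 2], [3, 2])
# p1 itself moved: nothing to do.
assert swap_for_sane_merge([2, 3], [6, 3], [2, 3]) == ([6, 3], [2, 3])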
1810 1812 def isagitpatch(repo, patchname):
1811 1813 """Return true if the given patch is in git format"""
1812 1814 mqpatch = os.path.join(repo.mq.path, patchname)
1813 1815 for line in patch.linereader(open(mqpatch, b'rb')):
1814 1816 if line.startswith(b'diff --git'):
1815 1817 return True
1816 1818 return False
1817 1819
1818 1820
1819 1821 def updatemq(repo, state, skipped, **opts):
1820 1822 """Update rebased mq patches - finalize and then import them"""
1821 1823 mqrebase = {}
1822 1824 mq = repo.mq
1823 1825 original_series = mq.fullseries[:]
1824 1826 skippedpatches = set()
1825 1827
1826 1828 for p in mq.applied:
1827 1829 rev = repo[p.node].rev()
1828 1830 if rev in state:
1829 1831 repo.ui.debug(
1830 1832 b'revision %d is an mq patch (%s), finalize it.\n'
1831 1833 % (rev, p.name)
1832 1834 )
1833 1835 mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
1834 1836 else:
1835 1837 # Applied but not rebased, not sure this should happen
1836 1838 skippedpatches.add(p.name)
1837 1839
1838 1840 if mqrebase:
1839 1841 mq.finish(repo, mqrebase.keys())
1840 1842
1841 1843 # We must start import from the newest revision
1842 1844 for rev in sorted(mqrebase, reverse=True):
1843 1845 if rev not in skipped:
1844 1846 name, isgit = mqrebase[rev]
1845 1847 repo.ui.note(
1846 1848 _(b'updating mq patch %s to %d:%s\n')
1847 1849 % (name, state[rev], repo[state[rev]])
1848 1850 )
1849 1851 mq.qimport(
1850 1852 repo,
1851 1853 (),
1852 1854 patchname=name,
1853 1855 git=isgit,
1854 1856 rev=[b"%d" % state[rev]],
1855 1857 )
1856 1858 else:
1857 1859 # Rebased and skipped
1858 1860 skippedpatches.add(mqrebase[rev][0])
1859 1861
1860 1862 # Patches were either applied and rebased and imported in
1861 1863 # order, applied and removed or unapplied. Discard the removed
1862 1864 # ones while preserving the original series order and guards.
1863 1865 newseries = [
1864 1866 s
1865 1867 for s in original_series
1866 1868 if mq.guard_re.split(s, 1)[0] not in skippedpatches
1867 1869 ]
1868 1870 mq.fullseries[:] = newseries
1869 1871 mq.seriesdirty = True
1870 1872 mq.savedirty()
1871 1873
1872 1874
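The series rewrite at the end of updatemq keeps ordering and guards while dropping skipped patches. A small sketch, assuming mq's "#"-suffixed guard syntax; the regex below is only a stand-in for mq.guard_re.

import re

guard_re = re.compile(br'\s*#')  # stand-in for mq.guard_re

def filter_series_toy(original_series, skippedpatches):
    # Keep each entry whose patch name (the text before any guard)
    # was not skipped; guards on surviving entries are preserved.
    return [s for s in original_series
            if guard_re.split(s, 1)[0] not in skippedpatches]

series = [b'p1 #onlyif', b'p2', b'p3']
assert filter_series_toy(series, {b'p2'}) == [b'p1 #onlyif', b'p3']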
1873 1875 def storecollapsemsg(repo, collapsemsg):
1874 1876 """Store the collapse message to allow recovery"""
1875 1877 collapsemsg = collapsemsg or b''
1876 1878 f = repo.vfs(b"last-message.txt", b"w")
1877 1879 f.write(b"%s\n" % collapsemsg)
1878 1880 f.close()
1879 1881
1880 1882
1881 1883 def clearcollapsemsg(repo):
1882 1884 """Remove collapse message file"""
1883 1885 repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
1884 1886
1885 1887
1886 1888 def restorecollapsemsg(repo, isabort):
1887 1889 """Restore previously stored collapse message"""
1888 1890 try:
1889 1891 f = repo.vfs(b"last-message.txt")
1890 1892 collapsemsg = f.readline().strip()
1891 1893 f.close()
1892 1894 except IOError as err:
1893 1895 if err.errno != errno.ENOENT:
1894 1896 raise
1895 1897 if isabort:
1896 1898 # Oh well, just abort like normal
1897 1899 collapsemsg = b''
1898 1900 else:
1899 1901 raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
1900 1902 return collapsemsg
1901 1903
1902 1904
1903 1905 def clearstatus(repo):
1904 1906 """Remove the status files"""
1905 1907 # Make sure the active transaction won't write the state file
1906 1908 tr = repo.currenttransaction()
1907 1909 if tr:
1908 1910 tr.removefilegenerator(b'rebasestate')
1909 1911 repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
1910 1912
1911 1913
1912 1914 def sortsource(destmap):
1913 1915 """yield source revisions in an order that we only rebase things once
1914 1916
1915 1917 If source and destination overlap, we should filter out revisions
1916 1918 depending on other revisions which haven't been rebased yet.
1917 1919
1918 1920 Yield a sorted list of revisions each time.
1919 1921
1920 1922 For example, when rebasing A to B and B to C, this function yields [B], then
1921 1923 [A], indicating B needs to be rebased first.
1922 1924
1923 1925 Raise if there is a cycle so the rebase is impossible.
1924 1926 """
1925 1927 srcset = set(destmap)
1926 1928 while srcset:
1927 1929 srclist = sorted(srcset)
1928 1930 result = []
1929 1931 for r in srclist:
1930 1932 if destmap[r] not in srcset:
1931 1933 result.append(r)
1932 1934 if not result:
1933 1935 raise error.Abort(_(b'source and destination form a cycle'))
1934 1936 srcset -= set(result)
1935 1937 yield result
1936 1938
1937 1939
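The batching behaviour of sortsource can be demonstrated with plain integers. A self-contained sketch (ValueError stands in for error.Abort):

def sortsource_toy(destmap):
    # Yield batches of sources whose destination is not itself still
    # waiting to be rebased; an empty batch means a cycle.
    srcset = set(destmap)
    while srcset:
        batch = sorted(r for r in srcset if destmap[r] not in srcset)
        if not batch:
            raise ValueError('source and destination form a cycle')
        srcset.difference_update(batch)
        yield batch

# Rebasing rev 1 onto rev 2 and rev 2 onto rev 3: 2 must move first.
assert list(sortsource_toy({1: 2, 2: 3})) == [[2], [1]]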
1938 1940 def buildstate(repo, destmap, collapse):
1939 1941 '''Define which revisions are going to be rebased and where
1940 1942
1941 1943 repo: repo
1942 1944 destmap: {srcrev: destrev}
1943 1945 '''
1944 1946 rebaseset = destmap.keys()
1945 1947 originalwd = repo[b'.'].rev()
1946 1948
1947 1949 # This check isn't strictly necessary, since mq detects commits over an
1948 1950 # applied patch. But it prevents messing up the working directory when
1949 1951 # a partially completed rebase is blocked by mq.
1950 1952 if b'qtip' in repo.tags():
1951 1953 mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
1952 1954 if set(destmap.values()) & mqapplied:
1953 1955 raise error.Abort(_(b'cannot rebase onto an applied mq patch'))
1954 1956
1955 1957 # Get "cycle" error early by exhausting the generator.
1956 1958 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1957 1959 if not sortedsrc:
1958 1960 raise error.Abort(_(b'no matching revisions'))
1959 1961
1960 1962 # Only check the first batch of revisions to rebase not depending on other
1961 1963 # rebaseset. This means "source is ancestor of destination" for the second
1962 1964 # (and following) batches of revisions are not checked here. We rely on
1963 1965 # "defineparents" to do that check.
1964 1966 roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
1965 1967 if not roots:
1966 1968 raise error.Abort(_(b'no matching revisions'))
1967 1969
1968 1970 def revof(r):
1969 1971 return r.rev()
1970 1972
1971 1973 roots = sorted(roots, key=revof)
1972 1974 state = dict.fromkeys(rebaseset, revtodo)
1973 1975 emptyrebase = len(sortedsrc) == 1
1974 1976 for root in roots:
1975 1977 dest = repo[destmap[root.rev()]]
1976 1978 commonbase = root.ancestor(dest)
1977 1979 if commonbase == root:
1978 1980 raise error.Abort(_(b'source is ancestor of destination'))
1979 1981 if commonbase == dest:
1980 1982 wctx = repo[None]
1981 1983 if dest == wctx.p1():
1982 1984 # when rebasing to '.', it will use the current wd branch name
1983 1985 samebranch = root.branch() == wctx.branch()
1984 1986 else:
1985 1987 samebranch = root.branch() == dest.branch()
1986 1988 if not collapse and samebranch and dest in root.parents():
1987 1989 # mark the revision as done by setting its new revision
1988 1990 # equal to its old (current) revisions
1989 1991 state[root.rev()] = root.rev()
1990 1992 repo.ui.debug(b'source is a child of destination\n')
1991 1993 continue
1992 1994
1993 1995 emptyrebase = False
1994 1996 repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
1995 1997 if emptyrebase:
1996 1998 return None
1997 1999 for rev in sorted(state):
1998 2000 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
1999 2001 # if all parents of this revision are done, then so is this revision
2000 2002 if parents and all((state.get(p) == p for p in parents)):
2001 2003 state[rev] = rev
2002 2004 return originalwd, destmap, state
2003 2005
2004 2006
2005 2007 def clearrebased(
2006 2008 ui,
2007 2009 repo,
2008 2010 destmap,
2009 2011 state,
2010 2012 skipped,
2011 2013 collapsedas=None,
2012 2014 keepf=False,
2013 2015 fm=None,
2014 2016 backup=True,
2015 2017 ):
2016 2018 """dispose of rebased revision at the end of the rebase
2017 2019
2018 2020 If `collapsedas` is not None, the rebase was a collapse whose result is the
2019 2021 `collapsedas` node.
2020 2022
2021 2023 If `keepf` is True, the rebase has --keep set and no nodes should be
2022 2024 removed (but bookmarks still need to be moved).
2023 2025
2024 2026 If `backup` is False, no backup will be stored when stripping rebased
2025 2027 revisions.
2026 2028 """
2027 2029 tonode = repo.changelog.node
2028 2030 replacements = {}
2029 2031 moves = {}
2030 2032 stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
2031 2033
2032 2034 collapsednodes = []
2033 2035 for rev, newrev in sorted(state.items()):
2034 2036 if newrev >= 0 and newrev != rev:
2035 2037 oldnode = tonode(rev)
2036 2038 newnode = collapsedas or tonode(newrev)
2037 2039 moves[oldnode] = newnode
2038 2040 succs = None
2039 2041 if rev in skipped:
2040 2042 if stripcleanup or not repo[rev].obsolete():
2041 2043 succs = ()
2042 2044 elif collapsedas:
2043 2045 collapsednodes.append(oldnode)
2044 2046 else:
2045 2047 succs = (newnode,)
2046 2048 if succs is not None:
2047 2049 replacements[(oldnode,)] = succs
2048 2050 if collapsednodes:
2049 2051 replacements[tuple(collapsednodes)] = (collapsedas,)
2050 2052 if fm:
2051 2053 hf = fm.hexfunc
2052 2054 fl = fm.formatlist
2053 2055 fd = fm.formatdict
2054 2056 changes = {}
2055 2057 for oldns, newn in pycompat.iteritems(replacements):
2056 2058 for oldn in oldns:
2057 2059 changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
2058 2060 nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
2059 2061 fm.data(nodechanges=nodechanges)
2060 2062 if keepf:
2061 2063 replacements = {}
2062 2064 scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
2063 2065
2064 2066
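The replacement map built above is what drives scmutil.cleanupnodes. A toy of its construction, simplified (the obsolescence/strip distinction for skipped revs is elided); tonode is a hypothetical rev -> node mapping and byte strings stand in for node ids.

def build_replacements_toy(state, skipped, tonode, collapsedas=None):
    replacements, collapsed = {}, []
    for rev, newrev in sorted(state.items()):
        if newrev < 0 or newrev == rev:
            continue  # not rebased, or already in place
        oldnode = tonode(rev)
        if rev in skipped:
            replacements[(oldnode,)] = ()       # dropped, no successor
        elif collapsedas:
            collapsed.append(oldnode)           # all fold into one node
        else:
            replacements[(oldnode,)] = (tonode(newrev),)
    if collapsed:
        replacements[tuple(collapsed)] = (collapsedas,)
    return replacements

tonode = lambda r: b'node%d' % r  # hypothetical rev -> node id
assert build_replacements_toy({1: 5, 2: 6}, {2}, tonode) == {
    (b'node1',): (b'node5',), (b'node2',): ()}
assert build_replacements_toy({1: 5, 2: 6}, set(), tonode,
                              collapsedas=b'C') == {
    (b'node1', b'node2'): (b'C',)}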
2065 2067 def pullrebase(orig, ui, repo, *args, **opts):
2066 2068 """Call rebase after pull if the latter has been invoked with --rebase"""
2067 2069 if opts.get('rebase'):
2068 2070 if ui.configbool(b'commands', b'rebase.requiredest'):
2069 2071 msg = _(b'rebase destination required by configuration')
2070 2072 hint = _(b'use hg pull followed by hg rebase -d DEST')
2071 2073 raise error.Abort(msg, hint=hint)
2072 2074
2073 2075 with repo.wlock(), repo.lock():
2074 2076 if opts.get('update'):
2075 2077 del opts['update']
2076 2078 ui.debug(
2077 2079 b'--update and --rebase are not compatible, ignoring '
2078 2080 b'the update flag\n'
2079 2081 )
2080 2082
2081 2083 cmdutil.checkunfinished(repo, skipmerge=True)
2082 2084 cmdutil.bailifchanged(
2083 2085 repo,
2084 2086 hint=_(
2085 2087 b'cannot pull with rebase: '
2086 2088 b'please commit or shelve your changes first'
2087 2089 ),
2088 2090 )
2089 2091
2090 2092 revsprepull = len(repo)
2091 2093 origpostincoming = commands.postincoming
2092 2094
2093 2095 def _dummy(*args, **kwargs):
2094 2096 pass
2095 2097
2096 2098 commands.postincoming = _dummy
2097 2099 try:
2098 2100 ret = orig(ui, repo, *args, **opts)
2099 2101 finally:
2100 2102 commands.postincoming = origpostincoming
2101 2103 revspostpull = len(repo)
2102 2104 if revspostpull > revsprepull:
2103 2105 # the --rev option from pull conflicts with rebase's own --rev,
2104 2106 # so drop it
2105 2107 if 'rev' in opts:
2106 2108 del opts['rev']
2107 2109 # positional argument from pull conflicts with rebase's own
2108 2110 # --source.
2109 2111 if 'source' in opts:
2110 2112 del opts['source']
2111 2113 # revsprepull is the len of the repo, not revnum of tip.
2112 2114 destspace = list(repo.changelog.revs(start=revsprepull))
2113 2115 opts['_destspace'] = destspace
2114 2116 try:
2115 2117 rebase(ui, repo, **opts)
2116 2118 except error.NoMergeDestAbort:
2117 2119 # we can maybe update instead
2118 2120 rev, _a, _b = destutil.destupdate(repo)
2119 2121 if rev == repo[b'.'].rev():
2120 2122 ui.status(_(b'nothing to rebase\n'))
2121 2123 else:
2122 2124 ui.status(_(b'nothing to rebase - updating instead\n'))
2123 2125 # not passing argument to get the bare update behavior
2124 2126 # with warning and trumpets
2125 2127 commands.update(ui, repo)
2126 2128 else:
2127 2129 if opts.get('tool'):
2128 2130 raise error.Abort(_(b'--tool can only be used with --rebase'))
2129 2131 ret = orig(ui, repo, *args, **opts)
2130 2132
2131 2133 return ret
2132 2134
2133 2135
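pullrebase silences commands.postincoming with a hand-rolled swap-and-restore. The same pattern as a minimal generic sketch (not Mercurial API):

import contextlib, types

@contextlib.contextmanager
def patched(obj, name, replacement):
    # Temporarily replace an attribute, restoring it even on error,
    # like the try/finally around orig() above.
    original = getattr(obj, name)
    setattr(obj, name, replacement)
    try:
        yield
    finally:
        setattr(obj, name, original)

mod = types.SimpleNamespace(postincoming=lambda *a, **k: 'real')
with patched(mod, 'postincoming', lambda *a, **k: None):
    assert mod.postincoming() is None   # silenced inside the block
assert mod.postincoming() == 'real'     # restored afterwards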
2134 2136 def _filterobsoleterevs(repo, revs):
2135 2137 """returns a set of the obsolete revisions in revs"""
2136 2138 return {r for r in revs if repo[r].obsolete()}
2137 2139
2138 2140
2139 2141 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
2140 2142 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
2141 2143
2142 2144 `obsoletenotrebased` is a mapping obsolete => successor for all
2143 2145 obsolete nodes to be rebased given in `rebaseobsrevs`.
2144 2146
2145 2147 `obsoletewithoutsuccessorindestination` is a set of obsolete revisions
2146 2148 without a successor in destination.
2147 2149
2148 2150 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
2149 2151 extinct successors.
2150 2152 """
2151 2153 obsoletenotrebased = {}
2152 2154 obsoletewithoutsuccessorindestination = set()
2153 2155 obsoleteextinctsuccessors = set()
2154 2156
2155 2157 assert repo.filtername is None
2156 2158 cl = repo.changelog
2157 2159 get_rev = cl.index.get_rev
2158 2160 extinctrevs = set(repo.revs(b'extinct()'))
2159 2161 for srcrev in rebaseobsrevs:
2160 2162 srcnode = cl.node(srcrev)
2161 2163 # XXX: more advanced APIs are required to handle split correctly
2162 2164 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
2163 2165 # obsutil.allsuccessors includes node itself
2164 2166 successors.remove(srcnode)
2165 2167 succrevs = {get_rev(s) for s in successors}
2166 2168 succrevs.discard(None)
2167 2169 if succrevs.issubset(extinctrevs):
2168 2170 # all successors are extinct
2169 2171 obsoleteextinctsuccessors.add(srcrev)
2170 2172 if not successors:
2171 2173 # no successor
2172 2174 obsoletenotrebased[srcrev] = None
2173 2175 else:
2174 2176 dstrev = destmap[srcrev]
2175 2177 for succrev in succrevs:
2176 2178 if cl.isancestorrev(succrev, dstrev):
2177 2179 obsoletenotrebased[srcrev] = succrev
2178 2180 break
2179 2181 else:
2180 2182 # If 'srcrev' has a successor in rebase set but none in
2181 2183 # destination (which would be caught above), we shall skip it
2182 2184 # and its descendants to avoid divergence.
2183 2185 if srcrev in extinctrevs or any(s in destmap for s in succrevs):
2184 2186 obsoletewithoutsuccessorindestination.add(srcrev)
2185 2187
2186 2188 return (
2187 2189 obsoletenotrebased,
2188 2190 obsoletewithoutsuccessorindestination,
2189 2191 obsoleteextinctsuccessors,
2190 2192 )
2191 2193
2192 2194
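The classification loop above sorts obsolete sources into "successor already in destination" versus "skip to avoid divergence". A condensed sketch with a toy ancestry check; all names here are hypothetical.

def classify_obsolete_toy(succmap, in_dest, destmap, extinct):
    # succmap: obsolete src rev -> successor revs (excluding itself)
    notrebased, nosuccindest = {}, set()
    for src, succs in succmap.items():
        if not succs:
            notrebased[src] = None            # pruned: no successor at all
            continue
        for s in succs:
            if in_dest(s, destmap[src]):
                notrebased[src] = s           # successor already in ::dest
                break
        else:
            if src in extinct or any(s in destmap for s in succs):
                nosuccindest.add(src)         # skip src to avoid divergence
    return notrebased, nosuccindest

dest_ancestors = {10, 7, 3}                   # toy ::dest
in_dest = lambda s, d: s in dest_ancestors
succmap = {4: [7], 5: [], 6: [8]}
notreb, skip = classify_obsolete_toy(succmap, in_dest,
                                     {4: 10, 5: 10, 6: 10}, extinct=set())
# 4's successor is in dest, 5 is pruned, 6's successor is elsewhere.
assert notreb == {4: 7, 5: None} and skip == set()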
2193 2195 def abortrebase(ui, repo):
2194 2196 with repo.wlock(), repo.lock():
2195 2197 rbsrt = rebaseruntime(repo, ui)
2196 2198 rbsrt._prepareabortorcontinue(isabort=True)
2197 2199
2198 2200
2199 2201 def continuerebase(ui, repo):
2200 2202 with repo.wlock(), repo.lock():
2201 2203 rbsrt = rebaseruntime(repo, ui)
2202 2204 ms = mergestatemod.mergestate.read(repo)
2203 2205 mergeutil.checkunresolved(ms)
2204 2206 retcode = rbsrt._prepareabortorcontinue(isabort=False)
2205 2207 if retcode is not None:
2206 2208 return retcode
2207 2209 rbsrt._performrebase(None)
2208 2210 rbsrt._finishrebase()
2209 2211
2210 2212
2211 2213 def summaryhook(ui, repo):
2212 2214 if not repo.vfs.exists(b'rebasestate'):
2213 2215 return
2214 2216 try:
2215 2217 rbsrt = rebaseruntime(repo, ui, {})
2216 2218 rbsrt.restorestatus()
2217 2219 state = rbsrt.state
2218 2220 except error.RepoLookupError:
2219 2221 # i18n: column positioning for "hg summary"
2220 2222 msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
2221 2223 ui.write(msg)
2222 2224 return
2223 2225 numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
2224 2226 # i18n: column positioning for "hg summary"
2225 2227 ui.write(
2226 2228 _(b'rebase: %s, %s (rebase --continue)\n')
2227 2229 % (
2228 2230 ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
2229 2231 ui.label(_(b'%d remaining'), b'rebase.remaining')
2230 2232 % (len(state) - numrebased),
2231 2233 )
2232 2234 )
2233 2235
2234 2236
2235 2237 def uisetup(ui):
2236 2238 # Replace pull with a decorator to provide --rebase option
2237 2239 entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
2238 2240 entry[1].append(
2239 2241 (b'', b'rebase', None, _(b"rebase working directory to branch head"))
2240 2242 )
2241 2243 entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
2242 2244 cmdutil.summaryhooks.add(b'rebase', summaryhook)
2243 2245 statemod.addunfinished(
2244 2246 b'rebase',
2245 2247 fname=b'rebasestate',
2246 2248 stopflag=True,
2247 2249 continueflag=True,
2248 2250 abortfunc=abortrebase,
2249 2251 continuefunc=continuerebase,
2250 2252 )
@@ -1,3089 +1,3089 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 dagop,
32 32 encoding,
33 33 error,
34 34 fileset,
35 35 match as matchmod,
36 36 mergestate as mergestatemod,
37 37 metadata,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 scmutil,
45 45 sparse,
46 46 subrepo,
47 47 subrepoutil,
48 48 util,
49 49 )
50 50 from .utils import (
51 51 dateutil,
52 52 stringutil,
53 53 )
54 54
55 55 propertycache = util.propertycache
56 56
57 57
58 58 class basectx(object):
59 59 """A basectx object represents the common logic for its children:
60 60 changectx: read-only context that is already present in the repo,
61 61 workingctx: a context that represents the working directory and can
62 62 be committed,
63 63 memctx: a context that represents changes in-memory and can also
64 64 be committed."""
65 65
66 66 def __init__(self, repo):
67 67 self._repo = repo
68 68
69 69 def __bytes__(self):
70 70 return short(self.node())
71 71
72 72 __str__ = encoding.strmethod(__bytes__)
73 73
74 74 def __repr__(self):
75 75 return "<%s %s>" % (type(self).__name__, str(self))
76 76
77 77 def __eq__(self, other):
78 78 try:
79 79 return type(self) == type(other) and self._rev == other._rev
80 80 except AttributeError:
81 81 return False
82 82
83 83 def __ne__(self, other):
84 84 return not (self == other)
85 85
86 86 def __contains__(self, key):
87 87 return key in self._manifest
88 88
89 89 def __getitem__(self, key):
90 90 return self.filectx(key)
91 91
92 92 def __iter__(self):
93 93 return iter(self._manifest)
94 94
95 95 def _buildstatusmanifest(self, status):
96 96 """Builds a manifest that includes the given status results, if this is
97 97 a working copy context. For non-working copy contexts, it just returns
98 98 the normal manifest."""
99 99 return self.manifest()
100 100
101 101 def _matchstatus(self, other, match):
102 102 """This internal method provides a way for child objects to override the
103 103 match operator.
104 104 """
105 105 return match
106 106
107 107 def _buildstatus(
108 108 self, other, s, match, listignored, listclean, listunknown
109 109 ):
110 110 """build a status with respect to another context"""
111 111 # Load earliest manifest first for caching reasons. More specifically,
112 112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
113 113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
114 114 # 1000 and cache it so that when you read 1001, we just need to apply a
115 115 # delta to what's in the cache. So that's one full reconstruction + one
116 116 # delta application.
117 117 mf2 = None
118 118 if self.rev() is not None and self.rev() < other.rev():
119 119 mf2 = self._buildstatusmanifest(s)
120 120 mf1 = other._buildstatusmanifest(s)
121 121 if mf2 is None:
122 122 mf2 = self._buildstatusmanifest(s)
123 123
124 124 modified, added = [], []
125 125 removed = []
126 126 clean = []
127 127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 128 deletedset = set(deleted)
129 129 d = mf1.diff(mf2, match=match, clean=listclean)
130 130 for fn, value in pycompat.iteritems(d):
131 131 if fn in deletedset:
132 132 continue
133 133 if value is None:
134 134 clean.append(fn)
135 135 continue
136 136 (node1, flag1), (node2, flag2) = value
137 137 if node1 is None:
138 138 added.append(fn)
139 139 elif node2 is None:
140 140 removed.append(fn)
141 141 elif flag1 != flag2:
142 142 modified.append(fn)
143 143 elif node2 not in wdirfilenodeids:
144 144 # When comparing files between two commits, we save time by
145 145 # not comparing the file contents when the nodeids differ.
146 146 # Note that this means we incorrectly report a reverted change
147 147 # to a file as a modification.
148 148 modified.append(fn)
149 149 elif self[fn].cmp(other[fn]):
150 150 modified.append(fn)
151 151 else:
152 152 clean.append(fn)
153 153
154 154 if removed:
155 155 # need to filter files if they are already reported as removed
156 156 unknown = [
157 157 fn
158 158 for fn in unknown
159 159 if fn not in mf1 and (not match or match(fn))
160 160 ]
161 161 ignored = [
162 162 fn
163 163 for fn in ignored
164 164 if fn not in mf1 and (not match or match(fn))
165 165 ]
166 166 # if they're deleted, don't report them as removed
167 167 removed = [fn for fn in removed if fn not in deletedset]
168 168
169 169 return scmutil.status(
170 170 modified, added, removed, deleted, unknown, ignored, clean
171 171 )
172 172
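The walk over the manifest diff above reduces each file to a status bucket. A toy of that per-file classification (byte filenames, hypothetical node values; the content-comparison refinements are elided):

def classify_diff_toy(d, deletedset):
    # d maps filename -> None (clean) or ((node1, flag1), (node2, flag2))
    modified, added, removed, clean = [], [], [], []
    for fn, value in sorted(d.items()):
        if fn in deletedset:
            continue                 # deleted files are reported separately
        if value is None:
            clean.append(fn)
            continue
        (node1, flag1), (node2, flag2) = value
        if node1 is None:
            added.append(fn)         # only on the newer side
        elif node2 is None:
            removed.append(fn)       # only on the older side
        else:
            modified.append(fn)      # flags or node ids differ
    return modified, added, removed, clean

d = {b'new': ((None, b''), (b'n2', b'')),
     b'gone': ((b'n1', b''), (None, b'')),
     b'same': None}
assert classify_diff_toy(d, set()) == ([], [b'new'], [b'gone'], [b'same'])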
173 173 @propertycache
174 174 def substate(self):
175 175 return subrepoutil.state(self, self._repo.ui)
176 176
177 177 def subrev(self, subpath):
178 178 return self.substate[subpath][1]
179 179
180 180 def rev(self):
181 181 return self._rev
182 182
183 183 def node(self):
184 184 return self._node
185 185
186 186 def hex(self):
187 187 return hex(self.node())
188 188
189 189 def manifest(self):
190 190 return self._manifest
191 191
192 192 def manifestctx(self):
193 193 return self._manifestctx
194 194
195 195 def repo(self):
196 196 return self._repo
197 197
198 198 def phasestr(self):
199 199 return phases.phasenames[self.phase()]
200 200
201 201 def mutable(self):
202 202 return self.phase() > phases.public
203 203
204 204 def matchfileset(self, cwd, expr, badfn=None):
205 205 return fileset.match(self, cwd, expr, badfn=badfn)
206 206
207 207 def obsolete(self):
208 208 """True if the changeset is obsolete"""
209 209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
210 210
211 211 def extinct(self):
212 212 """True if the changeset is extinct"""
213 213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
214 214
215 215 def orphan(self):
216 216 """True if the changeset is not obsolete, but its ancestor is"""
217 217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
218 218
219 219 def phasedivergent(self):
220 220 """True if the changeset tries to be a successor of a public changeset
221 221
222 222 Only non-public and non-obsolete changesets may be phase-divergent.
223 223 """
224 224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
225 225
226 226 def contentdivergent(self):
227 227 """Is a successor of a changeset with multiple possible successor sets
228 228
229 229 Only non-public and non-obsolete changesets may be content-divergent.
230 230 """
231 231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
232 232
233 233 def isunstable(self):
234 234 """True if the changeset is either orphan, phase-divergent or
235 235 content-divergent"""
236 236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
237 237
238 238 def instabilities(self):
239 239 """return the list of instabilities affecting this changeset.
240 240
241 241 Instabilities are returned as strings. possible values are:
242 242 - orphan,
243 243 - phase-divergent,
244 244 - content-divergent.
245 245 """
246 246 instabilities = []
247 247 if self.orphan():
248 248 instabilities.append(b'orphan')
249 249 if self.phasedivergent():
250 250 instabilities.append(b'phase-divergent')
251 251 if self.contentdivergent():
252 252 instabilities.append(b'content-divergent')
253 253 return instabilities
254 254
255 255 def parents(self):
256 256 """return contexts for each parent changeset"""
257 257 return self._parents
258 258
259 259 def p1(self):
260 260 return self._parents[0]
261 261
262 262 def p2(self):
263 263 parents = self._parents
264 264 if len(parents) == 2:
265 265 return parents[1]
266 266 return self._repo[nullrev]
267 267
268 268 def _fileinfo(self, path):
269 269 if '_manifest' in self.__dict__:
270 270 try:
271 271 return self._manifest.find(path)
272 272 except KeyError:
273 273 raise error.ManifestLookupError(
274 274 self._node, path, _(b'not found in manifest')
275 275 )
276 276 if '_manifestdelta' in self.__dict__ or path in self.files():
277 277 if path in self._manifestdelta:
278 278 return (
279 279 self._manifestdelta[path],
280 280 self._manifestdelta.flags(path),
281 281 )
282 282 mfl = self._repo.manifestlog
283 283 try:
284 284 node, flag = mfl[self._changeset.manifest].find(path)
285 285 except KeyError:
286 286 raise error.ManifestLookupError(
287 287 self._node, path, _(b'not found in manifest')
288 288 )
289 289
290 290 return node, flag
291 291
292 292 def filenode(self, path):
293 293 return self._fileinfo(path)[0]
294 294
295 295 def flags(self, path):
296 296 try:
297 297 return self._fileinfo(path)[1]
298 298 except error.LookupError:
299 299 return b''
300 300
301 301 @propertycache
302 302 def _copies(self):
303 303 return metadata.computechangesetcopies(self)
304 304
305 305 def p1copies(self):
306 306 return self._copies[0]
307 307
308 308 def p2copies(self):
309 309 return self._copies[1]
310 310
311 311 def sub(self, path, allowcreate=True):
312 312 '''return a subrepo for the stored revision of path, never wdir()'''
313 313 return subrepo.subrepo(self, path, allowcreate=allowcreate)
314 314
315 315 def nullsub(self, path, pctx):
316 316 return subrepo.nullsubrepo(self, path, pctx)
317 317
318 318 def workingsub(self, path):
319 319 '''return a subrepo for the stored revision, or wdir if this is a wdir
320 320 context.
321 321 '''
322 322 return subrepo.subrepo(self, path, allowwdir=True)
323 323
324 324 def match(
325 325 self,
326 326 pats=None,
327 327 include=None,
328 328 exclude=None,
329 329 default=b'glob',
330 330 listsubrepos=False,
331 331 badfn=None,
332 332 cwd=None,
333 333 ):
334 334 r = self._repo
335 335 if not cwd:
336 336 cwd = r.getcwd()
337 337 return matchmod.match(
338 338 r.root,
339 339 cwd,
340 340 pats,
341 341 include,
342 342 exclude,
343 343 default,
344 344 auditor=r.nofsauditor,
345 345 ctx=self,
346 346 listsubrepos=listsubrepos,
347 347 badfn=badfn,
348 348 )
349 349
350 350 def diff(
351 351 self,
352 352 ctx2=None,
353 353 match=None,
354 354 changes=None,
355 355 opts=None,
356 356 losedatafn=None,
357 357 pathfn=None,
358 358 copy=None,
359 359 copysourcematch=None,
360 360 hunksfilterfn=None,
361 361 ):
362 362 """Returns a diff generator for the given contexts and matcher"""
363 363 if ctx2 is None:
364 364 ctx2 = self.p1()
365 365 if ctx2 is not None:
366 366 ctx2 = self._repo[ctx2]
367 367 return patch.diff(
368 368 self._repo,
369 369 ctx2,
370 370 self,
371 371 match=match,
372 372 changes=changes,
373 373 opts=opts,
374 374 losedatafn=losedatafn,
375 375 pathfn=pathfn,
376 376 copy=copy,
377 377 copysourcematch=copysourcematch,
378 378 hunksfilterfn=hunksfilterfn,
379 379 )
380 380
381 381 def dirs(self):
382 382 return self._manifest.dirs()
383 383
384 384 def hasdir(self, dir):
385 385 return self._manifest.hasdir(dir)
386 386
387 387 def status(
388 388 self,
389 389 other=None,
390 390 match=None,
391 391 listignored=False,
392 392 listclean=False,
393 393 listunknown=False,
394 394 listsubrepos=False,
395 395 ):
396 396 """return status of files between two nodes or node and working
397 397 directory.
398 398
399 399 If other is None, compare this node with working directory.
400 400
401 401 returns (modified, added, removed, deleted, unknown, ignored, clean)
402 402 """
403 403
404 404 ctx1 = self
405 405 ctx2 = self._repo[other]
406 406
407 407 # This next code block is, admittedly, fragile logic that tests for
408 408 # reversing the contexts and wouldn't need to exist if it weren't for
409 409 # the fast (and common) code path of comparing the working directory
410 410 # with its first parent.
411 411 #
412 412 # What we're aiming for here is the ability to call:
413 413 #
414 414 # workingctx.status(parentctx)
415 415 #
416 416 # If we always built the manifest for each context and compared those,
417 417 # then we'd be done. But the special case of the above call means we
418 418 # just copy the manifest of the parent.
419 419 reversed = False
420 420 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
421 421 reversed = True
422 422 ctx1, ctx2 = ctx2, ctx1
423 423
424 424 match = self._repo.narrowmatch(match)
425 425 match = ctx2._matchstatus(ctx1, match)
426 426 r = scmutil.status([], [], [], [], [], [], [])
427 427 r = ctx2._buildstatus(
428 428 ctx1, r, match, listignored, listclean, listunknown
429 429 )
430 430
431 431 if reversed:
432 432 # Reverse added and removed. Clear deleted, unknown and ignored as
433 433 # these make no sense to reverse.
434 434 r = scmutil.status(
435 435 r.modified, r.removed, r.added, [], [], [], r.clean
436 436 )
437 437
438 438 if listsubrepos:
439 439 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
440 440 try:
441 441 rev2 = ctx2.subrev(subpath)
442 442 except KeyError:
443 443 # A subrepo that existed in node1 was deleted between
444 444 # node1 and node2 (inclusive). Thus, ctx2's substate
445 445 # won't contain that subpath. The best we can do is ignore it.
446 446 rev2 = None
447 447 submatch = matchmod.subdirmatcher(subpath, match)
448 448 s = sub.status(
449 449 rev2,
450 450 match=submatch,
451 451 ignored=listignored,
452 452 clean=listclean,
453 453 unknown=listunknown,
454 454 listsubrepos=True,
455 455 )
456 456 for k in (
457 457 'modified',
458 458 'added',
459 459 'removed',
460 460 'deleted',
461 461 'unknown',
462 462 'ignored',
463 463 'clean',
464 464 ):
465 465 rfiles, sfiles = getattr(r, k), getattr(s, k)
466 466 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
467 467
468 468 r.modified.sort()
469 469 r.added.sort()
470 470 r.removed.sort()
471 471 r.deleted.sort()
472 472 r.unknown.sort()
473 473 r.ignored.sort()
474 474 r.clean.sort()
475 475
476 476 return r
477 477
478 478 def mergestate(self, clean=False):
479 479 """Get a mergestate object for this context."""
480 480 raise NotImplementedError(
481 481 '%s does not implement mergestate()' % self.__class__
482 482 )
483 483
484 484
485 485 class changectx(basectx):
486 486 """A changecontext object makes access to data related to a particular
487 487 changeset convenient. It represents a read-only context already present in
488 488 the repo."""
489 489
490 490 def __init__(self, repo, rev, node, maybe_filtered=True):
491 491 super(changectx, self).__init__(repo)
492 492 self._rev = rev
493 493 self._node = node
494 494 # When maybe_filtered is True, the revision might be affected by
495 495 # changelog filtering, and operations must go through the filtered changelog.
496 496 #
497 497 # When maybe_filtered is False, the revision has already been checked
498 498 # against filtering and is not filtered. Operations through the
499 499 # unfiltered changelog might be used in some cases.
500 500 self._maybe_filtered = maybe_filtered
501 501
502 502 def __hash__(self):
503 503 try:
504 504 return hash(self._rev)
505 505 except AttributeError:
506 506 return id(self)
507 507
508 508 def __nonzero__(self):
509 509 return self._rev != nullrev
510 510
511 511 __bool__ = __nonzero__
512 512
513 513 @propertycache
514 514 def _changeset(self):
515 515 if self._maybe_filtered:
516 516 repo = self._repo
517 517 else:
518 518 repo = self._repo.unfiltered()
519 519 return repo.changelog.changelogrevision(self.rev())
520 520
521 521 @propertycache
522 522 def _manifest(self):
523 523 return self._manifestctx.read()
524 524
525 525 @property
526 526 def _manifestctx(self):
527 527 return self._repo.manifestlog[self._changeset.manifest]
528 528
529 529 @propertycache
530 530 def _manifestdelta(self):
531 531 return self._manifestctx.readdelta()
532 532
533 533 @propertycache
534 534 def _parents(self):
535 535 repo = self._repo
536 536 if self._maybe_filtered:
537 537 cl = repo.changelog
538 538 else:
539 539 cl = repo.unfiltered().changelog
540 540
541 541 p1, p2 = cl.parentrevs(self._rev)
542 542 if p2 == nullrev:
543 543 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
544 544 return [
545 545 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
546 546 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
547 547 ]
548 548
549 549 def changeset(self):
550 550 c = self._changeset
551 551 return (
552 552 c.manifest,
553 553 c.user,
554 554 c.date,
555 555 c.files,
556 556 c.description,
557 557 c.extra,
558 558 )
559 559
560 560 def manifestnode(self):
561 561 return self._changeset.manifest
562 562
563 563 def user(self):
564 564 return self._changeset.user
565 565
566 566 def date(self):
567 567 return self._changeset.date
568 568
569 569 def files(self):
570 570 return self._changeset.files
571 571
572 572 def filesmodified(self):
573 573 modified = set(self.files())
574 574 modified.difference_update(self.filesadded())
575 575 modified.difference_update(self.filesremoved())
576 576 return sorted(modified)
577 577
578 578 def filesadded(self):
579 579 filesadded = self._changeset.filesadded
580 580 compute_on_none = True
581 581 if self._repo.filecopiesmode == b'changeset-sidedata':
582 582 compute_on_none = False
583 583 else:
584 584 source = self._repo.ui.config(b'experimental', b'copies.read-from')
585 585 if source == b'changeset-only':
586 586 compute_on_none = False
587 587 elif source != b'compatibility':
588 588 # filelog mode, ignore any changelog content
589 589 filesadded = None
590 590 if filesadded is None:
591 591 if compute_on_none:
592 592 filesadded = metadata.computechangesetfilesadded(self)
593 593 else:
594 594 filesadded = []
595 595 return filesadded
596 596
597 597 def filesremoved(self):
598 598 filesremoved = self._changeset.filesremoved
599 599 compute_on_none = True
600 600 if self._repo.filecopiesmode == b'changeset-sidedata':
601 601 compute_on_none = False
602 602 else:
603 603 source = self._repo.ui.config(b'experimental', b'copies.read-from')
604 604 if source == b'changeset-only':
605 605 compute_on_none = False
606 606 elif source != b'compatibility':
607 607 # filelog mode, ignore any changelog content
608 608 filesremoved = None
609 609 if filesremoved is None:
610 610 if compute_on_none:
611 611 filesremoved = metadata.computechangesetfilesremoved(self)
612 612 else:
613 613 filesremoved = []
614 614 return filesremoved
615 615
616 616 @propertycache
617 617 def _copies(self):
618 618 p1copies = self._changeset.p1copies
619 619 p2copies = self._changeset.p2copies
620 620 compute_on_none = True
621 621 if self._repo.filecopiesmode == b'changeset-sidedata':
622 622 compute_on_none = False
623 623 else:
624 624 source = self._repo.ui.config(b'experimental', b'copies.read-from')
625 625 # If config says to get copy metadata only from changeset, then
626 626 # return that, defaulting to {} if there was no copy metadata. In
627 627 # compatibility mode, we return copy data from the changeset if it
628 628 # was recorded there, and otherwise we fall back to getting it from
629 629 # the filelogs (below).
630 630 #
631 631 # If we are in compatibility mode and there is no data in the
632 632 # changeset, we get the copy metadata from the filelogs.
633 633 #
634 634 # Otherwise, when the config says to read only from the filelogs, we
635 635 # get the copy metadata from there.
636 636 if source == b'changeset-only':
637 637 compute_on_none = False
638 638 elif source != b'compatibility':
639 639 # filelog mode, ignore any changelog content
640 640 p1copies = p2copies = None
641 641 if p1copies is None:
642 642 if compute_on_none:
643 643 p1copies, p2copies = super(changectx, self)._copies
644 644 else:
645 645 if p1copies is None:
646 646 p1copies = {}
647 647 if p2copies is None:
648 648 p2copies = {}
649 649 return p1copies, p2copies
650 650
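The compute_on_none logic above encodes three read policies for copy metadata. A compact sketch of just the policy decision; names are hypothetical and the filelog computation is reduced to a placeholder.

def resolve_copies_toy(stored, source):
    # stored: copy dict recorded in the changeset, or None
    # source: value of experimental.copies.read-from
    compute_on_none = True
    if source == b'changeset-only':
        compute_on_none = False
    elif source != b'compatibility':
        stored = None                    # filelog mode: ignore changelog
    if stored is not None:
        return stored
    return b'<from filelogs>' if compute_on_none else {}

assert resolve_copies_toy({b'b': b'a'}, b'compatibility') == {b'b': b'a'}
assert resolve_copies_toy(None, b'changeset-only') == {}
assert resolve_copies_toy({b'b': b'a'}, b'filelog') == b'<from filelogs>'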
651 651 def description(self):
652 652 return self._changeset.description
653 653
654 654 def branch(self):
655 655 return encoding.tolocal(self._changeset.extra.get(b"branch"))
656 656
657 657 def closesbranch(self):
658 658 return b'close' in self._changeset.extra
659 659
660 660 def extra(self):
661 661 """Return a dict of extra information."""
662 662 return self._changeset.extra
663 663
664 664 def tags(self):
665 665 """Return a list of byte tag names"""
666 666 return self._repo.nodetags(self._node)
667 667
668 668 def bookmarks(self):
669 669 """Return a list of byte bookmark names."""
670 670 return self._repo.nodebookmarks(self._node)
671 671
672 672 def phase(self):
673 673 return self._repo._phasecache.phase(self._repo, self._rev)
674 674
675 675 def hidden(self):
676 676 return self._rev in repoview.filterrevs(self._repo, b'visible')
677 677
678 678 def isinmemory(self):
679 679 return False
680 680
681 681 def children(self):
682 682 """return list of changectx contexts for each child changeset.
683 683
684 684 This returns only the immediate child changesets. Use descendants() to
685 685 recursively walk children.
686 686 """
687 687 c = self._repo.changelog.children(self._node)
688 688 return [self._repo[x] for x in c]
689 689
690 690 def ancestors(self):
691 691 for a in self._repo.changelog.ancestors([self._rev]):
692 692 yield self._repo[a]
693 693
694 694 def descendants(self):
695 695 """Recursively yield all children of the changeset.
696 696
697 697 For just the immediate children, use children()
698 698 """
699 699 for d in self._repo.changelog.descendants([self._rev]):
700 700 yield self._repo[d]
701 701
702 702 def filectx(self, path, fileid=None, filelog=None):
703 703 """get a file context from this changeset"""
704 704 if fileid is None:
705 705 fileid = self.filenode(path)
706 706 return filectx(
707 707 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
708 708 )
709 709
710 710 def ancestor(self, c2, warn=False):
711 711 """return the "best" ancestor context of self and c2
712 712
713 713 If there are multiple candidates, it will show a message and check
714 714 merge.preferancestor configuration before falling back to the
715 715 revlog ancestor."""
716 716 # deal with workingctxs
717 717 n2 = c2._node
718 718 if n2 is None:
719 719 n2 = c2._parents[0]._node
720 720 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
721 721 if not cahs:
722 722 anc = nullid
723 723 elif len(cahs) == 1:
724 724 anc = cahs[0]
725 725 else:
726 726 # experimental config: merge.preferancestor
727 727 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
728 728 try:
729 729 ctx = scmutil.revsymbol(self._repo, r)
730 730 except error.RepoLookupError:
731 731 continue
732 732 anc = ctx.node()
733 733 if anc in cahs:
734 734 break
735 735 else:
736 736 anc = self._repo.changelog.ancestor(self._node, n2)
737 737 if warn:
738 738 self._repo.ui.status(
739 739 (
740 740 _(b"note: using %s as ancestor of %s and %s\n")
741 741 % (short(anc), short(self._node), short(n2))
742 742 )
743 743 + b''.join(
744 744 _(
745 745 b" alternatively, use --config "
746 746 b"merge.preferancestor=%s\n"
747 747 )
748 748 % short(n)
749 749 for n in sorted(cahs)
750 750 if n != anc
751 751 )
752 752 )
753 753 return self._repo[anc]
754 754
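When several common-ancestor heads exist, ancestor() consults merge.preferancestor before falling back. The tie-break alone, sketched with toy revs (min() stands in for the revlog fallback):

def pick_ancestor_toy(cahs, preferred):
    # cahs: common-ancestor head revs; preferred: configured order
    if len(cahs) == 1:
        return cahs[0]
    for p in preferred:
        if p in cahs:
            return p
    return min(cahs)        # stand-in for changelog.ancestor()

assert pick_ancestor_toy([5], []) == 5
assert pick_ancestor_toy([5, 7], [7, 2]) == 7   # first preference wins
assert pick_ancestor_toy([5, 7], [2]) == 5      # fallback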
755 755 def isancestorof(self, other):
756 756 """True if this changeset is an ancestor of other"""
757 757 return self._repo.changelog.isancestorrev(self._rev, other._rev)
758 758
759 759 def walk(self, match):
760 760 '''Generates matching file names.'''
761 761
762 762 # Wrap match.bad method to have message with nodeid
763 763 def bad(fn, msg):
764 764 # The manifest doesn't know about subrepos, so don't complain about
765 765 # paths into valid subrepos.
766 766 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
767 767 return
768 768 match.bad(fn, _(b'no such file in rev %s') % self)
769 769
770 770 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
771 771 return self._manifest.walk(m)
772 772
773 773 def matches(self, match):
774 774 return self.walk(match)
775 775
776 776
777 777 class basefilectx(object):
778 778 """A filecontext object represents the common logic for its children:
779 779 filectx: read-only access to a filerevision that is already present
780 780 in the repo,
781 781 workingfilectx: a filecontext that represents files from the working
782 782 directory,
783 783 memfilectx: a filecontext that represents files in-memory,
784 784 """
785 785
786 786 @propertycache
787 787 def _filelog(self):
788 788 return self._repo.file(self._path)
789 789
790 790 @propertycache
791 791 def _changeid(self):
792 792 if '_changectx' in self.__dict__:
793 793 return self._changectx.rev()
794 794 elif '_descendantrev' in self.__dict__:
795 795 # this file context was created from a revision with a known
796 796 # descendant, we can (lazily) correct for linkrev aliases
797 797 return self._adjustlinkrev(self._descendantrev)
798 798 else:
799 799 return self._filelog.linkrev(self._filerev)
800 800
801 801 @propertycache
802 802 def _filenode(self):
803 803 if '_fileid' in self.__dict__:
804 804 return self._filelog.lookup(self._fileid)
805 805 else:
806 806 return self._changectx.filenode(self._path)
807 807
808 808 @propertycache
809 809 def _filerev(self):
810 810 return self._filelog.rev(self._filenode)
811 811
812 812 @propertycache
813 813 def _repopath(self):
814 814 return self._path
815 815
816 816 def __nonzero__(self):
817 817 try:
818 818 self._filenode
819 819 return True
820 820 except error.LookupError:
821 821 # file is missing
822 822 return False
823 823
824 824 __bool__ = __nonzero__
825 825
826 826 def __bytes__(self):
827 827 try:
828 828 return b"%s@%s" % (self.path(), self._changectx)
829 829 except error.LookupError:
830 830 return b"%s@???" % self.path()
831 831
832 832 __str__ = encoding.strmethod(__bytes__)
833 833
834 834 def __repr__(self):
835 835 return "<%s %s>" % (type(self).__name__, str(self))
836 836
837 837 def __hash__(self):
838 838 try:
839 839 return hash((self._path, self._filenode))
840 840 except AttributeError:
841 841 return id(self)
842 842
843 843 def __eq__(self, other):
844 844 try:
845 845 return (
846 846 type(self) == type(other)
847 847 and self._path == other._path
848 848 and self._filenode == other._filenode
849 849 )
850 850 except AttributeError:
851 851 return False
852 852
853 853 def __ne__(self, other):
854 854 return not (self == other)
855 855
856 856 def filerev(self):
857 857 return self._filerev
858 858
859 859 def filenode(self):
860 860 return self._filenode
861 861
862 862 @propertycache
863 863 def _flags(self):
864 864 return self._changectx.flags(self._path)
865 865
866 866 def flags(self):
867 867 return self._flags
868 868
869 869 def filelog(self):
870 870 return self._filelog
871 871
872 872 def rev(self):
873 873 return self._changeid
874 874
875 875 def linkrev(self):
876 876 return self._filelog.linkrev(self._filerev)
877 877
878 878 def node(self):
879 879 return self._changectx.node()
880 880
881 881 def hex(self):
882 882 return self._changectx.hex()
883 883
884 884 def user(self):
885 885 return self._changectx.user()
886 886
887 887 def date(self):
888 888 return self._changectx.date()
889 889
890 890 def files(self):
891 891 return self._changectx.files()
892 892
893 893 def description(self):
894 894 return self._changectx.description()
895 895
896 896 def branch(self):
897 897 return self._changectx.branch()
898 898
899 899 def extra(self):
900 900 return self._changectx.extra()
901 901
902 902 def phase(self):
903 903 return self._changectx.phase()
904 904
905 905 def phasestr(self):
906 906 return self._changectx.phasestr()
907 907
908 908 def obsolete(self):
909 909 return self._changectx.obsolete()
910 910
911 911 def instabilities(self):
912 912 return self._changectx.instabilities()
913 913
914 914 def manifest(self):
915 915 return self._changectx.manifest()
916 916
917 917 def changectx(self):
918 918 return self._changectx
919 919
920 920 def renamed(self):
921 921 return self._copied
922 922
923 923 def copysource(self):
924 924 return self._copied and self._copied[0]
925 925
926 926 def repo(self):
927 927 return self._repo
928 928
929 929 def size(self):
930 930 return len(self.data())
931 931
932 932 def path(self):
933 933 return self._path
934 934
935 935 def isbinary(self):
936 936 try:
937 937 return stringutil.binary(self.data())
938 938 except IOError:
939 939 return False
940 940
941 941 def isexec(self):
942 942 return b'x' in self.flags()
943 943
944 944 def islink(self):
945 945 return b'l' in self.flags()
946 946
947 947 def isabsent(self):
948 948 """whether this filectx represents a file not in self._changectx
949 949
950 950 This is mainly for merge code to detect change/delete conflicts. This is
951 951 expected to be False for all subclasses of basefilectx."""
952 952 return False
953 953
954 954 _customcmp = False
955 955
956 956 def cmp(self, fctx):
957 957 """compare with other file context
958 958
959 959 returns True if different than fctx.
960 960 """
961 961 if fctx._customcmp:
962 962 return fctx.cmp(self)
963 963
964 964 if self._filenode is None:
965 965 raise error.ProgrammingError(
966 966 b'filectx.cmp() must be reimplemented if not backed by revlog'
967 967 )
968 968
969 969 if fctx._filenode is None:
970 970 if self._repo._encodefilterpats:
971 971 # can't rely on size() because wdir content may be decoded
972 972 return self._filelog.cmp(self._filenode, fctx.data())
973 973 if self.size() - 4 == fctx.size():
974 974 # size() can match:
975 975 # if file data starts with '\1\n', empty metadata block is
976 976 # prepended, which adds 4 bytes to filelog.size().
977 977 return self._filelog.cmp(self._filenode, fctx.data())
978 978 if self.size() == fctx.size():
979 979 # size() matches: need to compare content
980 980 return self._filelog.cmp(self._filenode, fctx.data())
981 981
982 982 # size() differs
983 983 return True
984 984
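The "size() - 4" special case in cmp() comes from filelog metadata framing: data that itself begins with '\1\n' gets an empty metadata block prepended when stored. A toy of that size relationship (illustrative, not the revlog code):

def stored_size_toy(data, metadata=b''):
    # Filelog frames metadata as '\1\n' + meta + '\1\n' before the data;
    # an empty block is still added when data begins with '\1\n'.
    if metadata or data.startswith(b'\x01\n'):
        return len(b'\x01\n' + metadata + b'\x01\n' + data)
    return len(data)

payload = b'\x01\nnot really metadata'
assert stored_size_toy(payload) == len(payload) + 4   # the "- 4" in cmp()
assert stored_size_toy(b'plain contents') == len(b'plain contents')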
985 985 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
986 986 """return the first ancestor of <srcrev> introducing <fnode>
987 987
988 988 If the linkrev of the file revision does not point to an ancestor of
989 989 srcrev, we'll walk down the ancestors until we find one introducing
990 990 this file revision.
991 991
992 992 :srcrev: the changeset revision we search ancestors from
993 993 :inclusive: if true, the src revision will also be checked
994 994 :stoprev: an optional revision to stop the walk at. If no introduction
995 995 of this file content could be found before this floor
996 996 revision, the function will returns "None" and stops its
997 997 iteration.
998 998 """
999 999 repo = self._repo
1000 1000 cl = repo.unfiltered().changelog
1001 1001 mfl = repo.manifestlog
1002 1002 # fetch the linkrev
1003 1003 lkr = self.linkrev()
1004 1004 if srcrev == lkr:
1005 1005 return lkr
1006 1006 # hack to reuse ancestor computation when searching for renames
1007 1007 memberanc = getattr(self, '_ancestrycontext', None)
1008 1008 iteranc = None
1009 1009 if srcrev is None:
1010 1010 # wctx case, used by workingfilectx during mergecopy
1011 1011 revs = [p.rev() for p in self._repo[None].parents()]
1012 1012 inclusive = True # we skipped the real (revless) source
1013 1013 else:
1014 1014 revs = [srcrev]
1015 1015 if memberanc is None:
1016 1016 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1017 1017 # check if this linkrev is an ancestor of srcrev
1018 1018 if lkr not in memberanc:
1019 1019 if iteranc is None:
1020 1020 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1021 1021 fnode = self._filenode
1022 1022 path = self._path
1023 1023 for a in iteranc:
1024 1024 if stoprev is not None and a < stoprev:
1025 1025 return None
1026 1026 ac = cl.read(a) # get changeset data (we avoid object creation)
1027 1027 if path in ac[3]: # checking the 'files' field.
1028 1028 # The file has been touched, check if the content is
1029 1029 # similar to the one we search for.
1030 1030 if fnode == mfl[ac[0]].readfast().get(path):
1031 1031 return a
1032 1032 # In theory, we should never get out of that loop without a result.
1033 1033         # But if the manifest uses a buggy file revision (not a child of the
1034 1034         # one it replaces) we could. Such a buggy situation will likely
1035 1035         # result in a crash somewhere else at some point.
1036 1036 return lkr
1037 1037
1038 1038 def isintroducedafter(self, changelogrev):
1039 1039 """True if a filectx has been introduced after a given floor revision
1040 1040 """
1041 1041 if self.linkrev() >= changelogrev:
1042 1042 return True
1043 1043 introrev = self._introrev(stoprev=changelogrev)
1044 1044 if introrev is None:
1045 1045 return False
1046 1046 return introrev >= changelogrev
1047 1047
1048 1048 def introrev(self):
1049 1049 """return the rev of the changeset which introduced this file revision
1050 1050
1051 1051         This method is different from linkrev because it takes into account the
1052 1052 changeset the filectx was created from. It ensures the returned
1053 1053 revision is one of its ancestors. This prevents bugs from
1054 1054 'linkrev-shadowing' when a file revision is used by multiple
1055 1055 changesets.
1056 1056 """
1057 1057 return self._introrev()
1058 1058
1059 1059 def _introrev(self, stoprev=None):
1060 1060 """
1061 1061         Same as `introrev`, but with an extra argument to limit the changelog
1062 1062         iteration range in some internal use cases.
1063 1063
1064 1064 If `stoprev` is set, the `introrev` will not be searched past that
1065 1065 `stoprev` revision and "None" might be returned. This is useful to
1066 1066 limit the iteration range.
1067 1067 """
1068 1068 toprev = None
1069 1069 attrs = vars(self)
1070 1070 if '_changeid' in attrs:
1071 1071 # We have a cached value already
1072 1072 toprev = self._changeid
1073 1073 elif '_changectx' in attrs:
1074 1074 # We know which changelog entry we are coming from
1075 1075 toprev = self._changectx.rev()
1076 1076
1077 1077 if toprev is not None:
1078 1078 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1079 1079 elif '_descendantrev' in attrs:
1080 1080 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1081 1081 # be nice and cache the result of the computation
1082 1082 if introrev is not None:
1083 1083 self._changeid = introrev
1084 1084 return introrev
1085 1085 else:
1086 1086 return self.linkrev()
1087 1087
1088 1088 def introfilectx(self):
1089 1089 """Return filectx having identical contents, but pointing to the
1090 1090 changeset revision where this filectx was introduced"""
1091 1091 introrev = self.introrev()
1092 1092 if self.rev() == introrev:
1093 1093 return self
1094 1094 return self.filectx(self.filenode(), changeid=introrev)
1095 1095
1096 1096 def _parentfilectx(self, path, fileid, filelog):
1097 1097 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1098 1098 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1099 1099 if '_changeid' in vars(self) or '_changectx' in vars(self):
1100 1100 # If self is associated with a changeset (probably explicitly
1101 1101 # fed), ensure the created filectx is associated with a
1102 1102 # changeset that is an ancestor of self.changectx.
1103 1103 # This lets us later use _adjustlinkrev to get a correct link.
1104 1104 fctx._descendantrev = self.rev()
1105 1105 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1106 1106 elif '_descendantrev' in vars(self):
1107 1107 # Otherwise propagate _descendantrev if we have one associated.
1108 1108 fctx._descendantrev = self._descendantrev
1109 1109 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1110 1110 return fctx
1111 1111
1112 1112 def parents(self):
1113 1113 _path = self._path
1114 1114 fl = self._filelog
1115 1115 parents = self._filelog.parents(self._filenode)
1116 1116 pl = [(_path, node, fl) for node in parents if node != nullid]
1117 1117
1118 1118 r = fl.renamed(self._filenode)
1119 1119 if r:
1120 1120             # - In the simple rename case, both parents are nullid, pl is empty.
1121 1121             # - In case of merge, only one of the parents is nullid and should
1122 1122             #   be replaced with the rename information. This parent is -always-
1123 1123             #   the first one.
1124 1124             #
1125 1125             # As nullid parents have always been filtered out by the previous
1126 1126             # list comprehension, inserting at index 0 will always replace the
1127 1127             # first nullid parent with the rename information.
1128 1128 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1129 1129
1130 1130 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1131 1131
1132 1132 def p1(self):
1133 1133 return self.parents()[0]
1134 1134
1135 1135 def p2(self):
1136 1136 p = self.parents()
1137 1137 if len(p) == 2:
1138 1138 return p[1]
1139 1139 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1140 1140
1141 1141 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1142 1142 """Returns a list of annotateline objects for each line in the file
1143 1143
1144 1144 - line.fctx is the filectx of the node where that line was last changed
1145 1145 - line.lineno is the line number at the first appearance in the managed
1146 1146 file
1147 1147 - line.text is the data on that line (including newline character)
1148 1148 """
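        # Hypothetical rendering of the result (illustrative only):
        #
        #   for line in fctx.annotate(follow=True):
        #       ui.write(b'%d:%d: %s' % (line.fctx.rev(), line.lineno,
        #                                line.text))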
1149 1149 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1150 1150
1151 1151 def parents(f):
1152 1152 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1153 1153 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1154 1154 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1155 1155 # isn't an ancestor of the srcrev.
1156 1156 f._changeid
1157 1157 pl = f.parents()
1158 1158
1159 1159 # Don't return renamed parents if we aren't following.
1160 1160 if not follow:
1161 1161 pl = [p for p in pl if p.path() == f.path()]
1162 1162
1163 1163 # renamed filectx won't have a filelog yet, so set it
1164 1164 # from the cache to save time
1165 1165 for p in pl:
1166 1166                 if '_filelog' not in p.__dict__:
1167 1167 p._filelog = getlog(p.path())
1168 1168
1169 1169 return pl
1170 1170
1171 1171 # use linkrev to find the first changeset where self appeared
1172 1172 base = self.introfilectx()
1173 1173 if getattr(base, '_ancestrycontext', None) is None:
1174 1174 # it is safe to use an unfiltered repository here because we are
1175 1175 # walking ancestors only.
1176 1176 cl = self._repo.unfiltered().changelog
1177 1177 if base.rev() is None:
1178 1178 # wctx is not inclusive, but works because _ancestrycontext
1179 1179 # is used to test filelog revisions
1180 1180 ac = cl.ancestors(
1181 1181 [p.rev() for p in base.parents()], inclusive=True
1182 1182 )
1183 1183 else:
1184 1184 ac = cl.ancestors([base.rev()], inclusive=True)
1185 1185 base._ancestrycontext = ac
1186 1186
1187 1187 return dagop.annotate(
1188 1188 base, parents, skiprevs=skiprevs, diffopts=diffopts
1189 1189 )
1190 1190
1191 1191 def ancestors(self, followfirst=False):
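        # Illustrative use (assumed caller): walk a file's ancestry, e.g.
        #
        #   for afctx in fctx.ancestors(followfirst=True):
        #       ui.write(b'%d %s\n' % (afctx.rev(), afctx.path()))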
1192 1192 visit = {}
1193 1193 c = self
1194 1194 if followfirst:
1195 1195 cut = 1
1196 1196 else:
1197 1197 cut = None
1198 1198
1199 1199 while True:
1200 1200 for parent in c.parents()[:cut]:
1201 1201 visit[(parent.linkrev(), parent.filenode())] = parent
1202 1202 if not visit:
1203 1203 break
1204 1204 c = visit.pop(max(visit))
1205 1205 yield c
1206 1206
1207 1207 def decodeddata(self):
1208 1208 """Returns `data()` after running repository decoding filters.
1209 1209
1210 1210 This is often equivalent to how the data would be expressed on disk.
1211 1211 """
1212 1212 return self._repo.wwritedata(self.path(), self.data())
1213 1213
1214 1214
1215 1215 class filectx(basefilectx):
1216 1216 """A filecontext object makes access to data related to a particular
1217 1217 filerevision convenient."""
1218 1218
1219 1219 def __init__(
1220 1220 self,
1221 1221 repo,
1222 1222 path,
1223 1223 changeid=None,
1224 1224 fileid=None,
1225 1225 filelog=None,
1226 1226 changectx=None,
1227 1227 ):
1228 1228 """changeid must be a revision number, if specified.
1229 1229 fileid can be a file revision or node."""
1230 1230 self._repo = repo
1231 1231 self._path = path
1232 1232
1233 1233 assert (
1234 1234 changeid is not None or fileid is not None or changectx is not None
1235 1235 ), (
1236 1236 b"bad args: changeid=%r, fileid=%r, changectx=%r"
1237 1237 % (changeid, fileid, changectx,)
1238 1238 )
1239 1239
1240 1240 if filelog is not None:
1241 1241 self._filelog = filelog
1242 1242
1243 1243 if changeid is not None:
1244 1244 self._changeid = changeid
1245 1245 if changectx is not None:
1246 1246 self._changectx = changectx
1247 1247 if fileid is not None:
1248 1248 self._fileid = fileid
1249 1249
1250 1250 @propertycache
1251 1251 def _changectx(self):
1252 1252 try:
1253 1253 return self._repo[self._changeid]
1254 1254 except error.FilteredRepoLookupError:
1255 1255             # Linkrev may point to any revision in the repository. When the
1256 1256             # repository is filtered this may lead to `filectx` trying to build
1257 1257             # a `changectx` for a filtered revision. In such a case we fall back
1258 1258             # to creating the `changectx` on the unfiltered version of the
1259 1259             # repository. This fallback should not be an issue because
1260 1260             # `changectx` objects obtained from `filectx` are not used in
1261 1261             # complex operations that care about filtering.
1262 1262             #
1263 1263             # This fallback is a cheap and dirty fix that prevents several
1264 1264             # crashes. It does not ensure the behavior is correct. However the
1265 1265             # behavior was not correct before filtering either, and "incorrect
1266 1266             # behavior" is seen as better than "crash".
1267 1267             #
1268 1268             # Linkrevs have several serious problems with filtering that are
1269 1269             # complicated to solve. Proper handling of the issue here should be
1270 1270             # considered when solving the linkrev issues is on the table.
1271 1271 return self._repo.unfiltered()[self._changeid]
1272 1272
1273 1273 def filectx(self, fileid, changeid=None):
1274 1274 '''opens an arbitrary revision of the file without
1275 1275 opening a new filelog'''
1276 1276 return filectx(
1277 1277 self._repo,
1278 1278 self._path,
1279 1279 fileid=fileid,
1280 1280 filelog=self._filelog,
1281 1281 changeid=changeid,
1282 1282 )
1283 1283
1284 1284 def rawdata(self):
1285 1285 return self._filelog.rawdata(self._filenode)
1286 1286
1287 1287 def rawflags(self):
1288 1288 """low-level revlog flags"""
1289 1289 return self._filelog.flags(self._filerev)
1290 1290
1291 1291 def data(self):
1292 1292 try:
1293 1293 return self._filelog.read(self._filenode)
1294 1294 except error.CensoredNodeError:
1295 1295 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1296 1296 return b""
1297 1297 raise error.Abort(
1298 1298 _(b"censored node: %s") % short(self._filenode),
1299 1299 hint=_(b"set censor.policy to ignore errors"),
1300 1300 )
1301 1301
1302 1302 def size(self):
1303 1303 return self._filelog.size(self._filerev)
1304 1304
1305 1305 @propertycache
1306 1306 def _copied(self):
1307 1307 """check if file was actually renamed in this changeset revision
1308 1308
1309 1309         If a rename is logged in the file revision, we report the copy for the
1310 1310         changeset only if the file revision's linkrev points back to the changeset
1311 1311         in question or both changeset parents contain different file revisions.
1312 1312 """
1313 1313
1314 1314 renamed = self._filelog.renamed(self._filenode)
1315 1315 if not renamed:
1316 1316 return None
1317 1317
1318 1318 if self.rev() == self.linkrev():
1319 1319 return renamed
1320 1320
1321 1321 name = self.path()
1322 1322 fnode = self._filenode
1323 1323 for p in self._changectx.parents():
1324 1324 try:
1325 1325 if fnode == p.filenode(name):
1326 1326 return None
1327 1327 except error.LookupError:
1328 1328 pass
1329 1329 return renamed
1330 1330
1331 1331 def children(self):
1332 1332 # hard for renames
1333 1333 c = self._filelog.children(self._filenode)
1334 1334 return [
1335 1335 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1336 1336 for x in c
1337 1337 ]
1338 1338
1339 1339
1340 1340 class committablectx(basectx):
1341 1341 """A committablectx object provides common functionality for a context that
1342 1342 wants the ability to commit, e.g. workingctx or memctx."""
1343 1343
1344 1344 def __init__(
1345 1345 self,
1346 1346 repo,
1347 1347 text=b"",
1348 1348 user=None,
1349 1349 date=None,
1350 1350 extra=None,
1351 1351 changes=None,
1352 1352 branch=None,
1353 1353 ):
1354 1354 super(committablectx, self).__init__(repo)
1355 1355 self._rev = None
1356 1356 self._node = None
1357 1357 self._text = text
1358 1358 if date:
1359 1359 self._date = dateutil.parsedate(date)
1360 1360 if user:
1361 1361 self._user = user
1362 1362 if changes:
1363 1363 self._status = changes
1364 1364
1365 1365 self._extra = {}
1366 1366 if extra:
1367 1367 self._extra = extra.copy()
1368 1368 if branch is not None:
1369 1369 self._extra[b'branch'] = encoding.fromlocal(branch)
1370 1370 if not self._extra.get(b'branch'):
1371 1371 self._extra[b'branch'] = b'default'
1372 1372
1373 1373 def __bytes__(self):
1374 1374 return bytes(self._parents[0]) + b"+"
1375 1375
1376 1376 __str__ = encoding.strmethod(__bytes__)
1377 1377
1378 1378 def __nonzero__(self):
1379 1379 return True
1380 1380
1381 1381 __bool__ = __nonzero__
1382 1382
1383 1383 @propertycache
1384 1384 def _status(self):
1385 1385 return self._repo.status()
1386 1386
1387 1387 @propertycache
1388 1388 def _user(self):
1389 1389 return self._repo.ui.username()
1390 1390
1391 1391 @propertycache
1392 1392 def _date(self):
1393 1393 ui = self._repo.ui
1394 1394 date = ui.configdate(b'devel', b'default-date')
1395 1395 if date is None:
1396 1396 date = dateutil.makedate()
1397 1397 return date
1398 1398
1399 1399 def subrev(self, subpath):
1400 1400 return None
1401 1401
1402 1402 def manifestnode(self):
1403 1403 return None
1404 1404
1405 1405 def user(self):
1406 1406 return self._user or self._repo.ui.username()
1407 1407
1408 1408 def date(self):
1409 1409 return self._date
1410 1410
1411 1411 def description(self):
1412 1412 return self._text
1413 1413
1414 1414 def files(self):
1415 1415 return sorted(
1416 1416 self._status.modified + self._status.added + self._status.removed
1417 1417 )
1418 1418
1419 1419 def modified(self):
1420 1420 return self._status.modified
1421 1421
1422 1422 def added(self):
1423 1423 return self._status.added
1424 1424
1425 1425 def removed(self):
1426 1426 return self._status.removed
1427 1427
1428 1428 def deleted(self):
1429 1429 return self._status.deleted
1430 1430
1431 1431 filesmodified = modified
1432 1432 filesadded = added
1433 1433 filesremoved = removed
1434 1434
1435 1435 def branch(self):
1436 1436 return encoding.tolocal(self._extra[b'branch'])
1437 1437
1438 1438 def closesbranch(self):
1439 1439 return b'close' in self._extra
1440 1440
1441 1441 def extra(self):
1442 1442 return self._extra
1443 1443
1444 1444 def isinmemory(self):
1445 1445 return False
1446 1446
1447 1447 def tags(self):
1448 1448 return []
1449 1449
1450 1450 def bookmarks(self):
1451 1451 b = []
1452 1452 for p in self.parents():
1453 1453 b.extend(p.bookmarks())
1454 1454 return b
1455 1455
1456 1456 def phase(self):
1457 1457 phase = phases.newcommitphase(self._repo.ui)
1458 1458 for p in self.parents():
1459 1459 phase = max(phase, p.phase())
1460 1460 return phase
1461 1461
1462 1462 def hidden(self):
1463 1463 return False
1464 1464
1465 1465 def children(self):
1466 1466 return []
1467 1467
1468 1468 def flags(self, path):
1469 1469 if '_manifest' in self.__dict__:
1470 1470 try:
1471 1471 return self._manifest.flags(path)
1472 1472 except KeyError:
1473 1473 return b''
1474 1474
1475 1475 try:
1476 1476 return self._flagfunc(path)
1477 1477 except OSError:
1478 1478 return b''
1479 1479
1480 1480 def ancestor(self, c2):
1481 1481 """return the "best" ancestor context of self and c2"""
1482 1482 return self._parents[0].ancestor(c2) # punt on two parents for now
1483 1483
1484 1484 def ancestors(self):
1485 1485 for p in self._parents:
1486 1486 yield p
1487 1487 for a in self._repo.changelog.ancestors(
1488 1488 [p.rev() for p in self._parents]
1489 1489 ):
1490 1490 yield self._repo[a]
1491 1491
1492 1492 def markcommitted(self, node):
1493 1493 """Perform post-commit cleanup necessary after committing this ctx
1494 1494
1495 1495 Specifically, this updates backing stores this working context
1496 1496 wraps to reflect the fact that the changes reflected by this
1497 1497 workingctx have been committed. For example, it marks
1498 1498 modified and added files as normal in the dirstate.
1499 1499
1500 1500 """
1501 1501
1502 1502 def dirty(self, missing=False, merge=True, branch=True):
1503 1503 return False
1504 1504
1505 1505
1506 1506 class workingctx(committablectx):
1507 1507 """A workingctx object makes access to data related to
1508 1508 the current working directory convenient.
1509 1509 date - any valid date string or (unixtime, offset), or None.
1510 1510 user - username string, or None.
1511 1511 extra - a dictionary of extra values, or None.
1512 1512 changes - a list of file lists as returned by localrepo.status()
1513 1513 or None to use the repository status.
1514 1514 """
1515 1515
1516 1516 def __init__(
1517 1517 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1518 1518 ):
1519 1519 branch = None
1520 1520 if not extra or b'branch' not in extra:
1521 1521 try:
1522 1522 branch = repo.dirstate.branch()
1523 1523 except UnicodeDecodeError:
1524 1524 raise error.Abort(_(b'branch name not in UTF-8!'))
1525 1525 super(workingctx, self).__init__(
1526 1526 repo, text, user, date, extra, changes, branch=branch
1527 1527 )
1528 1528
1529 1529 def __iter__(self):
1530 1530 d = self._repo.dirstate
1531 1531 for f in d:
1532 1532 if d[f] != b'r':
1533 1533 yield f
1534 1534
1535 1535 def __contains__(self, key):
1536 1536 return self._repo.dirstate[key] not in b"?r"
1537 1537
1538 1538 def hex(self):
1539 1539 return wdirhex
1540 1540
1541 1541 @propertycache
1542 1542 def _parents(self):
1543 1543 p = self._repo.dirstate.parents()
1544 1544 if p[1] == nullid:
1545 1545 p = p[:-1]
1546 1546 # use unfiltered repo to delay/avoid loading obsmarkers
1547 1547 unfi = self._repo.unfiltered()
1548 1548 return [
1549 1549 changectx(
1550 1550 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1551 1551 )
1552 1552 for n in p
1553 1553 ]
1554 1554
1555 1555 def setparents(self, p1node, p2node=nullid):
1556 1556 dirstate = self._repo.dirstate
1557 1557 with dirstate.parentchange():
1558 1558 copies = dirstate.setparents(p1node, p2node)
1559 1559 pctx = self._repo[p1node]
1560 1560 if copies:
1561 1561 # Adjust copy records, the dirstate cannot do it, it
1562 1562 # requires access to parents manifests. Preserve them
1563 1563 # only for entries added to first parent.
1564 1564 for f in copies:
1565 1565 if f not in pctx and copies[f] in pctx:
1566 1566 dirstate.copy(copies[f], f)
1567 1567 if p2node == nullid:
1568 1568 for f, s in sorted(dirstate.copies().items()):
1569 1569 if f not in pctx and s not in pctx:
1570 1570 dirstate.copy(None, f)
1571 1571
1572 1572 def _fileinfo(self, path):
1573 1573 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1574 1574 self._manifest
1575 1575 return super(workingctx, self)._fileinfo(path)
1576 1576
1577 1577 def _buildflagfunc(self):
1578 1578 # Create a fallback function for getting file flags when the
1579 1579 # filesystem doesn't support them
1580 1580
1581 1581 copiesget = self._repo.dirstate.copies().get
1582 1582 parents = self.parents()
1583 1583 if len(parents) < 2:
1584 1584 # when we have one parent, it's easy: copy from parent
1585 1585 man = parents[0].manifest()
1586 1586
1587 1587 def func(f):
1588 1588 f = copiesget(f, f)
1589 1589 return man.flags(f)
1590 1590
1591 1591 else:
1592 1592 # merges are tricky: we try to reconstruct the unstored
1593 1593 # result from the merge (issue1802)
1594 1594 p1, p2 = parents
1595 1595 pa = p1.ancestor(p2)
1596 1596 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1597 1597
1598 1598 def func(f):
1599 1599 f = copiesget(f, f) # may be wrong for merges with copies
1600 1600 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1601 1601 if fl1 == fl2:
1602 1602 return fl1
1603 1603 if fl1 == fla:
1604 1604 return fl2
1605 1605 if fl2 == fla:
1606 1606 return fl1
1607 1607 return b'' # punt for conflicts
1608 1608
1609 1609 return func
1610 1610
1611 1611 @propertycache
1612 1612 def _flagfunc(self):
1613 1613 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1614 1614
1615 1615 def flags(self, path):
1616 1616 try:
1617 1617 return self._flagfunc(path)
1618 1618 except OSError:
1619 1619 return b''
1620 1620
1621 1621 def filectx(self, path, filelog=None):
1622 1622 """get a file context from the working directory"""
1623 1623 return workingfilectx(
1624 1624 self._repo, path, workingctx=self, filelog=filelog
1625 1625 )
1626 1626
1627 1627 def dirty(self, missing=False, merge=True, branch=True):
1628 1628 """check whether a working directory is modified"""
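        # Typical guard (illustrative; real callers such as
        # cmdutil.bailifchanged differ in details):
        #
        #   if repo[None].dirty(missing=True):
        #       raise error.Abort(_(b'uncommitted changes'))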
1629 1629 # check subrepos first
1630 1630 for s in sorted(self.substate):
1631 1631 if self.sub(s).dirty(missing=missing):
1632 1632 return True
1633 1633 # check current working dir
1634 1634 return (
1635 1635 (merge and self.p2())
1636 1636 or (branch and self.branch() != self.p1().branch())
1637 1637 or self.modified()
1638 1638 or self.added()
1639 1639 or self.removed()
1640 1640 or (missing and self.deleted())
1641 1641 )
1642 1642
1643 1643 def add(self, list, prefix=b""):
1644 1644 with self._repo.wlock():
1645 1645 ui, ds = self._repo.ui, self._repo.dirstate
1646 1646 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1647 1647 rejected = []
1648 1648 lstat = self._repo.wvfs.lstat
1649 1649 for f in list:
1650 1650 # ds.pathto() returns an absolute file when this is invoked from
1651 1651 # the keyword extension. That gets flagged as non-portable on
1652 1652 # Windows, since it contains the drive letter and colon.
1653 1653 scmutil.checkportable(ui, os.path.join(prefix, f))
1654 1654 try:
1655 1655 st = lstat(f)
1656 1656 except OSError:
1657 1657 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1658 1658 rejected.append(f)
1659 1659 continue
1660 1660 limit = ui.configbytes(b'ui', b'large-file-limit')
1661 1661 if limit != 0 and st.st_size > limit:
1662 1662 ui.warn(
1663 1663 _(
1664 1664 b"%s: up to %d MB of RAM may be required "
1665 1665 b"to manage this file\n"
1666 1666 b"(use 'hg revert %s' to cancel the "
1667 1667 b"pending addition)\n"
1668 1668 )
1669 1669 % (f, 3 * st.st_size // 1000000, uipath(f))
1670 1670 )
1671 1671 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1672 1672 ui.warn(
1673 1673 _(
1674 1674 b"%s not added: only files and symlinks "
1675 1675 b"supported currently\n"
1676 1676 )
1677 1677 % uipath(f)
1678 1678 )
1679 1679 rejected.append(f)
1680 1680 elif ds[f] in b'amn':
1681 1681 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1682 1682 elif ds[f] == b'r':
1683 1683 ds.normallookup(f)
1684 1684 else:
1685 1685 ds.add(f)
1686 1686 return rejected
1687 1687
1688 1688 def forget(self, files, prefix=b""):
1689 1689 with self._repo.wlock():
1690 1690 ds = self._repo.dirstate
1691 1691 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1692 1692 rejected = []
1693 1693 for f in files:
1694 1694 if f not in ds:
1695 1695 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1696 1696 rejected.append(f)
1697 1697 elif ds[f] != b'a':
1698 1698 ds.remove(f)
1699 1699 else:
1700 1700 ds.drop(f)
1701 1701 return rejected
1702 1702
1703 1703 def copy(self, source, dest):
1704 1704 try:
1705 1705 st = self._repo.wvfs.lstat(dest)
1706 1706 except OSError as err:
1707 1707 if err.errno != errno.ENOENT:
1708 1708 raise
1709 1709 self._repo.ui.warn(
1710 1710 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1711 1711 )
1712 1712 return
1713 1713 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1714 1714 self._repo.ui.warn(
1715 1715 _(b"copy failed: %s is not a file or a symbolic link\n")
1716 1716 % self._repo.dirstate.pathto(dest)
1717 1717 )
1718 1718 else:
1719 1719 with self._repo.wlock():
1720 1720 ds = self._repo.dirstate
1721 1721 if ds[dest] in b'?':
1722 1722 ds.add(dest)
1723 1723 elif ds[dest] in b'r':
1724 1724 ds.normallookup(dest)
1725 1725 ds.copy(source, dest)
1726 1726
1727 1727 def match(
1728 1728 self,
1729 1729 pats=None,
1730 1730 include=None,
1731 1731 exclude=None,
1732 1732 default=b'glob',
1733 1733 listsubrepos=False,
1734 1734 badfn=None,
1735 1735 cwd=None,
1736 1736 ):
1737 1737 r = self._repo
1738 1738 if not cwd:
1739 1739 cwd = r.getcwd()
1740 1740
1741 1741         # Only a case-insensitive filesystem needs magic to translate user input
1742 1742 # to actual case in the filesystem.
1743 1743 icasefs = not util.fscasesensitive(r.root)
1744 1744 return matchmod.match(
1745 1745 r.root,
1746 1746 cwd,
1747 1747 pats,
1748 1748 include,
1749 1749 exclude,
1750 1750 default,
1751 1751 auditor=r.auditor,
1752 1752 ctx=self,
1753 1753 listsubrepos=listsubrepos,
1754 1754 badfn=badfn,
1755 1755 icasefs=icasefs,
1756 1756 )
1757 1757
1758 1758 def _filtersuspectsymlink(self, files):
1759 1759 if not files or self._repo.dirstate._checklink:
1760 1760 return files
1761 1761
1762 1762 # Symlink placeholders may get non-symlink-like contents
1763 1763 # via user error or dereferencing by NFS or Samba servers,
1764 1764 # so we filter out any placeholders that don't look like a
1765 1765 # symlink
1766 1766 sane = []
1767 1767 for f in files:
1768 1768 if self.flags(f) == b'l':
1769 1769 d = self[f].data()
1770 1770 if (
1771 1771 d == b''
1772 1772 or len(d) >= 1024
1773 1773 or b'\n' in d
1774 1774 or stringutil.binary(d)
1775 1775 ):
1776 1776 self._repo.ui.debug(
1777 1777 b'ignoring suspect symlink placeholder "%s"\n' % f
1778 1778 )
1779 1779 continue
1780 1780 sane.append(f)
1781 1781 return sane
1782 1782
1783 1783 def _checklookup(self, files):
1784 1784 # check for any possibly clean files
1785 1785 if not files:
1786 1786 return [], [], []
1787 1787
1788 1788 modified = []
1789 1789 deleted = []
1790 1790 fixup = []
1791 1791 pctx = self._parents[0]
1792 1792 # do a full compare of any files that might have changed
1793 1793 for f in sorted(files):
1794 1794 try:
1795 1795 # This will return True for a file that got replaced by a
1796 1796 # directory in the interim, but fixing that is pretty hard.
1797 1797 if (
1798 1798 f not in pctx
1799 1799 or self.flags(f) != pctx.flags(f)
1800 1800 or pctx[f].cmp(self[f])
1801 1801 ):
1802 1802 modified.append(f)
1803 1803 else:
1804 1804 fixup.append(f)
1805 1805 except (IOError, OSError):
1806 1806                 # A file became inaccessible in between? Mark it as deleted,
1807 1807 # matching dirstate behavior (issue5584).
1808 1808 # The dirstate has more complex behavior around whether a
1809 1809 # missing file matches a directory, etc, but we don't need to
1810 1810 # bother with that: if f has made it to this point, we're sure
1811 1811 # it's in the dirstate.
1812 1812 deleted.append(f)
1813 1813
1814 1814 return modified, deleted, fixup
1815 1815
1816 1816 def _poststatusfixup(self, status, fixup):
1817 1817 """update dirstate for files that are actually clean"""
1818 1818 poststatus = self._repo.postdsstatus()
1819 1819 if fixup or poststatus:
1820 1820 try:
1821 1821 oldid = self._repo.dirstate.identity()
1822 1822
1823 1823 # updating the dirstate is optional
1824 1824 # so we don't wait on the lock
1825 1825 # wlock can invalidate the dirstate, so cache normal _after_
1826 1826 # taking the lock
1827 1827 with self._repo.wlock(False):
1828 1828 if self._repo.dirstate.identity() == oldid:
1829 1829 if fixup:
1830 1830 normal = self._repo.dirstate.normal
1831 1831 for f in fixup:
1832 1832 normal(f)
1833 1833 # write changes out explicitly, because nesting
1834 1834 # wlock at runtime may prevent 'wlock.release()'
1835 1835 # after this block from doing so for subsequent
1836 1836 # changing files
1837 1837 tr = self._repo.currenttransaction()
1838 1838 self._repo.dirstate.write(tr)
1839 1839
1840 1840 if poststatus:
1841 1841 for ps in poststatus:
1842 1842 ps(self, status)
1843 1843 else:
1844 1844 # in this case, writing changes out breaks
1845 1845 # consistency, because .hg/dirstate was
1846 1846                         # already changed simultaneously after the last
1847 1847                         # caching (see also issue5584 for details)
1848 1848 self._repo.ui.debug(
1849 1849 b'skip updating dirstate: identity mismatch\n'
1850 1850 )
1851 1851 except error.LockError:
1852 1852 pass
1853 1853 finally:
1854 1854 # Even if the wlock couldn't be grabbed, clear out the list.
1855 1855 self._repo.clearpostdsstatus()
1856 1856
1857 1857 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1858 1858 '''Gets the status from the dirstate -- internal use only.'''
1859 1859 subrepos = []
1860 1860 if b'.hgsub' in self:
1861 1861 subrepos = sorted(self.substate)
1862 1862 cmp, s = self._repo.dirstate.status(
1863 1863 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1864 1864 )
1865 1865
1866 1866 # check for any possibly clean files
1867 1867 fixup = []
1868 1868 if cmp:
1869 1869 modified2, deleted2, fixup = self._checklookup(cmp)
1870 1870 s.modified.extend(modified2)
1871 1871 s.deleted.extend(deleted2)
1872 1872
1873 1873 if fixup and clean:
1874 1874 s.clean.extend(fixup)
1875 1875
1876 1876 self._poststatusfixup(s, fixup)
1877 1877
1878 1878 if match.always():
1879 1879 # cache for performance
1880 1880 if s.unknown or s.ignored or s.clean:
1881 1881 # "_status" is cached with list*=False in the normal route
1882 1882 self._status = scmutil.status(
1883 1883 s.modified, s.added, s.removed, s.deleted, [], [], []
1884 1884 )
1885 1885 else:
1886 1886 self._status = s
1887 1887
1888 1888 return s
1889 1889
1890 1890 @propertycache
1891 1891 def _copies(self):
1892 1892 p1copies = {}
1893 1893 p2copies = {}
1894 1894 parents = self._repo.dirstate.parents()
1895 1895 p1manifest = self._repo[parents[0]].manifest()
1896 1896 p2manifest = self._repo[parents[1]].manifest()
1897 1897 changedset = set(self.added()) | set(self.modified())
1898 1898 narrowmatch = self._repo.narrowmatch()
1899 1899 for dst, src in self._repo.dirstate.copies().items():
1900 1900 if dst not in changedset or not narrowmatch(dst):
1901 1901 continue
1902 1902 if src in p1manifest:
1903 1903 p1copies[dst] = src
1904 1904 elif src in p2manifest:
1905 1905 p2copies[dst] = src
1906 1906 return p1copies, p2copies
1907 1907
1908 1908 @propertycache
1909 1909 def _manifest(self):
1910 1910 """generate a manifest corresponding to the values in self._status
1911 1911
1912 1912         This reuses the file nodeids from the parent, but we use special node
1913 1913         identifiers for added and modified files. This is used by manifest
1914 1914 merge to see that files are different and by update logic to avoid
1915 1915 deleting newly added files.
1916 1916 """
1917 1917 return self._buildstatusmanifest(self._status)
1918 1918
1919 1919 def _buildstatusmanifest(self, status):
1920 1920 """Builds a manifest that includes the given status results."""
1921 1921 parents = self.parents()
1922 1922
1923 1923 man = parents[0].manifest().copy()
1924 1924
1925 1925 ff = self._flagfunc
1926 1926 for i, l in (
1927 1927 (addednodeid, status.added),
1928 1928 (modifiednodeid, status.modified),
1929 1929 ):
1930 1930 for f in l:
1931 1931 man[f] = i
1932 1932 try:
1933 1933 man.setflag(f, ff(f))
1934 1934 except OSError:
1935 1935 pass
1936 1936
1937 1937 for f in status.deleted + status.removed:
1938 1938 if f in man:
1939 1939 del man[f]
1940 1940
1941 1941 return man
1942 1942
1943 1943 def _buildstatus(
1944 1944 self, other, s, match, listignored, listclean, listunknown
1945 1945 ):
1946 1946 """build a status with respect to another context
1947 1947
1948 1948         This includes logic for maintaining the fast path of status when
1949 1949         comparing the working directory against its parent, which is to skip
1950 1950         building a new manifest when self (the working directory) is compared
1951 1951         against its parent (repo['.']).
1952 1952 """
1953 1953 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1954 1954 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1955 1955 # might have accidentally ended up with the entire contents of the file
1956 1956 # they are supposed to be linking to.
1957 1957 s.modified[:] = self._filtersuspectsymlink(s.modified)
1958 1958 if other != self._repo[b'.']:
1959 1959 s = super(workingctx, self)._buildstatus(
1960 1960 other, s, match, listignored, listclean, listunknown
1961 1961 )
1962 1962 return s
1963 1963
1964 1964 def _matchstatus(self, other, match):
1965 1965 """override the match method with a filter for directory patterns
1966 1966
1967 1967 We use inheritance to customize the match.bad method only in cases of
1968 1968 workingctx since it belongs only to the working directory when
1969 1969 comparing against the parent changeset.
1970 1970
1971 1971 If we aren't comparing against the working directory's parent, then we
1972 1972 just use the default match object sent to us.
1973 1973 """
1974 1974 if other != self._repo[b'.']:
1975 1975
1976 1976 def bad(f, msg):
1977 1977 # 'f' may be a directory pattern from 'match.files()',
1978 1978 # so 'f not in ctx1' is not enough
1979 1979 if f not in other and not other.hasdir(f):
1980 1980 self._repo.ui.warn(
1981 1981 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1982 1982 )
1983 1983
1984 1984 match.bad = bad
1985 1985 return match
1986 1986
1987 1987 def walk(self, match):
1988 1988 '''Generates matching file names.'''
1989 1989 return sorted(
1990 1990 self._repo.dirstate.walk(
1991 1991 self._repo.narrowmatch(match),
1992 1992 subrepos=sorted(self.substate),
1993 1993 unknown=True,
1994 1994 ignored=False,
1995 1995 )
1996 1996 )
1997 1997
1998 1998 def matches(self, match):
1999 1999 match = self._repo.narrowmatch(match)
2000 2000 ds = self._repo.dirstate
2001 2001 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2002 2002
2003 2003 def markcommitted(self, node):
2004 2004 with self._repo.dirstate.parentchange():
2005 2005 for f in self.modified() + self.added():
2006 2006 self._repo.dirstate.normal(f)
2007 2007 for f in self.removed():
2008 2008 self._repo.dirstate.drop(f)
2009 2009 self._repo.dirstate.setparents(node)
2010 2010 self._repo._quick_access_changeid_invalidate()
2011 2011
2012 2012 # write changes out explicitly, because nesting wlock at
2013 2013 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2014 2014 # from immediately doing so for subsequent changing files
2015 2015 self._repo.dirstate.write(self._repo.currenttransaction())
2016 2016
2017 2017 sparse.aftercommit(self._repo, node)
2018 2018
2019 2019 def mergestate(self, clean=False):
2020 2020 if clean:
2021 2021 return mergestatemod.mergestate.clean(self._repo)
2022 2022 return mergestatemod.mergestate.read(self._repo)
2023 2023
2024 2024
2025 2025 class committablefilectx(basefilectx):
2026 2026 """A committablefilectx provides common functionality for a file context
2027 2027 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
2028 2028
2029 2029 def __init__(self, repo, path, filelog=None, ctx=None):
2030 2030 self._repo = repo
2031 2031 self._path = path
2032 2032 self._changeid = None
2033 2033 self._filerev = self._filenode = None
2034 2034
2035 2035 if filelog is not None:
2036 2036 self._filelog = filelog
2037 2037 if ctx:
2038 2038 self._changectx = ctx
2039 2039
2040 2040 def __nonzero__(self):
2041 2041 return True
2042 2042
2043 2043 __bool__ = __nonzero__
2044 2044
2045 2045 def linkrev(self):
2046 2046 # linked to self._changectx no matter if file is modified or not
2047 2047 return self.rev()
2048 2048
2049 2049 def renamed(self):
2050 2050 path = self.copysource()
2051 2051 if not path:
2052 2052 return None
2053 2053 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2054 2054
2055 2055 def parents(self):
2056 2056 '''return parent filectxs, following copies if necessary'''
2057 2057
2058 2058 def filenode(ctx, path):
2059 2059 return ctx._manifest.get(path, nullid)
2060 2060
2061 2061 path = self._path
2062 2062 fl = self._filelog
2063 2063 pcl = self._changectx._parents
2064 2064 renamed = self.renamed()
2065 2065
2066 2066 if renamed:
2067 2067 pl = [renamed + (None,)]
2068 2068 else:
2069 2069 pl = [(path, filenode(pcl[0], path), fl)]
2070 2070
2071 2071 for pc in pcl[1:]:
2072 2072 pl.append((path, filenode(pc, path), fl))
2073 2073
2074 2074 return [
2075 2075 self._parentfilectx(p, fileid=n, filelog=l)
2076 2076 for p, n, l in pl
2077 2077 if n != nullid
2078 2078 ]
2079 2079
2080 2080 def children(self):
2081 2081 return []
2082 2082
2083 2083
2084 2084 class workingfilectx(committablefilectx):
2085 2085 """A workingfilectx object makes access to data related to a particular
2086 2086 file in the working directory convenient."""
2087 2087
2088 2088 def __init__(self, repo, path, filelog=None, workingctx=None):
2089 2089 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2090 2090
2091 2091 @propertycache
2092 2092 def _changectx(self):
2093 2093 return workingctx(self._repo)
2094 2094
2095 2095 def data(self):
2096 2096 return self._repo.wread(self._path)
2097 2097
2098 2098 def copysource(self):
2099 2099 return self._repo.dirstate.copied(self._path)
2100 2100
2101 2101 def size(self):
2102 2102 return self._repo.wvfs.lstat(self._path).st_size
2103 2103
2104 2104 def lstat(self):
2105 2105 return self._repo.wvfs.lstat(self._path)
2106 2106
2107 2107 def date(self):
2108 2108 t, tz = self._changectx.date()
2109 2109 try:
2110 2110 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2111 2111 except OSError as err:
2112 2112 if err.errno != errno.ENOENT:
2113 2113 raise
2114 2114 return (t, tz)
2115 2115
2116 2116 def exists(self):
2117 2117 return self._repo.wvfs.exists(self._path)
2118 2118
2119 2119 def lexists(self):
2120 2120 return self._repo.wvfs.lexists(self._path)
2121 2121
2122 2122 def audit(self):
2123 2123 return self._repo.wvfs.audit(self._path)
2124 2124
2125 2125 def cmp(self, fctx):
2126 2126 """compare with other file context
2127 2127
2128 2128 returns True if different than fctx.
2129 2129 """
2130 2130 # fctx should be a filectx (not a workingfilectx)
2131 2131 # invert comparison to reuse the same code path
2132 2132 return fctx.cmp(self)
2133 2133
2134 2134 def remove(self, ignoremissing=False):
2135 2135 """wraps unlink for a repo's working directory"""
2136 2136 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2137 2137 self._repo.wvfs.unlinkpath(
2138 2138 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2139 2139 )
2140 2140
2141 2141 def write(self, data, flags, backgroundclose=False, **kwargs):
2142 2142 """wraps repo.wwrite"""
2143 2143 return self._repo.wwrite(
2144 2144 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2145 2145 )
2146 2146
2147 2147 def markcopied(self, src):
2148 2148 """marks this file a copy of `src`"""
2149 2149 self._repo.dirstate.copy(src, self._path)
2150 2150
2151 2151 def clearunknown(self):
2152 2152 """Removes conflicting items in the working directory so that
2153 2153 ``write()`` can be called successfully.
2154 2154 """
2155 2155 wvfs = self._repo.wvfs
2156 2156 f = self._path
2157 2157 wvfs.audit(f)
2158 2158 if self._repo.ui.configbool(
2159 2159 b'experimental', b'merge.checkpathconflicts'
2160 2160 ):
2161 2161 # remove files under the directory as they should already be
2162 2162 # warned and backed up
2163 2163 if wvfs.isdir(f) and not wvfs.islink(f):
2164 2164 wvfs.rmtree(f, forcibly=True)
2165 2165 for p in reversed(list(pathutil.finddirs(f))):
2166 2166 if wvfs.isfileorlink(p):
2167 2167 wvfs.unlink(p)
2168 2168 break
2169 2169 else:
2170 2170 # don't remove files if path conflicts are not processed
2171 2171 if wvfs.isdir(f) and not wvfs.islink(f):
2172 2172 wvfs.removedirs(f)
2173 2173
2174 2174 def setflags(self, l, x):
2175 2175 self._repo.wvfs.setflags(self._path, l, x)
2176 2176
2177 2177
2178 2178 class overlayworkingctx(committablectx):
2179 2179 """Wraps another mutable context with a write-back cache that can be
2180 2180 converted into a commit context.
2181 2181
2182 2182 self._cache[path] maps to a dict with keys: {
2183 2183 'exists': bool?
2184 2184 'date': date?
2185 2185 'data': str?
2186 2186 'flags': str?
2187 2187 'copied': str? (path or None)
2188 2188 }
2189 2189     If `exists` is True, `flags` must be non-None and `date` is non-None. If it
2190 2190     is `False`, the file was deleted.
2191 2191 """
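    # Worked example (illustrative values only, not from the original change):
    # after write(b'foo', b'data') and remove(b'bar'), self._cache would
    # resemble:
    #
    #   {b'foo': {b'exists': True, b'data': b'data', b'date': (t, tz),
    #             b'flags': b'', b'copied': None},
    #    b'bar': {b'exists': False, b'data': None, b'date': None,
    #             b'flags': b'', b'copied': None}}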
2192 2192
2193 2193 def __init__(self, repo):
2194 2194 super(overlayworkingctx, self).__init__(repo)
2195 2195 self.clean()
2196 2196
2197 2197 def setbase(self, wrappedctx):
2198 2198 self._wrappedctx = wrappedctx
2199 2199 self._parents = [wrappedctx]
2200 2200 # Drop old manifest cache as it is now out of date.
2201 2201 # This is necessary when, e.g., rebasing several nodes with one
2202 2202 # ``overlayworkingctx`` (e.g. with --collapse).
2203 2203 util.clearcachedproperty(self, b'_manifest')
2204 2204
2205 2205 def setparents(self, p1node, p2node=nullid):
2206 2206 assert p1node == self._wrappedctx.node()
2207 2207 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2208 2208
2209 2209 def data(self, path):
2210 2210 if self.isdirty(path):
2211 2211 if self._cache[path][b'exists']:
2212 2212 if self._cache[path][b'data'] is not None:
2213 2213 return self._cache[path][b'data']
2214 2214 else:
2215 2215 # Must fallback here, too, because we only set flags.
2216 2216 return self._wrappedctx[path].data()
2217 2217 else:
2218 2218 raise error.ProgrammingError(
2219 2219 b"No such file or directory: %s" % path
2220 2220 )
2221 2221 else:
2222 2222 return self._wrappedctx[path].data()
2223 2223
2224 2224 @propertycache
2225 2225 def _manifest(self):
2226 2226 parents = self.parents()
2227 2227 man = parents[0].manifest().copy()
2228 2228
2229 2229 flag = self._flagfunc
2230 2230 for path in self.added():
2231 2231 man[path] = addednodeid
2232 2232 man.setflag(path, flag(path))
2233 2233 for path in self.modified():
2234 2234 man[path] = modifiednodeid
2235 2235 man.setflag(path, flag(path))
2236 2236 for path in self.removed():
2237 2237 del man[path]
2238 2238 return man
2239 2239
2240 2240 @propertycache
2241 2241 def _flagfunc(self):
2242 2242 def f(path):
2243 2243 return self._cache[path][b'flags']
2244 2244
2245 2245 return f
2246 2246
2247 2247 def files(self):
2248 2248 return sorted(self.added() + self.modified() + self.removed())
2249 2249
2250 2250 def modified(self):
2251 2251 return [
2252 2252 f
2253 2253 for f in self._cache.keys()
2254 2254 if self._cache[f][b'exists'] and self._existsinparent(f)
2255 2255 ]
2256 2256
2257 2257 def added(self):
2258 2258 return [
2259 2259 f
2260 2260 for f in self._cache.keys()
2261 2261 if self._cache[f][b'exists'] and not self._existsinparent(f)
2262 2262 ]
2263 2263
2264 2264 def removed(self):
2265 2265 return [
2266 2266 f
2267 2267 for f in self._cache.keys()
2268 2268 if not self._cache[f][b'exists'] and self._existsinparent(f)
2269 2269 ]
2270 2270
2271 2271 def p1copies(self):
2272 2272 copies = {}
2273 2273 narrowmatch = self._repo.narrowmatch()
2274 2274 for f in self._cache.keys():
2275 2275 if not narrowmatch(f):
2276 2276 continue
2277 2277 copies.pop(f, None) # delete if it exists
2278 2278 source = self._cache[f][b'copied']
2279 2279 if source:
2280 2280 copies[f] = source
2281 2281 return copies
2282 2282
2283 2283 def p2copies(self):
2284 2284 copies = {}
2285 2285 narrowmatch = self._repo.narrowmatch()
2286 2286 for f in self._cache.keys():
2287 2287 if not narrowmatch(f):
2288 2288 continue
2289 2289 copies.pop(f, None) # delete if it exists
2290 2290 source = self._cache[f][b'copied']
2291 2291 if source:
2292 2292 copies[f] = source
2293 2293 return copies
2294 2294
2295 2295 def isinmemory(self):
2296 2296 return True
2297 2297
2298 2298 def filedate(self, path):
2299 2299 if self.isdirty(path):
2300 2300 return self._cache[path][b'date']
2301 2301 else:
2302 2302 return self._wrappedctx[path].date()
2303 2303
2304 2304 def markcopied(self, path, origin):
2305 2305 self._markdirty(
2306 2306 path,
2307 2307 exists=True,
2308 2308 date=self.filedate(path),
2309 2309 flags=self.flags(path),
2310 2310 copied=origin,
2311 2311 )
2312 2312
2313 2313 def copydata(self, path):
2314 2314 if self.isdirty(path):
2315 2315 return self._cache[path][b'copied']
2316 2316 else:
2317 2317 return None
2318 2318
2319 2319 def flags(self, path):
2320 2320 if self.isdirty(path):
2321 2321 if self._cache[path][b'exists']:
2322 2322 return self._cache[path][b'flags']
2323 2323 else:
2324 2324 raise error.ProgrammingError(
2325 2325 b"No such file or directory: %s" % path
2326 2326 )
2327 2327 else:
2328 2328 return self._wrappedctx[path].flags()
2329 2329
2330 2330 def __contains__(self, key):
2331 2331 if key in self._cache:
2332 2332 return self._cache[key][b'exists']
2333 2333 return key in self.p1()
2334 2334
2335 2335 def _existsinparent(self, path):
2336 2336 try:
2337 2337             # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2338 2338 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2339 2339 # with an ``exists()`` function.
2340 2340 self._wrappedctx[path]
2341 2341 return True
2342 2342 except error.ManifestLookupError:
2343 2343 return False
2344 2344
2345 2345 def _auditconflicts(self, path):
2346 2346 """Replicates conflict checks done by wvfs.write().
2347 2347
2348 2348 Since we never write to the filesystem and never call `applyupdates` in
2349 2349 IMM, we'll never check that a path is actually writable -- e.g., because
2350 2350 it adds `a/foo`, but `a` is actually a file in the other commit.
2351 2351 """
2352 2352
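        # Concrete cases (editorial gloss, assuming paths b'a' and b'a/b'):
        # writing b'a/b' is rejected when b'a' is a file in p1 (the component
        # loop below), and writing b'a' is rejected when p1 tracks b'a/b'
        # (the manifest walk), mirroring what wvfs.write() would hit on a
        # real filesystem.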
2353 2353 def fail(path, component):
2354 2354 # p1() is the base and we're receiving "writes" for p2()'s
2355 2355 # files.
2356 2356 if b'l' in self.p1()[component].flags():
2357 2357 raise error.Abort(
2358 2358                     b"error: '%s' conflicts with symlink '%s' "
2359 2359 b"in %d." % (path, component, self.p1().rev())
2360 2360 )
2361 2361 else:
2362 2362 raise error.Abort(
2363 2363 b"error: '%s' conflicts with file '%s' in "
2364 2364 b"%d." % (path, component, self.p1().rev())
2365 2365 )
2366 2366
2367 2367 # Test that each new directory to be created to write this path from p2
2368 2368 # is not a file in p1.
2369 2369 components = path.split(b'/')
2370 2370 for i in pycompat.xrange(len(components)):
2371 2371 component = b"/".join(components[0:i])
2372 2372 if component in self:
2373 2373 fail(path, component)
2374 2374
2375 2375 # Test the other direction -- that this path from p2 isn't a directory
2376 2376 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2377 2377 match = self.match([path], default=b'path')
2378 2378 mfiles = list(self.p1().manifest().walk(match))
2379 2379 if len(mfiles) > 0:
2380 2380 if len(mfiles) == 1 and mfiles[0] == path:
2381 2381 return
2382 2382 # omit the files which are deleted in current IMM wctx
2383 2383 mfiles = [m for m in mfiles if m in self]
2384 2384 if not mfiles:
2385 2385 return
2386 2386 raise error.Abort(
2387 2387 b"error: file '%s' cannot be written because "
2388 2388                 b"'%s/' is a directory in %s (containing %d "
2389 2389 b"entries: %s)"
2390 2390 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2391 2391 )
2392 2392
2393 2393 def write(self, path, data, flags=b'', **kwargs):
2394 2394 if data is None:
2395 2395 raise error.ProgrammingError(b"data must be non-None")
2396 2396 self._auditconflicts(path)
2397 2397 self._markdirty(
2398 2398 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2399 2399 )
2400 2400
2401 2401 def setflags(self, path, l, x):
2402 2402 flag = b''
2403 2403 if l:
2404 2404 flag = b'l'
2405 2405 elif x:
2406 2406 flag = b'x'
2407 2407 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2408 2408
2409 2409 def remove(self, path):
2410 2410 self._markdirty(path, exists=False)
2411 2411
2412 2412 def exists(self, path):
2413 2413 """exists behaves like `lexists`, but needs to follow symlinks and
2414 2414 return False if they are broken.
2415 2415 """
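        # Example (illustrative cache entry): a dirty symlink stores its
        # target in b'data', so {b'exists': True, b'flags': b'l',
        # b'data': b'target'} makes exists(b'link') recurse into
        # exists(b'target').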
2416 2416 if self.isdirty(path):
2417 2417 # If this path exists and is a symlink, "follow" it by calling
2418 2418 # exists on the destination path.
2419 2419 if (
2420 2420 self._cache[path][b'exists']
2421 2421 and b'l' in self._cache[path][b'flags']
2422 2422 ):
2423 2423 return self.exists(self._cache[path][b'data'].strip())
2424 2424 else:
2425 2425 return self._cache[path][b'exists']
2426 2426
2427 2427 return self._existsinparent(path)
2428 2428
2429 2429 def lexists(self, path):
2430 2430 """lexists returns True if the path exists"""
2431 2431 if self.isdirty(path):
2432 2432 return self._cache[path][b'exists']
2433 2433
2434 2434 return self._existsinparent(path)
2435 2435
2436 2436 def size(self, path):
2437 2437 if self.isdirty(path):
2438 2438 if self._cache[path][b'exists']:
2439 2439 return len(self._cache[path][b'data'])
2440 2440 else:
2441 2441 raise error.ProgrammingError(
2442 2442 b"No such file or directory: %s" % path
2443 2443 )
2444 2444 return self._wrappedctx[path].size()
2445 2445
2446 2446 def tomemctx(
2447 2447 self,
2448 2448 text,
2449 2449 branch=None,
2450 2450 extra=None,
2451 2451 date=None,
2452 2452 parents=None,
2453 2453 user=None,
2454 2454 editor=None,
2455 2455 ):
2456 2456 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2457 2457 committed.
2458 2458
2459 2459 ``text`` is the commit message.
2460 2460 ``parents`` (optional) are rev numbers.
2461 2461 """
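        # Sketch of a typical call (illustrative; ``destrev`` stands in for a
        # hypothetical destination revision number):
        #
        #   mctx = wctx.tomemctx(b'rebased commit', parents=(destrev, None))
        #   node = repo.commitctx(mctx)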
2462 2462 # Default parents to the wrapped context if not passed.
2463 2463 if parents is None:
2464 2464 parents = self.parents()
2465 2465 if len(parents) == 1:
2466 2466 parents = (parents[0], None)
2467 2467
2468 2468 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2469 2469 if parents[1] is None:
2470 2470 parents = (self._repo[parents[0]], None)
2471 2471 else:
2472 2472 parents = (self._repo[parents[0]], self._repo[parents[1]])
2473 2473
2474 2474 files = self.files()
2475 2475
2476 2476 def getfile(repo, memctx, path):
2477 2477 if self._cache[path][b'exists']:
2478 2478 return memfilectx(
2479 2479 repo,
2480 2480 memctx,
2481 2481 path,
2482 2482 self._cache[path][b'data'],
2483 2483 b'l' in self._cache[path][b'flags'],
2484 2484 b'x' in self._cache[path][b'flags'],
2485 2485 self._cache[path][b'copied'],
2486 2486 )
2487 2487 else:
2488 2488 # Returning None, but including the path in `files`, is
2489 2489 # necessary for memctx to register a deletion.
2490 2490 return None
2491 2491
2492 2492 if branch is None:
2493 2493 branch = self._wrappedctx.branch()
2494 2494
2495 2495 return memctx(
2496 2496 self._repo,
2497 2497 parents,
2498 2498 text,
2499 2499 files,
2500 2500 getfile,
2501 2501 date=date,
2502 2502 extra=extra,
2503 2503 user=user,
2504 2504 branch=branch,
2505 2505 editor=editor,
2506 2506 )
2507 2507
2508 2508 def tomemctx_for_amend(self, precursor):
2509 2509 extra = precursor.extra().copy()
2510 2510 extra[b'amend_source'] = precursor.hex()
2511 2511 return self.tomemctx(
2512 2512 text=precursor.description(),
2513 2513 branch=precursor.branch(),
2514 2514 extra=extra,
2515 2515 date=precursor.date(),
2516 2516 user=precursor.user(),
2517 2517 )
2518 2518
2519 2519 def isdirty(self, path):
2520 2520 return path in self._cache
2521 2521
2522     def isempty(self):
2522     def nofilechanges(self):
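        # Naming note (editorial): this predicate, renamed from the
        # misleading ``isempty()``, answers "did no files change?", not "is
        # the commit empty?". An illustrative caller (``allowemptycommit`` is
        # a hypothetical flag):
        #
        #   if wctx.nofilechanges() and not allowemptycommit:
        #       ...  # skip creating a changeset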
2523 2523         # We need to discard any keys that are actually clean before checking
2524 2524         # whether any files changed.
2525 2525 self._compact()
2526 2526 return len(self._cache) == 0
2527 2527
2528 2528 def clean(self):
2529 2529 self._cache = {}
2530 2530
2531 2531 def _compact(self):
2532 2532 """Removes keys from the cache that are actually clean, by comparing
2533 2533 them with the underlying context.
2534 2534
2535 2535 This can occur during the merge process, e.g. by passing --tool :local
2536 2536 to resolve a conflict.
2537 2537 """
2538 2538 keys = []
2539 2539 # This won't be perfect, but can help performance significantly when
2540 2540 # using things like remotefilelog.
2541 2541 scmutil.prefetchfiles(
2542 2542 self.repo(),
2543 2543 [
2544 2544 (
2545 2545 self.p1().rev(),
2546 2546 scmutil.matchfiles(self.repo(), self._cache.keys()),
2547 2547 )
2548 2548 ],
2549 2549 )
2550 2550
2551 2551 for path in self._cache.keys():
2552 2552 cache = self._cache[path]
2553 2553 try:
2554 2554 underlying = self._wrappedctx[path]
2555 2555 if (
2556 2556 underlying.data() == cache[b'data']
2557 2557 and underlying.flags() == cache[b'flags']
2558 2558 ):
2559 2559 keys.append(path)
2560 2560 except error.ManifestLookupError:
2561 2561 # Path not in the underlying manifest (created).
2562 2562 continue
2563 2563
2564 2564 for path in keys:
2565 2565 del self._cache[path]
2566 2566 return keys
2567 2567
2568 2568 def _markdirty(
2569 2569 self, path, exists, data=None, date=None, flags=b'', copied=None
2570 2570 ):
2571 2571 # data not provided, let's see if we already have some; if not, let's
2572 2572 # grab it from our underlying context, so that we always have data if
2573 2573 # the file is marked as existing.
2574 2574 if exists and data is None:
2575 2575 oldentry = self._cache.get(path) or {}
2576 2576 data = oldentry.get(b'data')
2577 2577 if data is None:
2578 2578 data = self._wrappedctx[path].data()
2579 2579
2580 2580 self._cache[path] = {
2581 2581 b'exists': exists,
2582 2582 b'data': data,
2583 2583 b'date': date,
2584 2584 b'flags': flags,
2585 2585 b'copied': copied,
2586 2586 }
2587 2587
2588 2588 def filectx(self, path, filelog=None):
2589 2589 return overlayworkingfilectx(
2590 2590 self._repo, path, parent=self, filelog=filelog
2591 2591 )
2592 2592
2593 2593
2594 2594 class overlayworkingfilectx(committablefilectx):
2595 2595     """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2596 2596 cache, which can be flushed through later by calling ``flush()``."""
2597 2597
2598 2598 def __init__(self, repo, path, filelog=None, parent=None):
2599 2599 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2600 2600 self._repo = repo
2601 2601 self._parent = parent
2602 2602 self._path = path
2603 2603
2604 2604 def cmp(self, fctx):
2605 2605 return self.data() != fctx.data()
2606 2606
2607 2607 def changectx(self):
2608 2608 return self._parent
2609 2609
2610 2610 def data(self):
2611 2611 return self._parent.data(self._path)
2612 2612
2613 2613 def date(self):
2614 2614 return self._parent.filedate(self._path)
2615 2615
2616 2616 def exists(self):
2617 2617 return self.lexists()
2618 2618
2619 2619 def lexists(self):
2620 2620 return self._parent.exists(self._path)
2621 2621
2622 2622 def copysource(self):
2623 2623 return self._parent.copydata(self._path)
2624 2624
2625 2625 def size(self):
2626 2626 return self._parent.size(self._path)
2627 2627
2628 2628 def markcopied(self, origin):
2629 2629 self._parent.markcopied(self._path, origin)
2630 2630
2631 2631 def audit(self):
2632 2632 pass
2633 2633
2634 2634 def flags(self):
2635 2635 return self._parent.flags(self._path)
2636 2636
2637 2637 def setflags(self, islink, isexec):
2638 2638 return self._parent.setflags(self._path, islink, isexec)
2639 2639
2640 2640 def write(self, data, flags, backgroundclose=False, **kwargs):
2641 2641 return self._parent.write(self._path, data, flags, **kwargs)
2642 2642
2643 2643 def remove(self, ignoremissing=False):
2644 2644 return self._parent.remove(self._path)
2645 2645
2646 2646 def clearunknown(self):
2647 2647 pass
2648 2648
2649 2649
2650 2650 class workingcommitctx(workingctx):
2651 2651 """A workingcommitctx object makes access to data related to
2652 2652 the revision being committed convenient.
2653 2653
2654 2654 This hides changes in the working directory, if they aren't
2655 2655 committed in this context.
2656 2656 """
2657 2657
2658 2658 def __init__(
2659 2659 self, repo, changes, text=b"", user=None, date=None, extra=None
2660 2660 ):
2661 2661 super(workingcommitctx, self).__init__(
2662 2662 repo, text, user, date, extra, changes
2663 2663 )
2664 2664
2665 2665 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2666 2666 """Return matched files only in ``self._status``
2667 2667
2668 2668 Uncommitted files appear "clean" via this context, even if
2669 2669 they aren't actually so in the working directory.
2670 2670 """
2671 2671 if clean:
2672 2672 clean = [f for f in self._manifest if f not in self._changedset]
2673 2673 else:
2674 2674 clean = []
2675 2675 return scmutil.status(
2676 2676 [f for f in self._status.modified if match(f)],
2677 2677 [f for f in self._status.added if match(f)],
2678 2678 [f for f in self._status.removed if match(f)],
2679 2679 [],
2680 2680 [],
2681 2681 [],
2682 2682 clean,
2683 2683 )
2684 2684
2685 2685 @propertycache
2686 2686 def _changedset(self):
2687 2687 """Return the set of files changed in this context
2688 2688 """
2689 2689 changed = set(self._status.modified)
2690 2690 changed.update(self._status.added)
2691 2691 changed.update(self._status.removed)
2692 2692 return changed
2693 2693
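# Illustrative effect of workingcommitctx (assumed setup): with only b'a.txt'
# listed in `changes`, other working-directory edits read as clean here:
#
#     changes = scmutil.status([b'a.txt'], [], [], [], [], [], [])
#     wctx = workingcommitctx(repo, changes, text=b'partial commit')
#     # status queried through wctx now reports only b'a.txt' as modified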
2694 2694
2695 2695 def makecachingfilectxfn(func):
2696 2696 """Create a filectxfn that caches based on the path.
2697 2697
2698 2698 We can't use util.cachefunc because it uses all arguments as the cache
2699 2699 key and this creates a cycle since the arguments include the repo and
2700 2700 memctx.
2701 2701 """
2702 2702 cache = {}
2703 2703
2704 2704 def getfilectx(repo, memctx, path):
2705 2705 if path not in cache:
2706 2706 cache[path] = func(repo, memctx, path)
2707 2707 return cache[path]
2708 2708
2709 2709 return getfilectx
2710 2710
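# Sketch of the path-keyed memoization above (hypothetical callback): the
# repo/memctx arguments are deliberately left out of the cache key, so the
# cache holds no reference cycle through them:
#
#     def expensive(repo, memctx, path):
#         fctx = repo[b'tip'][path]
#         return memfilectx(repo, memctx, path, fctx.data())
#
#     getfilectx = makecachingfilectxfn(expensive)
#     getfilectx(repo, memctx, b'a.txt')  # computed once
#     getfilectx(repo, memctx, b'a.txt')  # served from the cache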
2711 2711
2712 2712 def memfilefromctx(ctx):
2713 2713 """Given a context return a memfilectx for ctx[path]
2714 2714
2715 2715 This is a convenience method for building a memctx based on another
2716 2716 context.
2717 2717 """
2718 2718
2719 2719 def getfilectx(repo, memctx, path):
2720 2720 fctx = ctx[path]
2721 2721 copysource = fctx.copysource()
2722 2722 return memfilectx(
2723 2723 repo,
2724 2724 memctx,
2725 2725 path,
2726 2726 fctx.data(),
2727 2727 islink=fctx.islink(),
2728 2728 isexec=fctx.isexec(),
2729 2729 copysource=copysource,
2730 2730 )
2731 2731
2732 2732 return getfilectx
2733 2733
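# Hypothetical use of the helper above: replay the files of an existing
# changeset through an in-memory commit (all names are illustrative, and it
# is assumed no files were removed in `base`):
#
#     base = repo[b'.']
#     mctx = memctx(repo, [base.p1().node(), None], base.description(),
#                   base.files(), memfilefromctx(base))
#     mctx.commit()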
2734 2734
2735 2735 def memfilefrompatch(patchstore):
2736 2736 """Given a patch (e.g. patchstore object) return a memfilectx
2737 2737
2738 2738 This is a convenience method for building a memctx based on a patchstore.
2739 2739 """
2740 2740
2741 2741 def getfilectx(repo, memctx, path):
2742 2742 data, mode, copysource = patchstore.getfile(path)
2743 2743 if data is None:
2744 2744 return None
2745 2745 islink, isexec = mode
2746 2746 return memfilectx(
2747 2747 repo,
2748 2748 memctx,
2749 2749 path,
2750 2750 data,
2751 2751 islink=islink,
2752 2752 isexec=isexec,
2753 2753 copysource=copysource,
2754 2754 )
2755 2755
2756 2756 return getfilectx
2757 2757
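# Usage note: memctx.__init__ below wraps a patch.filestore through this
# helper automatically, so callers can pass the store directly (sketch):
#
#     getfilectx = memfilefrompatch(store)  # store: a populated filestore
#     getfilectx(repo, memctx, b'a.txt')    # None if the patch removed it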
2758 2758
2759 2759 class memctx(committablectx):
2760 2760 """Use memctx to perform in-memory commits via localrepo.commitctx().
2761 2761
2762 2762 Revision information is supplied at initialization time, while
2763 2763 related file data is made available through a callback
2764 2764 mechanism. 'repo' is the current localrepo, 'parents' is a
2765 2765 sequence of two parent revision identifiers (pass None for every
2766 2766 missing parent), 'text' is the commit message, and 'files' lists
2767 2767 the names of files touched by the revision (normalized and relative
2768 2768 to the repository root).
2769 2769
2770 2770 filectxfn(repo, memctx, path) is a callable receiving the
2771 2771 repository, the current memctx object and the normalized path of
2772 2772 the requested file, relative to the repository root. It is called
2773 2773 by the commit function for every file in 'files', but the call
2774 2774 order is undefined. If the file is available in the revision being
2775 2775 committed (updated or added), filectxfn returns a memfilectx
2776 2776 object. If the file was removed, filectxfn returns None (in recent
2777 2777 Mercurial). Moved files are represented by marking the source file
2778 2778 removed and the new file added with copy information (see
2779 2779 memfilectx).
2780 2780
2781 2781 user receives the committer name and defaults to the current
2782 2782 repository username; date is the commit date in any format
2783 2783 supported by dateutil.parsedate() and defaults to the current date;
2784 2784 extra is a dictionary of metadata or is left empty.
2785 2785 """
2786 2786
2787 2787 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2788 2788 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2789 2789 # this field to determine what to do in filectxfn.
2790 2790 _returnnoneformissingfiles = True
2791 2791
2792 2792 def __init__(
2793 2793 self,
2794 2794 repo,
2795 2795 parents,
2796 2796 text,
2797 2797 files,
2798 2798 filectxfn,
2799 2799 user=None,
2800 2800 date=None,
2801 2801 extra=None,
2802 2802 branch=None,
2803 2803 editor=None,
2804 2804 ):
2805 2805 super(memctx, self).__init__(
2806 2806 repo, text, user, date, extra, branch=branch
2807 2807 )
2808 2808 self._rev = None
2809 2809 self._node = None
2810 2810 parents = [(p or nullid) for p in parents]
2811 2811 p1, p2 = parents
2812 2812 self._parents = [self._repo[p] for p in (p1, p2)]
2813 2813 files = sorted(set(files))
2814 2814 self._files = files
2815 2815 self.substate = {}
2816 2816
2817 2817 if isinstance(filectxfn, patch.filestore):
2818 2818 filectxfn = memfilefrompatch(filectxfn)
2819 2819 elif not callable(filectxfn):
2820 2820 # if store is not callable, wrap it in a function
2821 2821 filectxfn = memfilefromctx(filectxfn)
2822 2822
2823 2823 # memoizing increases performance for e.g. vcs convert scenarios.
2824 2824 self._filectxfn = makecachingfilectxfn(filectxfn)
2825 2825
2826 2826 if editor:
2827 2827 self._text = editor(self._repo, self, [])
2828 2828 self._repo.savecommitmessage(self._text)
2829 2829
2830 2830 def filectx(self, path, filelog=None):
2831 2831 """get a file context from the working directory
2832 2832
2833 2833 Returns None if the file doesn't exist and should be removed."""
2834 2834 return self._filectxfn(self._repo, self, path)
2835 2835
2836 2836 def commit(self):
2837 2837 """commit context to the repo"""
2838 2838 return self._repo.commitctx(self)
2839 2839
2840 2840 @propertycache
2841 2841 def _manifest(self):
2842 2842 """generate a manifest based on the return values of filectxfn"""
2843 2843
2844 2844 # keep this simple for now; just worry about p1
2845 2845 pctx = self._parents[0]
2846 2846 man = pctx.manifest().copy()
2847 2847
2848 2848 for f in self._status.modified:
2849 2849 man[f] = modifiednodeid
2850 2850
2851 2851 for f in self._status.added:
2852 2852 man[f] = addednodeid
2853 2853
2854 2854 for f in self._status.removed:
2855 2855 if f in man:
2856 2856 del man[f]
2857 2857
2858 2858 return man
2859 2859
2860 2860 @propertycache
2861 2861 def _status(self):
2862 2862 """Calculate exact status from ``files`` specified at construction
2863 2863 """
2864 2864 man1 = self.p1().manifest()
2865 2865 p2 = self._parents[1]
2866 2866 # "1 < len(self._parents)" can't be used for checking
2867 2867 # existence of the 2nd parent, because "memctx._parents" is
2868 2868 # explicitly initialized by the list, of which length is 2.
2869 2869 if p2.node() != nullid:
2870 2870 man2 = p2.manifest()
2871 2871 managing = lambda f: f in man1 or f in man2
2872 2872 else:
2873 2873 managing = lambda f: f in man1
2874 2874
2875 2875 modified, added, removed = [], [], []
2876 2876 for f in self._files:
2877 2877 if not managing(f):
2878 2878 added.append(f)
2879 2879 elif self[f]:
2880 2880 modified.append(f)
2881 2881 else:
2882 2882 removed.append(f)
2883 2883
2884 2884 return scmutil.status(modified, added, removed, [], [], [], [])
2885 2885
2886 2886
2887 2887 class memfilectx(committablefilectx):
2888 2888 """memfilectx represents an in-memory file to commit.
2889 2889
2890 2890 See memctx and committablefilectx for more details.
2891 2891 """
2892 2892
2893 2893 def __init__(
2894 2894 self,
2895 2895 repo,
2896 2896 changectx,
2897 2897 path,
2898 2898 data,
2899 2899 islink=False,
2900 2900 isexec=False,
2901 2901 copysource=None,
2902 2902 ):
2903 2903 """
2904 2904 path is the normalized file path relative to the repository root.
2905 2905 data is the file content as a string.
2906 2906 islink is True if the file is a symbolic link.
2907 2907 isexec is True if the file is executable.
2908 2908 copysource is the source file path if the current file was copied in
2909 2909 the revision being committed, or None."""
2910 2910 super(memfilectx, self).__init__(repo, path, None, changectx)
2911 2911 self._data = data
2912 2912 if islink:
2913 2913 self._flags = b'l'
2914 2914 elif isexec:
2915 2915 self._flags = b'x'
2916 2916 else:
2917 2917 self._flags = b''
2918 2918 self._copysource = copysource
2919 2919
2920 2920 def copysource(self):
2921 2921 return self._copysource
2922 2922
2923 2923 def cmp(self, fctx):
2924 2924 return self.data() != fctx.data()
2925 2925
2926 2926 def data(self):
2927 2927 return self._data
2928 2928
2929 2929 def remove(self, ignoremissing=False):
2930 2930 """wraps unlink for a repo's working directory"""
2931 2931 # need to figure out what to do here
2932 2932 del self._changectx[self._path]
2933 2933
2934 2934 def write(self, data, flags, **kwargs):
2935 2935 """wraps repo.wwrite"""
2936 2936 self._data = data
2937 2937
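# Illustrative construction matching the parameters documented above (the
# values are assumptions):
#
#     fctx = memfilectx(repo, mctx, b'docs/readme.txt', b'hello\n',
#                       islink=False, isexec=True, copysource=None)
#     fctx.flags()  # -> b'x'; isexec applies because islink is False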
2938 2938
2939 2939 class metadataonlyctx(committablectx):
2940 2940 """Like memctx but it's reusing the manifest of different commit.
2941 2941 Intended to be used by lightweight operations that are creating
2942 2942 metadata-only changes.
2943 2943
2944 2944 Revision information is supplied at initialization time. 'repo' is the
2945 2945 current localrepo, 'ctx' is original revision which manifest we're reuisng
2946 2946 'parents' is a sequence of two parent revisions identifiers (pass None for
2947 2947 every missing parent), 'text' is the commit.
2948 2948
2949 2949 user receives the committer name and defaults to the current repository
2950 2950 username; date is the commit date in any format supported by
2951 2951 dateutil.parsedate() and defaults to the current date; extra is a
2952 2952 dictionary of metadata or is left empty.
2953 2953 """
2954 2954
2955 2955 def __init__(
2956 2956 self,
2957 2957 repo,
2958 2958 originalctx,
2959 2959 parents=None,
2960 2960 text=None,
2961 2961 user=None,
2962 2962 date=None,
2963 2963 extra=None,
2964 2964 editor=None,
2965 2965 ):
2966 2966 if text is None:
2967 2967 text = originalctx.description()
2968 2968 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2969 2969 self._rev = None
2970 2970 self._node = None
2971 2971 self._originalctx = originalctx
2972 2972 self._manifestnode = originalctx.manifestnode()
2973 2973 if parents is None:
2974 2974 parents = originalctx.parents()
2975 2975 else:
2976 2976 parents = [repo[p] for p in parents if p is not None]
2977 2977 parents = parents[:]
2978 2978 while len(parents) < 2:
2979 2979 parents.append(repo[nullid])
2980 2980 p1, p2 = self._parents = parents
2981 2981
2982 2982 # sanity check to ensure that the reused manifest parents are
2983 2983 # manifests of our commit parents
2984 2984 mp1, mp2 = self.manifestctx().parents
2985 2985 if p1 != nullid and p1.manifestnode() != mp1:
2986 2986 raise RuntimeError(
2987 2987 r"can't reuse the manifest: its p1 "
2988 2988 r"doesn't match the new ctx p1"
2989 2989 )
2990 2990 if p2 != nullid and p2.manifestnode() != mp2:
2991 2991 raise RuntimeError(
2992 2992 r"can't reuse the manifest: "
2993 2993 r"its p2 doesn't match the new ctx p2"
2994 2994 )
2995 2995
2996 2996 self._files = originalctx.files()
2997 2997 self.substate = {}
2998 2998
2999 2999 if editor:
3000 3000 self._text = editor(self._repo, self, [])
3001 3001 self._repo.savecommitmessage(self._text)
3002 3002
3003 3003 def manifestnode(self):
3004 3004 return self._manifestnode
3005 3005
3006 3006 @property
3007 3007 def _manifestctx(self):
3008 3008 return self._repo.manifestlog[self._manifestnode]
3009 3009
3010 3010 def filectx(self, path, filelog=None):
3011 3011 return self._originalctx.filectx(path, filelog=filelog)
3012 3012
3013 3013 def commit(self):
3014 3014 """commit context to the repo"""
3015 3015 return self._repo.commitctx(self)
3016 3016
3017 3017 @property
3018 3018 def _manifest(self):
3019 3019 return self._originalctx.manifest()
3020 3020
3021 3021 @propertycache
3022 3022 def _status(self):
3023 3023 """Calculate exact status from ``files`` specified in the ``origctx``
3024 3024 and parents manifests.
3025 3025 """
3026 3026 man1 = self.p1().manifest()
3027 3027 p2 = self._parents[1]
3028 3028 # "1 < len(self._parents)" can't be used for checking
3029 3029 # existence of the 2nd parent, because "metadataonlyctx._parents" is
3030 3030 # explicitly initialized by the list, of which length is 2.
3031 3031 if p2.node() != nullid:
3032 3032 man2 = p2.manifest()
3033 3033 managing = lambda f: f in man1 or f in man2
3034 3034 else:
3035 3035 managing = lambda f: f in man1
3036 3036
3037 3037 modified, added, removed = [], [], []
3038 3038 for f in self._files:
3039 3039 if not managing(f):
3040 3040 added.append(f)
3041 3041 elif f in self:
3042 3042 modified.append(f)
3043 3043 else:
3044 3044 removed.append(f)
3045 3045
3046 3046 return scmutil.status(modified, added, removed, [], [], [], [])
3047 3047
3048 3048
3049 3049 class arbitraryfilectx(object):
3050 3050 """Allows you to use filectx-like functions on a file in an arbitrary
3051 3051 location on disk, possibly not in the working directory.
3052 3052 """
3053 3053
3054 3054 def __init__(self, path, repo=None):
3055 3055 # Repo is optional because contrib/simplemerge uses this class.
3056 3056 self._repo = repo
3057 3057 self._path = path
3058 3058
3059 3059 def cmp(self, fctx):
3060 3060 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3061 3061 # path if either side is a symlink.
3062 3062 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3063 3063 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3064 3064 # Add a fast-path for merge if both sides are disk-backed.
3065 3065 # Note that filecmp uses the opposite return values (True if same)
3066 3066 # from our cmp functions (True if different).
3067 3067 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3068 3068 return self.data() != fctx.data()
3069 3069
3070 3070 def path(self):
3071 3071 return self._path
3072 3072
3073 3073 def flags(self):
3074 3074 return b''
3075 3075
3076 3076 def data(self):
3077 3077 return util.readfile(self._path)
3078 3078
3079 3079 def decodeddata(self):
3080 3080 with open(self._path, b"rb") as f:
3081 3081 return f.read()
3082 3082
3083 3083 def remove(self):
3084 3084 util.unlink(self._path)
3085 3085
3086 3086 def write(self, data, flags, **kwargs):
3087 3087 assert not flags
3088 3088 with open(self._path, b"wb") as f:
3089 3089 f.write(data)
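# Example use of arbitraryfilectx (assumed paths): compare a file at an
# arbitrary on-disk location against a working-directory file:
#
#     afctx = arbitraryfilectx(b'/tmp/merge-output', repo=repo)
#     if afctx.cmp(repo[None][b'a.txt']):  # True when contents differ
#         afctx.write(repo[None][b'a.txt'].data(), b'')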