hidden: extract the code generating "filtered rev" error for wrapping...
Pierre-Yves David - r32006:c84c83b5 default
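The extraction below moves the "filtered rev" error construction into a standalone _filterederror helper so that extensions such as evolve can wrap it and experiment with the message shown for hidden revisions. A minimal sketch of such a wrapper, assuming the stock extensions.wrapfunction API; the extension body and wording are illustrative, not evolve's actual behaviour:

    from mercurial import context, error, extensions
    from mercurial.i18n import _

    def _verbosefilterederror(orig, repo, changeid):
        # Illustrative override: reword only the hidden-revision case and
        # defer to the extracted helper for every other filter level.
        if repo.filtername.startswith('visible'):
            msg = _("hidden revision '%s' (it may have been rewritten)") % changeid
            hint = _('use --hidden to access hidden revisions')
            return error.FilteredRepoLookupError(msg, hint=hint)
        return orig(repo, changeid)

    def uisetup(ui):
        # Wrap the newly extracted helper once, when the extension is loaded.
        extensions.wrapfunction(context, '_filterederror', _verbosefilterederror)

With a wrapper like this loaded, looking up a hidden changeset raises the customized FilteredRepoLookupError instead of the stock one; that single hook point is what this changeset creates.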
@@ -1,2160 +1,2167
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 revlog,
39 39 scmutil,
40 40 subrepo,
41 41 util,
42 42 )
43 43
44 44 propertycache = util.propertycache
45 45
46 46 nonascii = re.compile(r'[^\x21-\x7f]').search
47 47
48 48 class basectx(object):
49 49 """A basectx object represents the common logic for its children:
50 50 changectx: read-only context that is already present in the repo,
51 51 workingctx: a context that represents the working directory and can
52 52 be committed,
53 53 memctx: a context that represents changes in-memory and can also
54 54 be committed."""
55 55 def __new__(cls, repo, changeid='', *args, **kwargs):
56 56 if isinstance(changeid, basectx):
57 57 return changeid
58 58
59 59 o = super(basectx, cls).__new__(cls)
60 60
61 61 o._repo = repo
62 62 o._rev = nullrev
63 63 o._node = nullid
64 64
65 65 return o
66 66
67 67 def __str__(self):
68 68 r = short(self.node())
69 69 if pycompat.ispy3:
70 70 return r.decode('ascii')
71 71 return r
72 72
73 73 def __bytes__(self):
74 74 return short(self.node())
75 75
76 76 def __int__(self):
77 77 return self.rev()
78 78
79 79 def __repr__(self):
80 80 return "<%s %s>" % (type(self).__name__, str(self))
81 81
82 82 def __eq__(self, other):
83 83 try:
84 84 return type(self) == type(other) and self._rev == other._rev
85 85 except AttributeError:
86 86 return False
87 87
88 88 def __ne__(self, other):
89 89 return not (self == other)
90 90
91 91 def __contains__(self, key):
92 92 return key in self._manifest
93 93
94 94 def __getitem__(self, key):
95 95 return self.filectx(key)
96 96
97 97 def __iter__(self):
98 98 return iter(self._manifest)
99 99
100 100 def _buildstatusmanifest(self, status):
101 101 """Builds a manifest that includes the given status results, if this is
102 102 a working copy context. For non-working copy contexts, it just returns
103 103 the normal manifest."""
104 104 return self.manifest()
105 105
106 106 def _matchstatus(self, other, match):
107 107 """return match.always if match is none
108 108
109 109 This internal method provides a way for child objects to override the
110 110 match operator.
111 111 """
112 112 return match or matchmod.always(self._repo.root, self._repo.getcwd())
113 113
114 114 def _buildstatus(self, other, s, match, listignored, listclean,
115 115 listunknown):
116 116 """build a status with respect to another context"""
117 117 # Load earliest manifest first for caching reasons. More specifically,
118 118 # if you have revisions 1000 and 1001, 1001 is probably stored as a
119 119 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
120 120 # 1000 and cache it so that when you read 1001, we just need to apply a
121 121 # delta to what's in the cache. So that's one full reconstruction + one
122 122 # delta application.
123 123 mf2 = None
124 124 if self.rev() is not None and self.rev() < other.rev():
125 125 mf2 = self._buildstatusmanifest(s)
126 126 mf1 = other._buildstatusmanifest(s)
127 127 if mf2 is None:
128 128 mf2 = self._buildstatusmanifest(s)
129 129
130 130 modified, added = [], []
131 131 removed = []
132 132 clean = []
133 133 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
134 134 deletedset = set(deleted)
135 135 d = mf1.diff(mf2, match=match, clean=listclean)
136 136 for fn, value in d.iteritems():
137 137 if fn in deletedset:
138 138 continue
139 139 if value is None:
140 140 clean.append(fn)
141 141 continue
142 142 (node1, flag1), (node2, flag2) = value
143 143 if node1 is None:
144 144 added.append(fn)
145 145 elif node2 is None:
146 146 removed.append(fn)
147 147 elif flag1 != flag2:
148 148 modified.append(fn)
149 149 elif node2 not in wdirnodes:
150 150 # When comparing files between two commits, we save time by
151 151 # not comparing the file contents when the nodeids differ.
152 152 # Note that this means we incorrectly report a reverted change
153 153 # to a file as a modification.
154 154 modified.append(fn)
155 155 elif self[fn].cmp(other[fn]):
156 156 modified.append(fn)
157 157 else:
158 158 clean.append(fn)
159 159
160 160 if removed:
161 161 # need to filter files if they are already reported as removed
162 162 unknown = [fn for fn in unknown if fn not in mf1 and
163 163 (not match or match(fn))]
164 164 ignored = [fn for fn in ignored if fn not in mf1 and
165 165 (not match or match(fn))]
166 166 # if they're deleted, don't report them as removed
167 167 removed = [fn for fn in removed if fn not in deletedset]
168 168
169 169 return scmutil.status(modified, added, removed, deleted, unknown,
170 170 ignored, clean)
171 171
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepo.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181 def node(self):
182 182 return self._node
183 183 def hex(self):
184 184 return hex(self.node())
185 185 def manifest(self):
186 186 return self._manifest
187 187 def manifestctx(self):
188 188 return self._manifestctx
189 189 def repo(self):
190 190 return self._repo
191 191 def phasestr(self):
192 192 return phases.phasenames[self.phase()]
193 193 def mutable(self):
194 194 return self.phase() > phases.public
195 195
196 196 def getfileset(self, expr):
197 197 return fileset.getfileset(self, expr)
198 198
199 199 def obsolete(self):
200 200 """True if the changeset is obsolete"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202 202
203 203 def extinct(self):
204 204 """True if the changeset is extinct"""
205 205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206 206
207 207 def unstable(self):
208 208 """True if the changeset is not obsolete but it's ancestor are"""
209 209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
210 210
211 211 def bumped(self):
212 212 """True if the changeset try to be a successor of a public changeset
213 213
214 214 Only non-public and non-obsolete changesets may be bumped.
215 215 """
216 216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
217 217
218 218 def divergent(self):
219 219 """Is a successors of a changeset with multiple possible successors set
220 220
221 221 Only non-public and non-obsolete changesets may be divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
224 224
225 225 def troubled(self):
226 226 """True if the changeset is either unstable, bumped or divergent"""
227 227 return self.unstable() or self.bumped() or self.divergent()
228 228
229 229 def troubles(self):
230 230 """return the list of troubles affecting this changesets.
231 231
232 232 Troubles are returned as strings. Possible values are:
233 233 - unstable,
234 234 - bumped,
235 235 - divergent.
236 236 """
237 237 troubles = []
238 238 if self.unstable():
239 239 troubles.append('unstable')
240 240 if self.bumped():
241 241 troubles.append('bumped')
242 242 if self.divergent():
243 243 troubles.append('divergent')
244 244 return troubles
245 245
246 246 def parents(self):
247 247 """return contexts for each parent changeset"""
248 248 return self._parents
249 249
250 250 def p1(self):
251 251 return self._parents[0]
252 252
253 253 def p2(self):
254 254 parents = self._parents
255 255 if len(parents) == 2:
256 256 return parents[1]
257 257 return changectx(self._repo, nullrev)
258 258
259 259 def _fileinfo(self, path):
260 260 if '_manifest' in self.__dict__:
261 261 try:
262 262 return self._manifest[path], self._manifest.flags(path)
263 263 except KeyError:
264 264 raise error.ManifestLookupError(self._node, path,
265 265 _('not found in manifest'))
266 266 if '_manifestdelta' in self.__dict__ or path in self.files():
267 267 if path in self._manifestdelta:
268 268 return (self._manifestdelta[path],
269 269 self._manifestdelta.flags(path))
270 270 mfl = self._repo.manifestlog
271 271 try:
272 272 node, flag = mfl[self._changeset.manifest].find(path)
273 273 except KeyError:
274 274 raise error.ManifestLookupError(self._node, path,
275 275 _('not found in manifest'))
276 276
277 277 return node, flag
278 278
279 279 def filenode(self, path):
280 280 return self._fileinfo(path)[0]
281 281
282 282 def flags(self, path):
283 283 try:
284 284 return self._fileinfo(path)[1]
285 285 except error.LookupError:
286 286 return ''
287 287
288 288 def sub(self, path, allowcreate=True):
289 289 '''return a subrepo for the stored revision of path, never wdir()'''
290 290 return subrepo.subrepo(self, path, allowcreate=allowcreate)
291 291
292 292 def nullsub(self, path, pctx):
293 293 return subrepo.nullsubrepo(self, path, pctx)
294 294
295 295 def workingsub(self, path):
296 296 '''return a subrepo for the stored revision, or wdir if this is a wdir
297 297 context.
298 298 '''
299 299 return subrepo.subrepo(self, path, allowwdir=True)
300 300
301 301 def match(self, pats=None, include=None, exclude=None, default='glob',
302 302 listsubrepos=False, badfn=None):
303 303 if pats is None:
304 304 pats = []
305 305 r = self._repo
306 306 return matchmod.match(r.root, r.getcwd(), pats,
307 307 include, exclude, default,
308 308 auditor=r.nofsauditor, ctx=self,
309 309 listsubrepos=listsubrepos, badfn=badfn)
310 310
311 311 def diff(self, ctx2=None, match=None, **opts):
312 312 """Returns a diff generator for the given contexts and matcher"""
313 313 if ctx2 is None:
314 314 ctx2 = self.p1()
315 315 if ctx2 is not None:
316 316 ctx2 = self._repo[ctx2]
317 317 diffopts = patch.diffopts(self._repo.ui, opts)
318 318 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
319 319
320 320 def dirs(self):
321 321 return self._manifest.dirs()
322 322
323 323 def hasdir(self, dir):
324 324 return self._manifest.hasdir(dir)
325 325
326 326 def dirty(self, missing=False, merge=True, branch=True):
327 327 return False
328 328
329 329 def status(self, other=None, match=None, listignored=False,
330 330 listclean=False, listunknown=False, listsubrepos=False):
331 331 """return status of files between two nodes or node and working
332 332 directory.
333 333
334 334 If other is None, compare this node with working directory.
335 335
336 336 returns (modified, added, removed, deleted, unknown, ignored, clean)
337 337 """
338 338
339 339 ctx1 = self
340 340 ctx2 = self._repo[other]
341 341
342 342 # This next code block is, admittedly, fragile logic that tests for
343 343 # reversing the contexts and wouldn't need to exist if it weren't for
344 344 # the fast (and common) code path of comparing the working directory
345 345 # with its first parent.
346 346 #
347 347 # What we're aiming for here is the ability to call:
348 348 #
349 349 # workingctx.status(parentctx)
350 350 #
351 351 # If we always built the manifest for each context and compared those,
352 352 # then we'd be done. But the special case of the above call means we
353 353 # just copy the manifest of the parent.
354 354 reversed = False
355 355 if (not isinstance(ctx1, changectx)
356 356 and isinstance(ctx2, changectx)):
357 357 reversed = True
358 358 ctx1, ctx2 = ctx2, ctx1
359 359
360 360 match = ctx2._matchstatus(ctx1, match)
361 361 r = scmutil.status([], [], [], [], [], [], [])
362 362 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
363 363 listunknown)
364 364
365 365 if reversed:
366 366 # Reverse added and removed. Clear deleted, unknown and ignored as
367 367 # these make no sense to reverse.
368 368 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
369 369 r.clean)
370 370
371 371 if listsubrepos:
372 372 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
373 373 try:
374 374 rev2 = ctx2.subrev(subpath)
375 375 except KeyError:
376 376 # A subrepo that existed in node1 was deleted between
377 377 # node1 and node2 (inclusive). Thus, ctx2's substate
378 378 # won't contain that subpath. The best we can do is ignore it.
379 379 rev2 = None
380 380 submatch = matchmod.subdirmatcher(subpath, match)
381 381 s = sub.status(rev2, match=submatch, ignored=listignored,
382 382 clean=listclean, unknown=listunknown,
383 383 listsubrepos=True)
384 384 for rfiles, sfiles in zip(r, s):
385 385 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
386 386
387 387 for l in r:
388 388 l.sort()
389 389
390 390 return r
391 391
392 392
393 393 def makememctx(repo, parents, text, user, date, branch, files, store,
394 394 editor=None, extra=None):
395 395 def getfilectx(repo, memctx, path):
396 396 data, mode, copied = store.getfile(path)
397 397 if data is None:
398 398 return None
399 399 islink, isexec = mode
400 400 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
401 401 copied=copied, memctx=memctx)
402 402 if extra is None:
403 403 extra = {}
404 404 if branch:
405 405 extra['branch'] = encoding.fromlocal(branch)
406 406 ctx = memctx(repo, parents, text, files, getfilectx, user,
407 407 date, extra, editor)
408 408 return ctx
409 409
410 def _filterederror(repo, changeid):
411 """build an exception to be raised about a filtered changeid
412
413 This is extracted into a function to help extensions (e.g. evolve)
414 experiment with various message variants."""
415 if repo.filtername.startswith('visible'):
416 msg = _("hidden revision '%s'") % changeid
417 hint = _('use --hidden to access hidden revisions')
418 return error.FilteredRepoLookupError(msg, hint=hint)
419 msg = _("filtered revision '%s' (not in '%s' subset)")
420 msg %= (changeid, repo.filtername)
421 return error.FilteredRepoLookupError(msg)
422
410 423 class changectx(basectx):
411 424 """A changecontext object makes access to data related to a particular
412 425 changeset convenient. It represents a read-only context already present in
413 426 the repo."""
414 427 def __init__(self, repo, changeid=''):
415 428 """changeid is a revision number, node, or tag"""
416 429
417 430 # since basectx.__new__ already took care of copying the object, we
418 431 # don't need to do anything in __init__, so we just exit here
419 432 if isinstance(changeid, basectx):
420 433 return
421 434
422 435 if changeid == '':
423 436 changeid = '.'
424 437 self._repo = repo
425 438
426 439 try:
427 440 if isinstance(changeid, int):
428 441 self._node = repo.changelog.node(changeid)
429 442 self._rev = changeid
430 443 return
431 444 if not pycompat.ispy3 and isinstance(changeid, long):
432 445 changeid = str(changeid)
433 446 if changeid == 'null':
434 447 self._node = nullid
435 448 self._rev = nullrev
436 449 return
437 450 if changeid == 'tip':
438 451 self._node = repo.changelog.tip()
439 452 self._rev = repo.changelog.rev(self._node)
440 453 return
441 454 if changeid == '.' or changeid == repo.dirstate.p1():
442 455 # this is a hack to delay/avoid loading obsmarkers
443 456 # when we know that '.' won't be hidden
444 457 self._node = repo.dirstate.p1()
445 458 self._rev = repo.unfiltered().changelog.rev(self._node)
446 459 return
447 460 if len(changeid) == 20:
448 461 try:
449 462 self._node = changeid
450 463 self._rev = repo.changelog.rev(changeid)
451 464 return
452 465 except error.FilteredRepoLookupError:
453 466 raise
454 467 except LookupError:
455 468 pass
456 469
457 470 try:
458 471 r = int(changeid)
459 472 if '%d' % r != changeid:
460 473 raise ValueError
461 474 l = len(repo.changelog)
462 475 if r < 0:
463 476 r += l
464 477 if r < 0 or r >= l:
465 478 raise ValueError
466 479 self._rev = r
467 480 self._node = repo.changelog.node(r)
468 481 return
469 482 except error.FilteredIndexError:
470 483 raise
471 484 except (ValueError, OverflowError, IndexError):
472 485 pass
473 486
474 487 if len(changeid) == 40:
475 488 try:
476 489 self._node = bin(changeid)
477 490 self._rev = repo.changelog.rev(self._node)
478 491 return
479 492 except error.FilteredLookupError:
480 493 raise
481 494 except (TypeError, LookupError):
482 495 pass
483 496
484 497 # lookup bookmarks through the name interface
485 498 try:
486 499 self._node = repo.names.singlenode(repo, changeid)
487 500 self._rev = repo.changelog.rev(self._node)
488 501 return
489 502 except KeyError:
490 503 pass
491 504 except error.FilteredRepoLookupError:
492 505 raise
493 506 except error.RepoLookupError:
494 507 pass
495 508
496 509 self._node = repo.unfiltered().changelog._partialmatch(changeid)
497 510 if self._node is not None:
498 511 self._rev = repo.changelog.rev(self._node)
499 512 return
500 513
501 514 # lookup failed
502 515 # check if it might have come from damaged dirstate
503 516 #
504 517 # XXX we could avoid the unfiltered if we had a recognizable
505 518 # exception for filtered changeset access
506 519 if changeid in repo.unfiltered().dirstate.parents():
507 520 msg = _("working directory has unknown parent '%s'!")
508 521 raise error.Abort(msg % short(changeid))
509 522 try:
510 523 if len(changeid) == 20 and nonascii(changeid):
511 524 changeid = hex(changeid)
512 525 except TypeError:
513 526 pass
514 527 except (error.FilteredIndexError, error.FilteredLookupError,
515 528 error.FilteredRepoLookupError):
516 if repo.filtername.startswith('visible'):
517 msg = _("hidden revision '%s'") % changeid
518 hint = _('use --hidden to access hidden revisions')
519 raise error.FilteredRepoLookupError(msg, hint=hint)
520 msg = _("filtered revision '%s' (not in '%s' subset)")
521 msg %= (changeid, repo.filtername)
522 raise error.FilteredRepoLookupError(msg)
529 raise _filterederror(repo, changeid)
523 530 except IndexError:
524 531 pass
525 532 raise error.RepoLookupError(
526 533 _("unknown revision '%s'") % changeid)
527 534
528 535 def __hash__(self):
529 536 try:
530 537 return hash(self._rev)
531 538 except AttributeError:
532 539 return id(self)
533 540
534 541 def __nonzero__(self):
535 542 return self._rev != nullrev
536 543
537 544 __bool__ = __nonzero__
538 545
539 546 @propertycache
540 547 def _changeset(self):
541 548 return self._repo.changelog.changelogrevision(self.rev())
542 549
543 550 @propertycache
544 551 def _manifest(self):
545 552 return self._manifestctx.read()
546 553
547 554 @propertycache
548 555 def _manifestctx(self):
549 556 return self._repo.manifestlog[self._changeset.manifest]
550 557
551 558 @propertycache
552 559 def _manifestdelta(self):
553 560 return self._manifestctx.readdelta()
554 561
555 562 @propertycache
556 563 def _parents(self):
557 564 repo = self._repo
558 565 p1, p2 = repo.changelog.parentrevs(self._rev)
559 566 if p2 == nullrev:
560 567 return [changectx(repo, p1)]
561 568 return [changectx(repo, p1), changectx(repo, p2)]
562 569
563 570 def changeset(self):
564 571 c = self._changeset
565 572 return (
566 573 c.manifest,
567 574 c.user,
568 575 c.date,
569 576 c.files,
570 577 c.description,
571 578 c.extra,
572 579 )
573 580 def manifestnode(self):
574 581 return self._changeset.manifest
575 582
576 583 def user(self):
577 584 return self._changeset.user
578 585 def date(self):
579 586 return self._changeset.date
580 587 def files(self):
581 588 return self._changeset.files
582 589 def description(self):
583 590 return self._changeset.description
584 591 def branch(self):
585 592 return encoding.tolocal(self._changeset.extra.get("branch"))
586 593 def closesbranch(self):
587 594 return 'close' in self._changeset.extra
588 595 def extra(self):
589 596 return self._changeset.extra
590 597 def tags(self):
591 598 return self._repo.nodetags(self._node)
592 599 def bookmarks(self):
593 600 return self._repo.nodebookmarks(self._node)
594 601 def phase(self):
595 602 return self._repo._phasecache.phase(self._repo, self._rev)
596 603 def hidden(self):
597 604 return self._rev in repoview.filterrevs(self._repo, 'visible')
598 605
599 606 def children(self):
600 607 """return contexts for each child changeset"""
601 608 c = self._repo.changelog.children(self._node)
602 609 return [changectx(self._repo, x) for x in c]
603 610
604 611 def ancestors(self):
605 612 for a in self._repo.changelog.ancestors([self._rev]):
606 613 yield changectx(self._repo, a)
607 614
608 615 def descendants(self):
609 616 for d in self._repo.changelog.descendants([self._rev]):
610 617 yield changectx(self._repo, d)
611 618
612 619 def filectx(self, path, fileid=None, filelog=None):
613 620 """get a file context from this changeset"""
614 621 if fileid is None:
615 622 fileid = self.filenode(path)
616 623 return filectx(self._repo, path, fileid=fileid,
617 624 changectx=self, filelog=filelog)
618 625
619 626 def ancestor(self, c2, warn=False):
620 627 """return the "best" ancestor context of self and c2
621 628
622 629 If there are multiple candidates, it will show a message and check
623 630 merge.preferancestor configuration before falling back to the
624 631 revlog ancestor."""
625 632 # deal with workingctxs
626 633 n2 = c2._node
627 634 if n2 is None:
628 635 n2 = c2._parents[0]._node
629 636 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
630 637 if not cahs:
631 638 anc = nullid
632 639 elif len(cahs) == 1:
633 640 anc = cahs[0]
634 641 else:
635 642 # experimental config: merge.preferancestor
636 643 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
637 644 try:
638 645 ctx = changectx(self._repo, r)
639 646 except error.RepoLookupError:
640 647 continue
641 648 anc = ctx.node()
642 649 if anc in cahs:
643 650 break
644 651 else:
645 652 anc = self._repo.changelog.ancestor(self._node, n2)
646 653 if warn:
647 654 self._repo.ui.status(
648 655 (_("note: using %s as ancestor of %s and %s\n") %
649 656 (short(anc), short(self._node), short(n2))) +
650 657 ''.join(_(" alternatively, use --config "
651 658 "merge.preferancestor=%s\n") %
652 659 short(n) for n in sorted(cahs) if n != anc))
653 660 return changectx(self._repo, anc)
654 661
655 662 def descendant(self, other):
656 663 """True if other is descendant of this changeset"""
657 664 return self._repo.changelog.descendant(self._rev, other._rev)
658 665
659 666 def walk(self, match):
660 667 '''Generates matching file names.'''
661 668
662 669 # Wrap match.bad method to have message with nodeid
663 670 def bad(fn, msg):
664 671 # The manifest doesn't know about subrepos, so don't complain about
665 672 # paths into valid subrepos.
666 673 if any(fn == s or fn.startswith(s + '/')
667 674 for s in self.substate):
668 675 return
669 676 match.bad(fn, _('no such file in rev %s') % self)
670 677
671 678 m = matchmod.badmatch(match, bad)
672 679 return self._manifest.walk(m)
673 680
674 681 def matches(self, match):
675 682 return self.walk(match)
676 683
677 684 class basefilectx(object):
678 685 """A filecontext object represents the common logic for its children:
679 686 filectx: read-only access to a filerevision that is already present
680 687 in the repo,
681 688 workingfilectx: a filecontext that represents files from the working
682 689 directory,
683 690 memfilectx: a filecontext that represents files in-memory."""
684 691 def __new__(cls, repo, path, *args, **kwargs):
685 692 return super(basefilectx, cls).__new__(cls)
686 693
687 694 @propertycache
688 695 def _filelog(self):
689 696 return self._repo.file(self._path)
690 697
691 698 @propertycache
692 699 def _changeid(self):
693 700 if '_changeid' in self.__dict__:
694 701 return self._changeid
695 702 elif '_changectx' in self.__dict__:
696 703 return self._changectx.rev()
697 704 elif '_descendantrev' in self.__dict__:
698 705 # this file context was created from a revision with a known
699 706 # descendant, we can (lazily) correct for linkrev aliases
700 707 return self._adjustlinkrev(self._descendantrev)
701 708 else:
702 709 return self._filelog.linkrev(self._filerev)
703 710
704 711 @propertycache
705 712 def _filenode(self):
706 713 if '_fileid' in self.__dict__:
707 714 return self._filelog.lookup(self._fileid)
708 715 else:
709 716 return self._changectx.filenode(self._path)
710 717
711 718 @propertycache
712 719 def _filerev(self):
713 720 return self._filelog.rev(self._filenode)
714 721
715 722 @propertycache
716 723 def _repopath(self):
717 724 return self._path
718 725
719 726 def __nonzero__(self):
720 727 try:
721 728 self._filenode
722 729 return True
723 730 except error.LookupError:
724 731 # file is missing
725 732 return False
726 733
727 734 __bool__ = __nonzero__
728 735
729 736 def __str__(self):
730 737 try:
731 738 return "%s@%s" % (self.path(), self._changectx)
732 739 except error.LookupError:
733 740 return "%s@???" % self.path()
734 741
735 742 def __repr__(self):
736 743 return "<%s %s>" % (type(self).__name__, str(self))
737 744
738 745 def __hash__(self):
739 746 try:
740 747 return hash((self._path, self._filenode))
741 748 except AttributeError:
742 749 return id(self)
743 750
744 751 def __eq__(self, other):
745 752 try:
746 753 return (type(self) == type(other) and self._path == other._path
747 754 and self._filenode == other._filenode)
748 755 except AttributeError:
749 756 return False
750 757
751 758 def __ne__(self, other):
752 759 return not (self == other)
753 760
754 761 def filerev(self):
755 762 return self._filerev
756 763 def filenode(self):
757 764 return self._filenode
758 765 def flags(self):
759 766 return self._changectx.flags(self._path)
760 767 def filelog(self):
761 768 return self._filelog
762 769 def rev(self):
763 770 return self._changeid
764 771 def linkrev(self):
765 772 return self._filelog.linkrev(self._filerev)
766 773 def node(self):
767 774 return self._changectx.node()
768 775 def hex(self):
769 776 return self._changectx.hex()
770 777 def user(self):
771 778 return self._changectx.user()
772 779 def date(self):
773 780 return self._changectx.date()
774 781 def files(self):
775 782 return self._changectx.files()
776 783 def description(self):
777 784 return self._changectx.description()
778 785 def branch(self):
779 786 return self._changectx.branch()
780 787 def extra(self):
781 788 return self._changectx.extra()
782 789 def phase(self):
783 790 return self._changectx.phase()
784 791 def phasestr(self):
785 792 return self._changectx.phasestr()
786 793 def manifest(self):
787 794 return self._changectx.manifest()
788 795 def changectx(self):
789 796 return self._changectx
790 797 def repo(self):
791 798 return self._repo
792 799
793 800 def path(self):
794 801 return self._path
795 802
796 803 def isbinary(self):
797 804 try:
798 805 return util.binary(self.data())
799 806 except IOError:
800 807 return False
801 808 def isexec(self):
802 809 return 'x' in self.flags()
803 810 def islink(self):
804 811 return 'l' in self.flags()
805 812
806 813 def isabsent(self):
807 814 """whether this filectx represents a file not in self._changectx
808 815
809 816 This is mainly for merge code to detect change/delete conflicts. This is
810 817 expected to be True for all subclasses of basectx."""
811 818 return False
812 819
813 820 _customcmp = False
814 821 def cmp(self, fctx):
815 822 """compare with other file context
816 823
817 824 returns True if different than fctx.
818 825 """
819 826 if fctx._customcmp:
820 827 return fctx.cmp(self)
821 828
822 829 if (fctx._filenode is None
823 830 and (self._repo._encodefilterpats
824 831 # if file data starts with '\1\n', empty metadata block is
825 832 # prepended, which adds 4 bytes to filelog.size().
826 833 or self.size() - 4 == fctx.size())
827 834 or self.size() == fctx.size()):
828 835 return self._filelog.cmp(self._filenode, fctx.data())
829 836
830 837 return True
831 838
832 839 def _adjustlinkrev(self, srcrev, inclusive=False):
833 840 """return the first ancestor of <srcrev> introducing <fnode>
834 841
835 842 If the linkrev of the file revision does not point to an ancestor of
836 843 srcrev, we'll walk down the ancestors until we find one introducing
837 844 this file revision.
838 845
839 846 :srcrev: the changeset revision we search ancestors from
840 847 :inclusive: if true, the src revision will also be checked
841 848 """
842 849 repo = self._repo
843 850 cl = repo.unfiltered().changelog
844 851 mfl = repo.manifestlog
845 852 # fetch the linkrev
846 853 lkr = self.linkrev()
847 854 # hack to reuse ancestor computation when searching for renames
848 855 memberanc = getattr(self, '_ancestrycontext', None)
849 856 iteranc = None
850 857 if srcrev is None:
851 858 # wctx case, used by workingfilectx during mergecopy
852 859 revs = [p.rev() for p in self._repo[None].parents()]
853 860 inclusive = True # we skipped the real (revless) source
854 861 else:
855 862 revs = [srcrev]
856 863 if memberanc is None:
857 864 memberanc = iteranc = cl.ancestors(revs, lkr,
858 865 inclusive=inclusive)
859 866 # check if this linkrev is an ancestor of srcrev
860 867 if lkr not in memberanc:
861 868 if iteranc is None:
862 869 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
863 870 fnode = self._filenode
864 871 path = self._path
865 872 for a in iteranc:
866 873 ac = cl.read(a) # get changeset data (we avoid object creation)
867 874 if path in ac[3]: # checking the 'files' field.
868 875 # The file has been touched, check if the content is
869 876 # similar to the one we search for.
870 877 if fnode == mfl[ac[0]].readfast().get(path):
871 878 return a
872 879 # In theory, we should never get out of that loop without a result.
873 880 # But if the manifest uses a buggy file revision (not a child of the
874 881 # one it replaces) we could. Such a buggy situation will likely
875 882 # result in a crash somewhere else at some point.
876 883 return lkr
877 884
878 885 def introrev(self):
879 886 """return the rev of the changeset which introduced this file revision
880 887
881 888 This method is different from linkrev because it takes into account the
882 889 changeset the filectx was created from. It ensures the returned
883 890 revision is one of its ancestors. This prevents bugs from
884 891 'linkrev-shadowing' when a file revision is used by multiple
885 892 changesets.
886 893 """
887 894 lkr = self.linkrev()
888 895 attrs = vars(self)
889 896 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
890 897 if noctx or self.rev() == lkr:
891 898 return self.linkrev()
892 899 return self._adjustlinkrev(self.rev(), inclusive=True)
893 900
894 901 def _parentfilectx(self, path, fileid, filelog):
895 902 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
896 903 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
897 904 if '_changeid' in vars(self) or '_changectx' in vars(self):
898 905 # If self is associated with a changeset (probably explicitly
899 906 # fed), ensure the created filectx is associated with a
900 907 # changeset that is an ancestor of self.changectx.
901 908 # This lets us later use _adjustlinkrev to get a correct link.
902 909 fctx._descendantrev = self.rev()
903 910 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
904 911 elif '_descendantrev' in vars(self):
905 912 # Otherwise propagate _descendantrev if we have one associated.
906 913 fctx._descendantrev = self._descendantrev
907 914 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
908 915 return fctx
909 916
910 917 def parents(self):
911 918 _path = self._path
912 919 fl = self._filelog
913 920 parents = self._filelog.parents(self._filenode)
914 921 pl = [(_path, node, fl) for node in parents if node != nullid]
915 922
916 923 r = fl.renamed(self._filenode)
917 924 if r:
918 925 # - In the simple rename case, both parents are nullid, pl is empty.
919 926 # - In case of merge, only one of the parents is nullid and should
920 927 # be replaced with the rename information. This parent is -always-
921 928 # the first one.
922 929 #
923 930 # As nullid parents have always been filtered out by the previous list
924 931 # comprehension, inserting at 0 will always result in replacing the
925 932 # first nullid parent with the rename information.
926 933 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
927 934
928 935 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
929 936
930 937 def p1(self):
931 938 return self.parents()[0]
932 939
933 940 def p2(self):
934 941 p = self.parents()
935 942 if len(p) == 2:
936 943 return p[1]
937 944 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
938 945
939 946 def annotate(self, follow=False, linenumber=False, diffopts=None):
940 947 '''returns a list of tuples of ((ctx, number), line) for each line
941 948 in the file, where ctx is the filectx of the node where
942 949 that line was last changed; if linenumber parameter is true, number is
943 950 the line number at the first appearance in the managed file, otherwise,
944 951 number has a fixed value of False.
945 952 '''
946 953
947 954 def lines(text):
948 955 if text.endswith("\n"):
949 956 return text.count("\n")
950 957 return text.count("\n") + int(bool(text))
951 958
952 959 if linenumber:
953 960 def decorate(text, rev):
954 961 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
955 962 else:
956 963 def decorate(text, rev):
957 964 return ([(rev, False)] * lines(text), text)
958 965
959 966 def pair(parent, child):
960 967 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
961 968 for (a1, a2, b1, b2), t in blocks:
962 969 # Changed blocks ('!') or blocks made only of blank lines ('~')
963 970 # belong to the child.
964 971 if t == '=':
965 972 child[0][b1:b2] = parent[0][a1:a2]
966 973 return child
967 974
968 975 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
969 976
970 977 def parents(f):
971 978 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
972 979 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
973 980 # from the topmost introrev (= srcrev) down to p.linkrev() if it
974 981 # isn't an ancestor of the srcrev.
975 982 f._changeid
976 983 pl = f.parents()
977 984
978 985 # Don't return renamed parents if we aren't following.
979 986 if not follow:
980 987 pl = [p for p in pl if p.path() == f.path()]
981 988
982 989 # renamed filectx won't have a filelog yet, so set it
983 990 # from the cache to save time
984 991 for p in pl:
985 992 if not '_filelog' in p.__dict__:
986 993 p._filelog = getlog(p.path())
987 994
988 995 return pl
989 996
990 997 # use linkrev to find the first changeset where self appeared
991 998 base = self
992 999 introrev = self.introrev()
993 1000 if self.rev() != introrev:
994 1001 base = self.filectx(self.filenode(), changeid=introrev)
995 1002 if getattr(base, '_ancestrycontext', None) is None:
996 1003 cl = self._repo.changelog
997 1004 if introrev is None:
998 1005 # wctx is not inclusive, but works because _ancestrycontext
999 1006 # is used to test filelog revisions
1000 1007 ac = cl.ancestors([p.rev() for p in base.parents()],
1001 1008 inclusive=True)
1002 1009 else:
1003 1010 ac = cl.ancestors([introrev], inclusive=True)
1004 1011 base._ancestrycontext = ac
1005 1012
1006 1013 # This algorithm would prefer to be recursive, but Python is a
1007 1014 # bit recursion-hostile. Instead we do an iterative
1008 1015 # depth-first search.
1009 1016
1010 1017 # 1st DFS pre-calculates pcache and needed
1011 1018 visit = [base]
1012 1019 pcache = {}
1013 1020 needed = {base: 1}
1014 1021 while visit:
1015 1022 f = visit.pop()
1016 1023 if f in pcache:
1017 1024 continue
1018 1025 pl = parents(f)
1019 1026 pcache[f] = pl
1020 1027 for p in pl:
1021 1028 needed[p] = needed.get(p, 0) + 1
1022 1029 if p not in pcache:
1023 1030 visit.append(p)
1024 1031
1025 1032 # 2nd DFS does the actual annotate
1026 1033 visit[:] = [base]
1027 1034 hist = {}
1028 1035 while visit:
1029 1036 f = visit[-1]
1030 1037 if f in hist:
1031 1038 visit.pop()
1032 1039 continue
1033 1040
1034 1041 ready = True
1035 1042 pl = pcache[f]
1036 1043 for p in pl:
1037 1044 if p not in hist:
1038 1045 ready = False
1039 1046 visit.append(p)
1040 1047 if ready:
1041 1048 visit.pop()
1042 1049 curr = decorate(f.data(), f)
1043 1050 for p in pl:
1044 1051 curr = pair(hist[p], curr)
1045 1052 if needed[p] == 1:
1046 1053 del hist[p]
1047 1054 del needed[p]
1048 1055 else:
1049 1056 needed[p] -= 1
1050 1057
1051 1058 hist[f] = curr
1052 1059 del pcache[f]
1053 1060
1054 1061 return zip(hist[base][0], hist[base][1].splitlines(True))
1055 1062
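# Editorial sketch (not part of this changeset): how a caller typically
# consumes basefilectx.annotate() above. 'repo' is assumed to be an open
# localrepository object and the helper name is hypothetical.
def _annotate_example(repo, path='mercurial/context.py'):
    fctx = repo['tip'][path]
    for (introctx, lineno), line in fctx.annotate(linenumber=True):
        # introctx is the filectx that last changed the line; lineno is its
        # line number at first appearance in the file (linenumber=True).
        repo.ui.write("%d:%d: %s" % (introctx.rev(), lineno, line))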
1056 1063 def ancestors(self, followfirst=False):
1057 1064 visit = {}
1058 1065 c = self
1059 1066 if followfirst:
1060 1067 cut = 1
1061 1068 else:
1062 1069 cut = None
1063 1070
1064 1071 while True:
1065 1072 for parent in c.parents()[:cut]:
1066 1073 visit[(parent.linkrev(), parent.filenode())] = parent
1067 1074 if not visit:
1068 1075 break
1069 1076 c = visit.pop(max(visit))
1070 1077 yield c
1071 1078
1072 1079 class filectx(basefilectx):
1073 1080 """A filecontext object makes access to data related to a particular
1074 1081 filerevision convenient."""
1075 1082 def __init__(self, repo, path, changeid=None, fileid=None,
1076 1083 filelog=None, changectx=None):
1077 1084 """changeid can be a changeset revision, node, or tag.
1078 1085 fileid can be a file revision or node."""
1079 1086 self._repo = repo
1080 1087 self._path = path
1081 1088
1082 1089 assert (changeid is not None
1083 1090 or fileid is not None
1084 1091 or changectx is not None), \
1085 1092 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1086 1093 % (changeid, fileid, changectx))
1087 1094
1088 1095 if filelog is not None:
1089 1096 self._filelog = filelog
1090 1097
1091 1098 if changeid is not None:
1092 1099 self._changeid = changeid
1093 1100 if changectx is not None:
1094 1101 self._changectx = changectx
1095 1102 if fileid is not None:
1096 1103 self._fileid = fileid
1097 1104
1098 1105 @propertycache
1099 1106 def _changectx(self):
1100 1107 try:
1101 1108 return changectx(self._repo, self._changeid)
1102 1109 except error.FilteredRepoLookupError:
1103 1110 # Linkrev may point to any revision in the repository. When the
1104 1111 # repository is filtered this may lead to `filectx` trying to build
1105 1112 # `changectx` for a filtered revision. In such a case we fall back to
1106 1113 # creating `changectx` on the unfiltered version of the repository.
1107 1114 # This fallback should not be an issue because `changectx` from
1108 1115 # `filectx` are not used in complex operations that care about
1109 1116 # filtering.
1110 1117 #
1111 1118 # This fallback is a cheap and dirty fix that prevents several
1112 1119 # crashes. It does not ensure the behavior is correct. However the
1113 1120 # behavior was not correct before filtering either and "incorrect
1114 1121 # behavior" is seen as better than "crash".
1115 1122 #
1116 1123 # Linkrevs have several serious troubles with filtering that are
1117 1124 # complicated to solve. Proper handling of the issue here should be
1118 1125 # considered when solutions to the linkrev issue are on the table.
1119 1126 return changectx(self._repo.unfiltered(), self._changeid)
1120 1127
1121 1128 def filectx(self, fileid, changeid=None):
1122 1129 '''opens an arbitrary revision of the file without
1123 1130 opening a new filelog'''
1124 1131 return filectx(self._repo, self._path, fileid=fileid,
1125 1132 filelog=self._filelog, changeid=changeid)
1126 1133
1127 1134 def rawdata(self):
1128 1135 return self._filelog.revision(self._filenode, raw=True)
1129 1136
1130 1137 def data(self):
1131 1138 try:
1132 1139 return self._filelog.read(self._filenode)
1133 1140 except error.CensoredNodeError:
1134 1141 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1135 1142 return ""
1136 1143 raise error.Abort(_("censored node: %s") % short(self._filenode),
1137 1144 hint=_("set censor.policy to ignore errors"))
1138 1145
1139 1146 def size(self):
1140 1147 return self._filelog.size(self._filerev)
1141 1148
1142 1149 def renamed(self):
1143 1150 """check if file was actually renamed in this changeset revision
1144 1151
1145 1152 If a rename is logged in the file revision, we report the copy for the
1146 1153 changeset only if the file revision's linkrev points back to the changeset
1147 1154 in question or both changeset parents contain different file revisions.
1148 1155 """
1149 1156
1150 1157 renamed = self._filelog.renamed(self._filenode)
1151 1158 if not renamed:
1152 1159 return renamed
1153 1160
1154 1161 if self.rev() == self.linkrev():
1155 1162 return renamed
1156 1163
1157 1164 name = self.path()
1158 1165 fnode = self._filenode
1159 1166 for p in self._changectx.parents():
1160 1167 try:
1161 1168 if fnode == p.filenode(name):
1162 1169 return None
1163 1170 except error.LookupError:
1164 1171 pass
1165 1172 return renamed
1166 1173
1167 1174 def children(self):
1168 1175 # hard for renames
1169 1176 c = self._filelog.children(self._filenode)
1170 1177 return [filectx(self._repo, self._path, fileid=x,
1171 1178 filelog=self._filelog) for x in c]
1172 1179
1173 1180 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1174 1181 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1175 1182 if diff from fctx2 to fctx1 has changes in linerange2 and
1176 1183 `linerange1` is the new line range for fctx1.
1177 1184 """
1178 1185 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1179 1186 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1180 1187 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1181 1188 return diffinrange, linerange1
1182 1189
1183 1190 def blockancestors(fctx, fromline, toline, followfirst=False):
1184 1191 """Yield ancestors of `fctx` with respect to the block of lines within
1185 1192 `fromline`-`toline` range.
1186 1193 """
1187 1194 diffopts = patch.diffopts(fctx._repo.ui)
1188 1195 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1189 1196 while visit:
1190 1197 c, linerange2 = visit.pop(max(visit))
1191 1198 pl = c.parents()
1192 1199 if followfirst:
1193 1200 pl = pl[:1]
1194 1201 if not pl:
1195 1202 # The block originates from the initial revision.
1196 1203 yield c, linerange2
1197 1204 continue
1198 1205 inrange = False
1199 1206 for p in pl:
1200 1207 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1201 1208 inrange = inrange or inrangep
1202 1209 if linerange1[0] == linerange1[1]:
1203 1210 # Parent's linerange is empty, meaning that the block got
1204 1211 # introduced in this revision; no need to go further in this
1205 1212 # branch.
1206 1213 continue
1207 1214 visit[p.linkrev(), p.filenode()] = p, linerange1
1208 1215 if inrange:
1209 1216 yield c, linerange2
1210 1217
1211 1218 def blockdescendants(fctx, fromline, toline):
1212 1219 """Yield descendants of `fctx` with respect to the block of lines within
1213 1220 `fromline`-`toline` range.
1214 1221 """
1215 1222 # First possibly yield 'fctx' if it has changes in range with respect to
1216 1223 # its parents.
1217 1224 try:
1218 1225 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1219 1226 except StopIteration:
1220 1227 pass
1221 1228 else:
1222 1229 if c == fctx:
1223 1230 yield c, linerange1
1224 1231
1225 1232 diffopts = patch.diffopts(fctx._repo.ui)
1226 1233 fl = fctx.filelog()
1227 1234 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1228 1235 for i in fl.descendants([fctx.filerev()]):
1229 1236 c = fctx.filectx(i)
1230 1237 inrange = False
1231 1238 for x in fl.parentrevs(i):
1232 1239 try:
1233 1240 p, linerange2 = seen[x]
1234 1241 except KeyError:
1235 1242 # nullrev or other branch
1236 1243 continue
1237 1244 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1238 1245 inrange = inrange or inrangep
1239 1246 # If revision 'i' has been seen (it's a merge), we assume that its
1240 1247 # line range is the same independently of which parent was used
1241 1248 # to compute it.
1242 1249 assert i not in seen or seen[i][1] == linerange1, (
1243 1250 'computed line range for %s is not consistent between '
1244 1251 'ancestor branches' % c)
1245 1252 seen[i] = c, linerange1
1246 1253 if inrange:
1247 1254 yield c, linerange1
1248 1255
1249 1256 class committablectx(basectx):
1250 1257 """A committablectx object provides common functionality for a context that
1251 1258 wants the ability to commit, e.g. workingctx or memctx."""
1252 1259 def __init__(self, repo, text="", user=None, date=None, extra=None,
1253 1260 changes=None):
1254 1261 self._repo = repo
1255 1262 self._rev = None
1256 1263 self._node = None
1257 1264 self._text = text
1258 1265 if date:
1259 1266 self._date = util.parsedate(date)
1260 1267 if user:
1261 1268 self._user = user
1262 1269 if changes:
1263 1270 self._status = changes
1264 1271
1265 1272 self._extra = {}
1266 1273 if extra:
1267 1274 self._extra = extra.copy()
1268 1275 if 'branch' not in self._extra:
1269 1276 try:
1270 1277 branch = encoding.fromlocal(self._repo.dirstate.branch())
1271 1278 except UnicodeDecodeError:
1272 1279 raise error.Abort(_('branch name not in UTF-8!'))
1273 1280 self._extra['branch'] = branch
1274 1281 if self._extra['branch'] == '':
1275 1282 self._extra['branch'] = 'default'
1276 1283
1277 1284 def __str__(self):
1278 1285 return str(self._parents[0]) + "+"
1279 1286
1280 1287 def __nonzero__(self):
1281 1288 return True
1282 1289
1283 1290 __bool__ = __nonzero__
1284 1291
1285 1292 def _buildflagfunc(self):
1286 1293 # Create a fallback function for getting file flags when the
1287 1294 # filesystem doesn't support them
1288 1295
1289 1296 copiesget = self._repo.dirstate.copies().get
1290 1297 parents = self.parents()
1291 1298 if len(parents) < 2:
1292 1299 # when we have one parent, it's easy: copy from parent
1293 1300 man = parents[0].manifest()
1294 1301 def func(f):
1295 1302 f = copiesget(f, f)
1296 1303 return man.flags(f)
1297 1304 else:
1298 1305 # merges are tricky: we try to reconstruct the unstored
1299 1306 # result from the merge (issue1802)
1300 1307 p1, p2 = parents
1301 1308 pa = p1.ancestor(p2)
1302 1309 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1303 1310
1304 1311 def func(f):
1305 1312 f = copiesget(f, f) # may be wrong for merges with copies
1306 1313 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1307 1314 if fl1 == fl2:
1308 1315 return fl1
1309 1316 if fl1 == fla:
1310 1317 return fl2
1311 1318 if fl2 == fla:
1312 1319 return fl1
1313 1320 return '' # punt for conflicts
1314 1321
1315 1322 return func
1316 1323
1317 1324 @propertycache
1318 1325 def _flagfunc(self):
1319 1326 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1320 1327
1321 1328 @propertycache
1322 1329 def _status(self):
1323 1330 return self._repo.status()
1324 1331
1325 1332 @propertycache
1326 1333 def _user(self):
1327 1334 return self._repo.ui.username()
1328 1335
1329 1336 @propertycache
1330 1337 def _date(self):
1331 1338 return util.makedate()
1332 1339
1333 1340 def subrev(self, subpath):
1334 1341 return None
1335 1342
1336 1343 def manifestnode(self):
1337 1344 return None
1338 1345 def user(self):
1339 1346 return self._user or self._repo.ui.username()
1340 1347 def date(self):
1341 1348 return self._date
1342 1349 def description(self):
1343 1350 return self._text
1344 1351 def files(self):
1345 1352 return sorted(self._status.modified + self._status.added +
1346 1353 self._status.removed)
1347 1354
1348 1355 def modified(self):
1349 1356 return self._status.modified
1350 1357 def added(self):
1351 1358 return self._status.added
1352 1359 def removed(self):
1353 1360 return self._status.removed
1354 1361 def deleted(self):
1355 1362 return self._status.deleted
1356 1363 def branch(self):
1357 1364 return encoding.tolocal(self._extra['branch'])
1358 1365 def closesbranch(self):
1359 1366 return 'close' in self._extra
1360 1367 def extra(self):
1361 1368 return self._extra
1362 1369
1363 1370 def tags(self):
1364 1371 return []
1365 1372
1366 1373 def bookmarks(self):
1367 1374 b = []
1368 1375 for p in self.parents():
1369 1376 b.extend(p.bookmarks())
1370 1377 return b
1371 1378
1372 1379 def phase(self):
1373 1380 phase = phases.draft # default phase to draft
1374 1381 for p in self.parents():
1375 1382 phase = max(phase, p.phase())
1376 1383 return phase
1377 1384
1378 1385 def hidden(self):
1379 1386 return False
1380 1387
1381 1388 def children(self):
1382 1389 return []
1383 1390
1384 1391 def flags(self, path):
1385 1392 if '_manifest' in self.__dict__:
1386 1393 try:
1387 1394 return self._manifest.flags(path)
1388 1395 except KeyError:
1389 1396 return ''
1390 1397
1391 1398 try:
1392 1399 return self._flagfunc(path)
1393 1400 except OSError:
1394 1401 return ''
1395 1402
1396 1403 def ancestor(self, c2):
1397 1404 """return the "best" ancestor context of self and c2"""
1398 1405 return self._parents[0].ancestor(c2) # punt on two parents for now
1399 1406
1400 1407 def walk(self, match):
1401 1408 '''Generates matching file names.'''
1402 1409 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1403 1410 True, False))
1404 1411
1405 1412 def matches(self, match):
1406 1413 return sorted(self._repo.dirstate.matches(match))
1407 1414
1408 1415 def ancestors(self):
1409 1416 for p in self._parents:
1410 1417 yield p
1411 1418 for a in self._repo.changelog.ancestors(
1412 1419 [p.rev() for p in self._parents]):
1413 1420 yield changectx(self._repo, a)
1414 1421
1415 1422 def markcommitted(self, node):
1416 1423 """Perform post-commit cleanup necessary after committing this ctx
1417 1424
1418 1425 Specifically, this updates backing stores this working context
1419 1426 wraps to reflect the fact that the changes reflected by this
1420 1427 workingctx have been committed. For example, it marks
1421 1428 modified and added files as normal in the dirstate.
1422 1429
1423 1430 """
1424 1431
1425 1432 self._repo.dirstate.beginparentchange()
1426 1433 for f in self.modified() + self.added():
1427 1434 self._repo.dirstate.normal(f)
1428 1435 for f in self.removed():
1429 1436 self._repo.dirstate.drop(f)
1430 1437 self._repo.dirstate.setparents(node)
1431 1438 self._repo.dirstate.endparentchange()
1432 1439
1433 1440 # write changes out explicitly, because nesting wlock at
1434 1441 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1435 1442 # from immediately doing so for subsequent changing files
1436 1443 self._repo.dirstate.write(self._repo.currenttransaction())
1437 1444
1438 1445 class workingctx(committablectx):
1439 1446 """A workingctx object makes access to data related to
1440 1447 the current working directory convenient.
1441 1448 date - any valid date string or (unixtime, offset), or None.
1442 1449 user - username string, or None.
1443 1450 extra - a dictionary of extra values, or None.
1444 1451 changes - a list of file lists as returned by localrepo.status()
1445 1452 or None to use the repository status.
1446 1453 """
1447 1454 def __init__(self, repo, text="", user=None, date=None, extra=None,
1448 1455 changes=None):
1449 1456 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1450 1457
1451 1458 def __iter__(self):
1452 1459 d = self._repo.dirstate
1453 1460 for f in d:
1454 1461 if d[f] != 'r':
1455 1462 yield f
1456 1463
1457 1464 def __contains__(self, key):
1458 1465 return self._repo.dirstate[key] not in "?r"
1459 1466
1460 1467 def hex(self):
1461 1468 return hex(wdirid)
1462 1469
1463 1470 @propertycache
1464 1471 def _parents(self):
1465 1472 p = self._repo.dirstate.parents()
1466 1473 if p[1] == nullid:
1467 1474 p = p[:-1]
1468 1475 return [changectx(self._repo, x) for x in p]
1469 1476
1470 1477 def filectx(self, path, filelog=None):
1471 1478 """get a file context from the working directory"""
1472 1479 return workingfilectx(self._repo, path, workingctx=self,
1473 1480 filelog=filelog)
1474 1481
1475 1482 def dirty(self, missing=False, merge=True, branch=True):
1476 1483 "check whether a working directory is modified"
1477 1484 # check subrepos first
1478 1485 for s in sorted(self.substate):
1479 1486 if self.sub(s).dirty():
1480 1487 return True
1481 1488 # check current working dir
1482 1489 return ((merge and self.p2()) or
1483 1490 (branch and self.branch() != self.p1().branch()) or
1484 1491 self.modified() or self.added() or self.removed() or
1485 1492 (missing and self.deleted()))
1486 1493
1487 1494 def add(self, list, prefix=""):
1488 1495 join = lambda f: os.path.join(prefix, f)
1489 1496 with self._repo.wlock():
1490 1497 ui, ds = self._repo.ui, self._repo.dirstate
1491 1498 rejected = []
1492 1499 lstat = self._repo.wvfs.lstat
1493 1500 for f in list:
1494 1501 scmutil.checkportable(ui, join(f))
1495 1502 try:
1496 1503 st = lstat(f)
1497 1504 except OSError:
1498 1505 ui.warn(_("%s does not exist!\n") % join(f))
1499 1506 rejected.append(f)
1500 1507 continue
1501 1508 if st.st_size > 10000000:
1502 1509 ui.warn(_("%s: up to %d MB of RAM may be required "
1503 1510 "to manage this file\n"
1504 1511 "(use 'hg revert %s' to cancel the "
1505 1512 "pending addition)\n")
1506 1513 % (f, 3 * st.st_size // 1000000, join(f)))
1507 1514 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1508 1515 ui.warn(_("%s not added: only files and symlinks "
1509 1516 "supported currently\n") % join(f))
1510 1517 rejected.append(f)
1511 1518 elif ds[f] in 'amn':
1512 1519 ui.warn(_("%s already tracked!\n") % join(f))
1513 1520 elif ds[f] == 'r':
1514 1521 ds.normallookup(f)
1515 1522 else:
1516 1523 ds.add(f)
1517 1524 return rejected
1518 1525
1519 1526 def forget(self, files, prefix=""):
1520 1527 join = lambda f: os.path.join(prefix, f)
1521 1528 with self._repo.wlock():
1522 1529 rejected = []
1523 1530 for f in files:
1524 1531 if f not in self._repo.dirstate:
1525 1532 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1526 1533 rejected.append(f)
1527 1534 elif self._repo.dirstate[f] != 'a':
1528 1535 self._repo.dirstate.remove(f)
1529 1536 else:
1530 1537 self._repo.dirstate.drop(f)
1531 1538 return rejected
1532 1539
1533 1540 def undelete(self, list):
1534 1541 pctxs = self.parents()
1535 1542 with self._repo.wlock():
1536 1543 for f in list:
1537 1544 if self._repo.dirstate[f] != 'r':
1538 1545 self._repo.ui.warn(_("%s not removed!\n") % f)
1539 1546 else:
1540 1547 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1541 1548 t = fctx.data()
1542 1549 self._repo.wwrite(f, t, fctx.flags())
1543 1550 self._repo.dirstate.normal(f)
1544 1551
1545 1552 def copy(self, source, dest):
1546 1553 try:
1547 1554 st = self._repo.wvfs.lstat(dest)
1548 1555 except OSError as err:
1549 1556 if err.errno != errno.ENOENT:
1550 1557 raise
1551 1558 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1552 1559 return
1553 1560 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1554 1561 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1555 1562 "symbolic link\n") % dest)
1556 1563 else:
1557 1564 with self._repo.wlock():
1558 1565 if self._repo.dirstate[dest] in '?':
1559 1566 self._repo.dirstate.add(dest)
1560 1567 elif self._repo.dirstate[dest] in 'r':
1561 1568 self._repo.dirstate.normallookup(dest)
1562 1569 self._repo.dirstate.copy(source, dest)
1563 1570
1564 1571 def match(self, pats=None, include=None, exclude=None, default='glob',
1565 1572 listsubrepos=False, badfn=None):
1566 1573 if pats is None:
1567 1574 pats = []
1568 1575 r = self._repo
1569 1576
1570 1577 # Only a case insensitive filesystem needs magic to translate user input
1571 1578 # to actual case in the filesystem.
1572 1579 matcherfunc = matchmod.match
1573 1580 if not util.fscasesensitive(r.root):
1574 1581 matcherfunc = matchmod.icasefsmatcher
1575 1582 return matcherfunc(r.root, r.getcwd(), pats,
1576 1583 include, exclude, default,
1577 1584 auditor=r.auditor, ctx=self,
1578 1585 listsubrepos=listsubrepos, badfn=badfn)
1579 1586
1580 1587 def _filtersuspectsymlink(self, files):
1581 1588 if not files or self._repo.dirstate._checklink:
1582 1589 return files
1583 1590
1584 1591 # Symlink placeholders may get non-symlink-like contents
1585 1592 # via user error or dereferencing by NFS or Samba servers,
1586 1593 # so we filter out any placeholders that don't look like a
1587 1594 # symlink
1588 1595 sane = []
1589 1596 for f in files:
1590 1597 if self.flags(f) == 'l':
1591 1598 d = self[f].data()
1592 1599 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1593 1600 self._repo.ui.debug('ignoring suspect symlink placeholder'
1594 1601 ' "%s"\n' % f)
1595 1602 continue
1596 1603 sane.append(f)
1597 1604 return sane
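# Illustrative sketch (hypothetical file names and contents, not part of this
# module): a healthy placeholder holds only the short, single-line link
# target, while a placeholder dereferenced by an NFS or Samba layer carries
# the target's contents and is dropped by the check above:
#
#     self['goodlink'].data() == 'real/target.txt'           # kept
#     self['badlink'].data()  == '#!/bin/sh\nexec true\n'    # ignored ('\n')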
1598 1605
1599 1606 def _checklookup(self, files):
1600 1607 # check for any possibly clean files
1601 1608 if not files:
1602 1609 return [], []
1603 1610
1604 1611 modified = []
1605 1612 fixup = []
1606 1613 pctx = self._parents[0]
1607 1614 # do a full compare of any files that might have changed
1608 1615 for f in sorted(files):
1609 1616 if (f not in pctx or self.flags(f) != pctx.flags(f)
1610 1617 or pctx[f].cmp(self[f])):
1611 1618 modified.append(f)
1612 1619 else:
1613 1620 fixup.append(f)
1614 1621
1615 1622 # update dirstate for files that are actually clean
1616 1623 if fixup:
1617 1624 try:
1618 1625 # updating the dirstate is optional
1619 1626 # so we don't wait on the lock
1620 1627 # wlock can invalidate the dirstate, so cache normal _after_
1621 1628 # taking the lock
1622 1629 with self._repo.wlock(False):
1623 1630 normal = self._repo.dirstate.normal
1624 1631 for f in fixup:
1625 1632 normal(f)
1626 1633 # write changes out explicitly, because nesting
1627 1634 # wlock at runtime may prevent 'wlock.release()'
1628 1635 # after this block from doing so for subsequent
1629 1636 # changing files
1630 1637 self._repo.dirstate.write(self._repo.currenttransaction())
1631 1638 except error.LockError:
1632 1639 pass
1633 1640 return modified, fixup
1634 1641
1635 1642 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1636 1643 unknown=False):
1637 1644 '''Gets the status from the dirstate -- internal use only.'''
1638 1645 listignored, listclean, listunknown = ignored, clean, unknown
1639 1646 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1640 1647 subrepos = []
1641 1648 if '.hgsub' in self:
1642 1649 subrepos = sorted(self.substate)
1643 1650 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1644 1651 listclean, listunknown)
1645 1652
1646 1653 # check for any possibly clean files
1647 1654 if cmp:
1648 1655 modified2, fixup = self._checklookup(cmp)
1649 1656 s.modified.extend(modified2)
1650 1657
1651 1658 # update dirstate for files that are actually clean
1652 1659 if fixup and listclean:
1653 1660 s.clean.extend(fixup)
1654 1661
1655 1662 if match.always():
1656 1663 # cache for performance
1657 1664 if s.unknown or s.ignored or s.clean:
1658 1665 # "_status" is cached with list*=False in the normal route
1659 1666 self._status = scmutil.status(s.modified, s.added, s.removed,
1660 1667 s.deleted, [], [], [])
1661 1668 else:
1662 1669 self._status = s
1663 1670
1664 1671 return s
1665 1672
1666 1673 @propertycache
1667 1674 def _manifest(self):
1668 1675 """generate a manifest corresponding to the values in self._status
1669 1676
1670 1677 This reuses the file nodeid from the parent, but uses special node
1671 1678 identifiers for added and modified files. This is used by manifest
1672 1679 merge to see that files are different and by the update logic to
1673 1680 avoid deleting newly added files.
1674 1681 """
1675 1682 return self._buildstatusmanifest(self._status)
1676 1683
1677 1684 def _buildstatusmanifest(self, status):
1678 1685 """Builds a manifest that includes the given status results."""
1679 1686 parents = self.parents()
1680 1687
1681 1688 man = parents[0].manifest().copy()
1682 1689
1683 1690 ff = self._flagfunc
1684 1691 for i, l in ((addednodeid, status.added),
1685 1692 (modifiednodeid, status.modified)):
1686 1693 for f in l:
1687 1694 man[f] = i
1688 1695 try:
1689 1696 man.setflag(f, ff(f))
1690 1697 except OSError:
1691 1698 pass
1692 1699
1693 1700 for f in status.deleted + status.removed:
1694 1701 if f in man:
1695 1702 del man[f]
1696 1703
1697 1704 return man
1698 1705
1699 1706 def _buildstatus(self, other, s, match, listignored, listclean,
1700 1707 listunknown):
1701 1708 """build a status with respect to another context
1702 1709
1703 1710 This includes logic for maintaining the fast path of status when
1704 1711 comparing the working directory against its parent, which is to skip
1705 1712 building a new manifest when self (the working directory) is compared
1706 1713 against its parent (repo['.']).
1707 1714 """
1708 1715 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1709 1716 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1710 1717 # might have accidentally ended up with the entire contents of the file
1711 1718 # they are supposed to be linking to.
1712 1719 s.modified[:] = self._filtersuspectsymlink(s.modified)
1713 1720 if other != self._repo['.']:
1714 1721 s = super(workingctx, self)._buildstatus(other, s, match,
1715 1722 listignored, listclean,
1716 1723 listunknown)
1717 1724 return s
1718 1725
1719 1726 def _matchstatus(self, other, match):
1720 1727 """override the match method with a filter for directory patterns
1721 1728
1722 1729 We use inheritance to customize the match.bad method only in cases of
1723 1730 workingctx since it belongs only to the working directory when
1724 1731 comparing against the parent changeset.
1725 1732
1726 1733 If we aren't comparing against the working directory's parent, then we
1727 1734 just use the default match object sent to us.
1728 1735 """
1729 1736 superself = super(workingctx, self)
1730 1737 match = superself._matchstatus(other, match)
1731 1738 if other != self._repo['.']:
1732 1739 def bad(f, msg):
1733 1740 # 'f' may be a directory pattern from 'match.files()',
1734 1741 # so 'f not in ctx1' is not enough
1735 1742 if f not in other and not other.hasdir(f):
1736 1743 self._repo.ui.warn('%s: %s\n' %
1737 1744 (self._repo.dirstate.pathto(f), msg))
1738 1745 match.bad = bad
1739 1746 return match
1740 1747
1741 1748 class committablefilectx(basefilectx):
1742 1749 """A committablefilectx provides common functionality for a file context
1743 1750 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1744 1751 def __init__(self, repo, path, filelog=None, ctx=None):
1745 1752 self._repo = repo
1746 1753 self._path = path
1747 1754 self._changeid = None
1748 1755 self._filerev = self._filenode = None
1749 1756
1750 1757 if filelog is not None:
1751 1758 self._filelog = filelog
1752 1759 if ctx:
1753 1760 self._changectx = ctx
1754 1761
1755 1762 def __nonzero__(self):
1756 1763 return True
1757 1764
1758 1765 __bool__ = __nonzero__
1759 1766
1760 1767 def linkrev(self):
1761 1768 # linked to self._changectx no matter if file is modified or not
1762 1769 return self.rev()
1763 1770
1764 1771 def parents(self):
1765 1772 '''return parent filectxs, following copies if necessary'''
1766 1773 def filenode(ctx, path):
1767 1774 return ctx._manifest.get(path, nullid)
1768 1775
1769 1776 path = self._path
1770 1777 fl = self._filelog
1771 1778 pcl = self._changectx._parents
1772 1779 renamed = self.renamed()
1773 1780
1774 1781 if renamed:
1775 1782 pl = [renamed + (None,)]
1776 1783 else:
1777 1784 pl = [(path, filenode(pcl[0], path), fl)]
1778 1785
1779 1786 for pc in pcl[1:]:
1780 1787 pl.append((path, filenode(pc, path), fl))
1781 1788
1782 1789 return [self._parentfilectx(p, fileid=n, filelog=l)
1783 1790 for p, n, l in pl if n != nullid]
1784 1791
1785 1792 def children(self):
1786 1793 return []
1787 1794
1788 1795 class workingfilectx(committablefilectx):
1789 1796 """A workingfilectx object makes access to data related to a particular
1790 1797 file in the working directory convenient."""
1791 1798 def __init__(self, repo, path, filelog=None, workingctx=None):
1792 1799 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1793 1800
1794 1801 @propertycache
1795 1802 def _changectx(self):
1796 1803 return workingctx(self._repo)
1797 1804
1798 1805 def data(self):
1799 1806 return self._repo.wread(self._path)
1800 1807 def renamed(self):
1801 1808 rp = self._repo.dirstate.copied(self._path)
1802 1809 if not rp:
1803 1810 return None
1804 1811 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1805 1812
1806 1813 def size(self):
1807 1814 return self._repo.wvfs.lstat(self._path).st_size
1808 1815 def date(self):
1809 1816 t, tz = self._changectx.date()
1810 1817 try:
1811 1818 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1812 1819 except OSError as err:
1813 1820 if err.errno != errno.ENOENT:
1814 1821 raise
1815 1822 return (t, tz)
1816 1823
1817 1824 def cmp(self, fctx):
1818 1825 """compare with other file context
1819 1826
1820 1827 returns True if different than fctx.
1821 1828 """
1822 1829 # fctx should be a filectx (not a workingfilectx)
1823 1830 # invert comparison to reuse the same code path
1824 1831 return fctx.cmp(self)
1825 1832
1826 1833 def remove(self, ignoremissing=False):
1827 1834 """wraps unlink for a repo's working directory"""
1828 1835 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1829 1836
1830 1837 def write(self, data, flags):
1831 1838 """wraps repo.wwrite"""
1832 1839 self._repo.wwrite(self._path, data, flags)
1833 1840
1834 1841 class workingcommitctx(workingctx):
1835 1842 """A workingcommitctx object makes access to data related to
1836 1843 the revision being committed convenient.
1837 1844
1838 1845 This hides changes in the working directory if they aren't
1839 1846 committed in this context.
1840 1847 """
1841 1848 def __init__(self, repo, changes,
1842 1849 text="", user=None, date=None, extra=None):
1843 1850 super(workingctx, self).__init__(repo, text, user, date, extra,
1844 1851 changes)
1845 1852
1846 1853 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1847 1854 unknown=False):
1848 1855 """Return matched files only in ``self._status``
1849 1856
1850 1857 Uncommitted files appear "clean" via this context, even if
1851 1858 they aren't actually so in the working directory.
1852 1859 """
1853 1860 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1854 1861 if clean:
1855 1862 clean = [f for f in self._manifest if f not in self._changedset]
1856 1863 else:
1857 1864 clean = []
1858 1865 return scmutil.status([f for f in self._status.modified if match(f)],
1859 1866 [f for f in self._status.added if match(f)],
1860 1867 [f for f in self._status.removed if match(f)],
1861 1868 [], [], [], clean)
1862 1869
1863 1870 @propertycache
1864 1871 def _changedset(self):
1865 1872 """Return the set of files changed in this context
1866 1873 """
1867 1874 changed = set(self._status.modified)
1868 1875 changed.update(self._status.added)
1869 1876 changed.update(self._status.removed)
1870 1877 return changed
1871 1878
1872 1879 def makecachingfilectxfn(func):
1873 1880 """Create a filectxfn that caches based on the path.
1874 1881
1875 1882 We can't use util.cachefunc because it uses all arguments as the cache
1876 1883 key and this creates a cycle since the arguments include the repo and
1877 1884 memctx.
1878 1885 """
1879 1886 cache = {}
1880 1887
1881 1888 def getfilectx(repo, memctx, path):
1882 1889 if path not in cache:
1883 1890 cache[path] = func(repo, memctx, path)
1884 1891 return cache[path]
1885 1892
1886 1893 return getfilectx
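# A minimal usage sketch (assumed caller code, not part of this module):
# 'expensivefilectxfn' is a hypothetical callable with the
# (repo, memctx, path) signature described for memctx below; wrapping it
# means repeated lookups of the same path reuse the first result.
#
#     cachedfn = makecachingfilectxfn(expensivefilectxfn)
#     fctx1 = cachedfn(repo, mctx, 'a.txt')
#     fctx2 = cachedfn(repo, mctx, 'a.txt')   # served from the cache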
1887 1894
1888 1895 class memctx(committablectx):
1889 1896 """Use memctx to perform in-memory commits via localrepo.commitctx().
1890 1897
1891 1898 Revision information is supplied at initialization time, while
1892 1899 related file data is made available through a callback
1893 1900 mechanism. 'repo' is the current localrepo, 'parents' is a
1894 1901 sequence of two parent revision identifiers (pass None for every
1895 1902 missing parent), 'text' is the commit message and 'files' lists
1896 1903 names of files touched by the revision (normalized and relative to
1897 1904 repository root).
1898 1905
1899 1906 filectxfn(repo, memctx, path) is a callable receiving the
1900 1907 repository, the current memctx object and the normalized path of the
1901 1908 requested file, relative to the repository root. It is called by the
1902 1909 commit function for every file in 'files', but the call order is
1903 1910 undefined. If the file is available in the revision being
1904 1911 committed (updated or added), filectxfn returns a memfilectx
1905 1912 object. If the file was removed, filectxfn return None for recent
1906 1913 Mercurial. Moved files are represented by marking the source file
1907 1914 removed and the new file added with copy information (see
1908 1915 memfilectx).
1909 1916
1910 1917 'user' is the committer name and defaults to the current
1911 1918 repository username, 'date' is the commit date in any format
1912 1919 supported by util.parsedate() and defaults to the current date,
1913 1920 and 'extra' is a dictionary of metadata or is left empty.
1914 1921 """
1915 1922
1916 1923 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1917 1924 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1918 1925 # this field to determine what to do in filectxfn.
1919 1926 _returnnoneformissingfiles = True
1920 1927
1921 1928 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1922 1929 date=None, extra=None, editor=False):
1923 1930 super(memctx, self).__init__(repo, text, user, date, extra)
1924 1931 self._rev = None
1925 1932 self._node = None
1926 1933 parents = [(p or nullid) for p in parents]
1927 1934 p1, p2 = parents
1928 1935 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1929 1936 files = sorted(set(files))
1930 1937 self._files = files
1931 1938 self.substate = {}
1932 1939
1933 1940 # if store is not callable, wrap it in a function
1934 1941 if not callable(filectxfn):
1935 1942 def getfilectx(repo, memctx, path):
1936 1943 fctx = filectxfn[path]
1937 1944 # this is weird but apparently we only keep track of one parent
1938 1945 # (why not only store that instead of a tuple?)
1939 1946 copied = fctx.renamed()
1940 1947 if copied:
1941 1948 copied = copied[0]
1942 1949 return memfilectx(repo, path, fctx.data(),
1943 1950 islink=fctx.islink(), isexec=fctx.isexec(),
1944 1951 copied=copied, memctx=memctx)
1945 1952 self._filectxfn = getfilectx
1946 1953 else:
1947 1954 # memoizing increases performance for e.g. vcs convert scenarios.
1948 1955 self._filectxfn = makecachingfilectxfn(filectxfn)
1949 1956
1950 1957 if extra:
1951 1958 self._extra = extra.copy()
1952 1959 else:
1953 1960 self._extra = {}
1954 1961
1955 1962 if self._extra.get('branch', '') == '':
1956 1963 self._extra['branch'] = 'default'
1957 1964
1958 1965 if editor:
1959 1966 self._text = editor(self._repo, self, [])
1960 1967 self._repo.savecommitmessage(self._text)
1961 1968
1962 1969 def filectx(self, path, filelog=None):
1963 1970 """get a file context from the working directory
1964 1971
1965 1972 Returns None if file doesn't exist and should be removed."""
1966 1973 return self._filectxfn(self._repo, self, path)
1967 1974
1968 1975 def commit(self):
1969 1976 """commit context to the repo"""
1970 1977 return self._repo.commitctx(self)
1971 1978
1972 1979 @propertycache
1973 1980 def _manifest(self):
1974 1981 """generate a manifest based on the return values of filectxfn"""
1975 1982
1976 1983 # keep this simple for now; just worry about p1
1977 1984 pctx = self._parents[0]
1978 1985 man = pctx.manifest().copy()
1979 1986
1980 1987 for f in self._status.modified:
1981 1988 p1node = nullid
1982 1989 p2node = nullid
1983 1990 p = pctx[f].parents() # if file isn't in pctx, check p2?
1984 1991 if len(p) > 0:
1985 1992 p1node = p[0].filenode()
1986 1993 if len(p) > 1:
1987 1994 p2node = p[1].filenode()
1988 1995 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1989 1996
1990 1997 for f in self._status.added:
1991 1998 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1992 1999
1993 2000 for f in self._status.removed:
1994 2001 if f in man:
1995 2002 del man[f]
1996 2003
1997 2004 return man
1998 2005
1999 2006 @propertycache
2000 2007 def _status(self):
2001 2008 """Calculate exact status from ``files`` specified at construction
2002 2009 """
2003 2010 man1 = self.p1().manifest()
2004 2011 p2 = self._parents[1]
2005 2012 # "1 < len(self._parents)" can't be used for checking
2006 2013 # existence of the 2nd parent, because "memctx._parents" is
2007 2014 # explicitly initialized by the list, of which length is 2.
2008 2015 if p2.node() != nullid:
2009 2016 man2 = p2.manifest()
2010 2017 managing = lambda f: f in man1 or f in man2
2011 2018 else:
2012 2019 managing = lambda f: f in man1
2013 2020
2014 2021 modified, added, removed = [], [], []
2015 2022 for f in self._files:
2016 2023 if not managing(f):
2017 2024 added.append(f)
2018 2025 elif self[f]:
2019 2026 modified.append(f)
2020 2027 else:
2021 2028 removed.append(f)
2022 2029
2023 2030 return scmutil.status(modified, added, removed, [], [], [], [])
2024 2031
2025 2032 class memfilectx(committablefilectx):
2026 2033 """memfilectx represents an in-memory file to commit.
2027 2034
2028 2035 See memctx and committablefilectx for more details.
2029 2036 """
2030 2037 def __init__(self, repo, path, data, islink=False,
2031 2038 isexec=False, copied=None, memctx=None):
2032 2039 """
2033 2040 path is the normalized file path relative to repository root.
2034 2041 data is the file content as a string.
2035 2042 islink is True if the file is a symbolic link.
2036 2043 isexec is True if the file is executable.
2037 2044 copied is the source file path if the current file was copied in the
2038 2045 revision being committed, or None."""
2039 2046 super(memfilectx, self).__init__(repo, path, None, memctx)
2040 2047 self._data = data
2041 2048 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2042 2049 self._copied = None
2043 2050 if copied:
2044 2051 self._copied = (copied, nullid)
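# Illustrative sketch (hypothetical paths and variables, not part of this
# module): a rename is expressed as the source marked removed in the memctx
# plus a new memfilectx that records the copy source:
#
#     memfilectx(repo, 'new/name.txt', data,
#                copied='old/name.txt', memctx=mctx)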
2045 2052
2046 2053 def data(self):
2047 2054 return self._data
2048 2055 def size(self):
2049 2056 return len(self.data())
2050 2057 def flags(self):
2051 2058 return self._flags
2052 2059 def renamed(self):
2053 2060 return self._copied
2054 2061
2055 2062 def remove(self, ignoremissing=False):
2056 2063 """wraps unlink for a repo's working directory"""
2057 2064 # need to figure out what to do here
2058 2065 del self._changectx[self._path]
2059 2066
2060 2067 def write(self, data, flags):
2061 2068 """wraps repo.wwrite"""
2062 2069 self._data = data
2063 2070
2064 2071 class metadataonlyctx(committablectx):
2065 2072 """Like memctx but it's reusing the manifest of different commit.
2066 2073 Intended to be used by lightweight operations that are creating
2067 2074 metadata-only changes.
2068 2075
2069 2076 Revision information is supplied at initialization time. 'repo' is the
2070 2077 current localrepo, 'ctx' is original revision which manifest we're reuisng
2071 2078 'parents' is a sequence of two parent revisions identifiers (pass None for
2072 2079 every missing parent), 'text' is the commit.
2073 2080
2074 2081 'user' is the committer name and defaults to the current repository
2075 2082 username, 'date' is the commit date in any format supported by
2076 2083 util.parsedate() and defaults to the current date, and 'extra' is a
2077 2084 dictionary of metadata or is left empty.
2078 2085 """
2079 2086 def __new__(cls, repo, originalctx, *args, **kwargs):
2080 2087 return super(metadataonlyctx, cls).__new__(cls, repo)
2081 2088
2082 2089 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2083 2090 extra=None, editor=False):
2084 2091 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2085 2092 self._rev = None
2086 2093 self._node = None
2087 2094 self._originalctx = originalctx
2088 2095 self._manifestnode = originalctx.manifestnode()
2089 2096 parents = [(p or nullid) for p in parents]
2090 2097 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2091 2098
2092 2099 # sanity check to ensure that the reused manifest parents are
2093 2100 # manifests of our commit parents
2094 2101 mp1, mp2 = self.manifestctx().parents
2095 2102 if p1 != nullid and p1.manifestnode() != mp1:
2096 2103 raise RuntimeError('can\'t reuse the manifest: '
2097 2104 'its p1 doesn\'t match the new ctx p1')
2098 2105 if p2 != nullid and p2.manifestnode() != mp2:
2099 2106 raise RuntimeError('can\'t reuse the manifest: '
2100 2107 'its p2 doesn\'t match the new ctx p2')
2101 2108
2102 2109 self._files = originalctx.files()
2103 2110 self.substate = {}
2104 2111
2105 2112 if extra:
2106 2113 self._extra = extra.copy()
2107 2114 else:
2108 2115 self._extra = {}
2109 2116
2110 2117 if self._extra.get('branch', '') == '':
2111 2118 self._extra['branch'] = 'default'
2112 2119
2113 2120 if editor:
2114 2121 self._text = editor(self._repo, self, [])
2115 2122 self._repo.savecommitmessage(self._text)
2116 2123
2117 2124 def manifestnode(self):
2118 2125 return self._manifestnode
2119 2126
2120 2127 @propertycache
2121 2128 def _manifestctx(self):
2122 2129 return self._repo.manifestlog[self._manifestnode]
2123 2130
2124 2131 def filectx(self, path, filelog=None):
2125 2132 return self._originalctx.filectx(path, filelog=filelog)
2126 2133
2127 2134 def commit(self):
2128 2135 """commit context to the repo"""
2129 2136 return self._repo.commitctx(self)
2130 2137
2131 2138 @property
2132 2139 def _manifest(self):
2133 2140 return self._originalctx.manifest()
2134 2141
2135 2142 @propertycache
2136 2143 def _status(self):
2137 2144 """Calculate exact status from ``files`` specified in the ``origctx``
2138 2145 and the parent manifests.
2139 2146 """
2140 2147 man1 = self.p1().manifest()
2141 2148 p2 = self._parents[1]
2142 2149 # "1 < len(self._parents)" can't be used for checking
2143 2150 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2144 2151 # explicitly initialized by the list, of which length is 2.
2145 2152 if p2.node() != nullid:
2146 2153 man2 = p2.manifest()
2147 2154 managing = lambda f: f in man1 or f in man2
2148 2155 else:
2149 2156 managing = lambda f: f in man1
2150 2157
2151 2158 modified, added, removed = [], [], []
2152 2159 for f in self._files:
2153 2160 if not managing(f):
2154 2161 added.append(f)
2155 2162 elif self[f]:
2156 2163 modified.append(f)
2157 2164 else:
2158 2165 removed.append(f)
2159 2166
2160 2167 return scmutil.status(modified, added, removed, [], [], [], [])