adjustlinkrev: remove unnecessary parameters...
Jun Wu - r30275:e81d72b4 default
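This change simplifies the internal _adjustlinkrev() API: the path, filelog and
fnode arguments were always derivable from the file context itself, so the
method now takes only srcrev (plus the optional inclusive flag) and reads the
rest from self. A before/after sketch of the two call sites touched by this
diff (illustrative only, not part of the commit):

    # before
    self._adjustlinkrev(self._path, self._filelog, self._filenode,
                        self._descendantrev)
    self._adjustlinkrev(self._path, self._filelog, self._filenode,
                        self.rev(), inclusive=True)

    # after
    self._adjustlinkrev(self._descendantrev)
    self._adjustlinkrev(self.rev(), inclusive=True)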
@@ -1,1987 +1,1982 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 wdirid,
23 23 )
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 fileset,
28 28 match as matchmod,
29 29 mdiff,
30 30 obsolete as obsmod,
31 31 patch,
32 32 phases,
33 33 repoview,
34 34 revlog,
35 35 scmutil,
36 36 subrepo,
37 37 util,
38 38 )
39 39
40 40 propertycache = util.propertycache
41 41
42 42 # Phony node value to stand-in for new files in some uses of
43 43 # manifests. Manifests support 21-byte hashes for nodes which are
44 44 # dirty in the working copy.
45 45 _newnode = '!' * 21
46 46
47 47 nonascii = re.compile(r'[^\x21-\x7f]').search
48 48
49 49 class basectx(object):
50 50 """A basectx object represents the common logic for its children:
51 51 changectx: read-only context that is already present in the repo,
52 52 workingctx: a context that represents the working directory and can
53 53 be committed,
54 54 memctx: a context that represents changes in-memory and can also
55 55 be committed."""
56 56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 57 if isinstance(changeid, basectx):
58 58 return changeid
59 59
60 60 o = super(basectx, cls).__new__(cls)
61 61
62 62 o._repo = repo
63 63 o._rev = nullrev
64 64 o._node = nullid
65 65
66 66 return o
67 67
68 68 def __str__(self):
69 69 return short(self.node())
70 70
71 71 def __int__(self):
72 72 return self.rev()
73 73
74 74 def __repr__(self):
75 75 return "<%s %s>" % (type(self).__name__, str(self))
76 76
77 77 def __eq__(self, other):
78 78 try:
79 79 return type(self) == type(other) and self._rev == other._rev
80 80 except AttributeError:
81 81 return False
82 82
83 83 def __ne__(self, other):
84 84 return not (self == other)
85 85
86 86 def __contains__(self, key):
87 87 return key in self._manifest
88 88
89 89 def __getitem__(self, key):
90 90 return self.filectx(key)
91 91
92 92 def __iter__(self):
93 93 return iter(self._manifest)
94 94
95 95 def _manifestmatches(self, match, s):
96 96 """generate a new manifest filtered by the match argument
97 97
98 98 This method is for internal use only and mainly exists to provide an
99 99 object oriented way for other contexts to customize the manifest
100 100 generation.
101 101 """
102 102 return self.manifest().matches(match)
103 103
104 104 def _matchstatus(self, other, match):
105 105 """return match.always if match is none
106 106
107 107 This internal method provides a way for child objects to override the
108 108 match operator.
109 109 """
110 110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111 111
112 112 def _buildstatus(self, other, s, match, listignored, listclean,
113 113 listunknown):
114 114 """build a status with respect to another context"""
115 115 # Load earliest manifest first for caching reasons. More specifically,
116 116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 119 # delta to what's in the cache. So that's one full reconstruction + one
120 120 # delta application.
121 121 if self.rev() is not None and self.rev() < other.rev():
122 122 self.manifest()
123 123 mf1 = other._manifestmatches(match, s)
124 124 mf2 = self._manifestmatches(match, s)
125 125
126 126 modified, added = [], []
127 127 removed = []
128 128 clean = []
129 129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 130 deletedset = set(deleted)
131 131 d = mf1.diff(mf2, clean=listclean)
132 132 for fn, value in d.iteritems():
133 133 if fn in deletedset:
134 134 continue
135 135 if value is None:
136 136 clean.append(fn)
137 137 continue
138 138 (node1, flag1), (node2, flag2) = value
139 139 if node1 is None:
140 140 added.append(fn)
141 141 elif node2 is None:
142 142 removed.append(fn)
143 143 elif flag1 != flag2:
144 144 modified.append(fn)
145 145 elif node2 != _newnode:
146 146 # When comparing files between two commits, we save time by
147 147 # not comparing the file contents when the nodeids differ.
148 148 # Note that this means we incorrectly report a reverted change
149 149 # to a file as a modification.
150 150 modified.append(fn)
151 151 elif self[fn].cmp(other[fn]):
152 152 modified.append(fn)
153 153 else:
154 154 clean.append(fn)
155 155
156 156 if removed:
157 157 # need to filter files if they are already reported as removed
158 158 unknown = [fn for fn in unknown if fn not in mf1]
159 159 ignored = [fn for fn in ignored if fn not in mf1]
160 160 # if they're deleted, don't report them as removed
161 161 removed = [fn for fn in removed if fn not in deletedset]
162 162
163 163 return scmutil.status(modified, added, removed, deleted, unknown,
164 164 ignored, clean)
165 165
166 166 @propertycache
167 167 def substate(self):
168 168 return subrepo.state(self, self._repo.ui)
169 169
170 170 def subrev(self, subpath):
171 171 return self.substate[subpath][1]
172 172
173 173 def rev(self):
174 174 return self._rev
175 175 def node(self):
176 176 return self._node
177 177 def hex(self):
178 178 return hex(self.node())
179 179 def manifest(self):
180 180 return self._manifest
181 181 def repo(self):
182 182 return self._repo
183 183 def phasestr(self):
184 184 return phases.phasenames[self.phase()]
185 185 def mutable(self):
186 186 return self.phase() > phases.public
187 187
188 188 def getfileset(self, expr):
189 189 return fileset.getfileset(self, expr)
190 190
191 191 def obsolete(self):
192 192 """True if the changeset is obsolete"""
193 193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194 194
195 195 def extinct(self):
196 196 """True if the changeset is extinct"""
197 197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198 198
199 199 def unstable(self):
200 200 """True if the changeset is not obsolete but it's ancestor are"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202 202
203 203 def bumped(self):
204 204 """True if the changeset try to be a successor of a public changeset
205 205
206 206 Only non-public and non-obsolete changesets may be bumped.
207 207 """
208 208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209 209
210 210 def divergent(self):
211 211 """Is a successors of a changeset with multiple possible successors set
212 212
213 213 Only non-public and non-obsolete changesets may be divergent.
214 214 """
215 215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216 216
217 217 def troubled(self):
218 218 """True if the changeset is either unstable, bumped or divergent"""
219 219 return self.unstable() or self.bumped() or self.divergent()
220 220
221 221 def troubles(self):
222 222 """return the list of troubles affecting this changesets.
223 223
224 224 Troubles are returned as strings. possible values are:
225 225 - unstable,
226 226 - bumped,
227 227 - divergent.
228 228 """
229 229 troubles = []
230 230 if self.unstable():
231 231 troubles.append('unstable')
232 232 if self.bumped():
233 233 troubles.append('bumped')
234 234 if self.divergent():
235 235 troubles.append('divergent')
236 236 return troubles
237 237
238 238 def parents(self):
239 239 """return contexts for each parent changeset"""
240 240 return self._parents
241 241
242 242 def p1(self):
243 243 return self._parents[0]
244 244
245 245 def p2(self):
246 246 parents = self._parents
247 247 if len(parents) == 2:
248 248 return parents[1]
249 249 return changectx(self._repo, nullrev)
250 250
251 251 def _fileinfo(self, path):
252 252 if '_manifest' in self.__dict__:
253 253 try:
254 254 return self._manifest[path], self._manifest.flags(path)
255 255 except KeyError:
256 256 raise error.ManifestLookupError(self._node, path,
257 257 _('not found in manifest'))
258 258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 259 if path in self._manifestdelta:
260 260 return (self._manifestdelta[path],
261 261 self._manifestdelta.flags(path))
262 262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
263 263 if not node:
264 264 raise error.ManifestLookupError(self._node, path,
265 265 _('not found in manifest'))
266 266
267 267 return node, flag
268 268
269 269 def filenode(self, path):
270 270 return self._fileinfo(path)[0]
271 271
272 272 def flags(self, path):
273 273 try:
274 274 return self._fileinfo(path)[1]
275 275 except error.LookupError:
276 276 return ''
277 277
278 278 def sub(self, path, allowcreate=True):
279 279 '''return a subrepo for the stored revision of path, never wdir()'''
280 280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
281 281
282 282 def nullsub(self, path, pctx):
283 283 return subrepo.nullsubrepo(self, path, pctx)
284 284
285 285 def workingsub(self, path):
286 286 '''return a subrepo for the stored revision, or wdir if this is a wdir
287 287 context.
288 288 '''
289 289 return subrepo.subrepo(self, path, allowwdir=True)
290 290
291 291 def match(self, pats=[], include=None, exclude=None, default='glob',
292 292 listsubrepos=False, badfn=None):
293 293 r = self._repo
294 294 return matchmod.match(r.root, r.getcwd(), pats,
295 295 include, exclude, default,
296 296 auditor=r.nofsauditor, ctx=self,
297 297 listsubrepos=listsubrepos, badfn=badfn)
298 298
299 299 def diff(self, ctx2=None, match=None, **opts):
300 300 """Returns a diff generator for the given contexts and matcher"""
301 301 if ctx2 is None:
302 302 ctx2 = self.p1()
303 303 if ctx2 is not None:
304 304 ctx2 = self._repo[ctx2]
305 305 diffopts = patch.diffopts(self._repo.ui, opts)
306 306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
307 307
308 308 def dirs(self):
309 309 return self._manifest.dirs()
310 310
311 311 def hasdir(self, dir):
312 312 return self._manifest.hasdir(dir)
313 313
314 314 def dirty(self, missing=False, merge=True, branch=True):
315 315 return False
316 316
317 317 def status(self, other=None, match=None, listignored=False,
318 318 listclean=False, listunknown=False, listsubrepos=False):
319 319 """return status of files between two nodes or node and working
320 320 directory.
321 321
322 322 If other is None, compare this node with working directory.
323 323
324 324 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 325 """
326 326
327 327 ctx1 = self
328 328 ctx2 = self._repo[other]
329 329
330 330 # This next code block is, admittedly, fragile logic that tests for
331 331 # reversing the contexts and wouldn't need to exist if it weren't for
332 332 # the fast (and common) code path of comparing the working directory
333 333 # with its first parent.
334 334 #
335 335 # What we're aiming for here is the ability to call:
336 336 #
337 337 # workingctx.status(parentctx)
338 338 #
339 339 # If we always built the manifest for each context and compared those,
340 340 # then we'd be done. But the special case of the above call means we
341 341 # just copy the manifest of the parent.
342 342 reversed = False
343 343 if (not isinstance(ctx1, changectx)
344 344 and isinstance(ctx2, changectx)):
345 345 reversed = True
346 346 ctx1, ctx2 = ctx2, ctx1
347 347
348 348 match = ctx2._matchstatus(ctx1, match)
349 349 r = scmutil.status([], [], [], [], [], [], [])
350 350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 351 listunknown)
352 352
353 353 if reversed:
354 354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 355 # these make no sense to reverse.
356 356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 357 r.clean)
358 358
359 359 if listsubrepos:
360 360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 361 try:
362 362 rev2 = ctx2.subrev(subpath)
363 363 except KeyError:
364 364 # A subrepo that existed in node1 was deleted between
365 365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 366 # won't contain that subpath. The best we can do is ignore it.
367 367 rev2 = None
368 368 submatch = matchmod.subdirmatcher(subpath, match)
369 369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 370 clean=listclean, unknown=listunknown,
371 371 listsubrepos=True)
372 372 for rfiles, sfiles in zip(r, s):
373 373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374 374
375 375 for l in r:
376 376 l.sort()
377 377
378 378 return r
379 379
380 380
381 381 def makememctx(repo, parents, text, user, date, branch, files, store,
382 382 editor=None, extra=None):
383 383 def getfilectx(repo, memctx, path):
384 384 data, mode, copied = store.getfile(path)
385 385 if data is None:
386 386 return None
387 387 islink, isexec = mode
388 388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
389 389 copied=copied, memctx=memctx)
390 390 if extra is None:
391 391 extra = {}
392 392 if branch:
393 393 extra['branch'] = encoding.fromlocal(branch)
394 394 ctx = memctx(repo, parents, text, files, getfilectx, user,
395 395 date, extra, editor)
396 396 return ctx
397 397
398 398 class changectx(basectx):
399 399 """A changecontext object makes access to data related to a particular
400 400 changeset convenient. It represents a read-only context already present in
401 401 the repo."""
402 402 def __init__(self, repo, changeid=''):
403 403 """changeid is a revision number, node, or tag"""
404 404
405 405 # since basectx.__new__ already took care of copying the object, we
406 406 # don't need to do anything in __init__, so we just exit here
407 407 if isinstance(changeid, basectx):
408 408 return
409 409
410 410 if changeid == '':
411 411 changeid = '.'
412 412 self._repo = repo
413 413
414 414 try:
415 415 if isinstance(changeid, int):
416 416 self._node = repo.changelog.node(changeid)
417 417 self._rev = changeid
418 418 return
419 419 if isinstance(changeid, long):
420 420 changeid = str(changeid)
421 421 if changeid == 'null':
422 422 self._node = nullid
423 423 self._rev = nullrev
424 424 return
425 425 if changeid == 'tip':
426 426 self._node = repo.changelog.tip()
427 427 self._rev = repo.changelog.rev(self._node)
428 428 return
429 429 if changeid == '.' or changeid == repo.dirstate.p1():
430 430 # this is a hack to delay/avoid loading obsmarkers
431 431 # when we know that '.' won't be hidden
432 432 self._node = repo.dirstate.p1()
433 433 self._rev = repo.unfiltered().changelog.rev(self._node)
434 434 return
435 435 if len(changeid) == 20:
436 436 try:
437 437 self._node = changeid
438 438 self._rev = repo.changelog.rev(changeid)
439 439 return
440 440 except error.FilteredRepoLookupError:
441 441 raise
442 442 except LookupError:
443 443 pass
444 444
445 445 try:
446 446 r = int(changeid)
447 447 if str(r) != changeid:
448 448 raise ValueError
449 449 l = len(repo.changelog)
450 450 if r < 0:
451 451 r += l
452 452 if r < 0 or r >= l:
453 453 raise ValueError
454 454 self._rev = r
455 455 self._node = repo.changelog.node(r)
456 456 return
457 457 except error.FilteredIndexError:
458 458 raise
459 459 except (ValueError, OverflowError, IndexError):
460 460 pass
461 461
462 462 if len(changeid) == 40:
463 463 try:
464 464 self._node = bin(changeid)
465 465 self._rev = repo.changelog.rev(self._node)
466 466 return
467 467 except error.FilteredLookupError:
468 468 raise
469 469 except (TypeError, LookupError):
470 470 pass
471 471
472 472 # lookup bookmarks through the name interface
473 473 try:
474 474 self._node = repo.names.singlenode(repo, changeid)
475 475 self._rev = repo.changelog.rev(self._node)
476 476 return
477 477 except KeyError:
478 478 pass
479 479 except error.FilteredRepoLookupError:
480 480 raise
481 481 except error.RepoLookupError:
482 482 pass
483 483
484 484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 485 if self._node is not None:
486 486 self._rev = repo.changelog.rev(self._node)
487 487 return
488 488
489 489 # lookup failed
490 490 # check if it might have come from damaged dirstate
491 491 #
492 492 # XXX we could avoid the unfiltered if we had a recognizable
493 493 # exception for filtered changeset access
494 494 if changeid in repo.unfiltered().dirstate.parents():
495 495 msg = _("working directory has unknown parent '%s'!")
496 496 raise error.Abort(msg % short(changeid))
497 497 try:
498 498 if len(changeid) == 20 and nonascii(changeid):
499 499 changeid = hex(changeid)
500 500 except TypeError:
501 501 pass
502 502 except (error.FilteredIndexError, error.FilteredLookupError,
503 503 error.FilteredRepoLookupError):
504 504 if repo.filtername.startswith('visible'):
505 505 msg = _("hidden revision '%s'") % changeid
506 506 hint = _('use --hidden to access hidden revisions')
507 507 raise error.FilteredRepoLookupError(msg, hint=hint)
508 508 msg = _("filtered revision '%s' (not in '%s' subset)")
509 509 msg %= (changeid, repo.filtername)
510 510 raise error.FilteredRepoLookupError(msg)
511 511 except IndexError:
512 512 pass
513 513 raise error.RepoLookupError(
514 514 _("unknown revision '%s'") % changeid)
515 515
516 516 def __hash__(self):
517 517 try:
518 518 return hash(self._rev)
519 519 except AttributeError:
520 520 return id(self)
521 521
522 522 def __nonzero__(self):
523 523 return self._rev != nullrev
524 524
525 525 @propertycache
526 526 def _changeset(self):
527 527 return self._repo.changelog.changelogrevision(self.rev())
528 528
529 529 @propertycache
530 530 def _manifest(self):
531 531 return self._repo.manifestlog[self._changeset.manifest].read()
532 532
533 533 @propertycache
534 534 def _manifestdelta(self):
535 535 mfnode = self._changeset.manifest
536 536 return self._repo.manifestlog[mfnode].readdelta()
537 537
538 538 @propertycache
539 539 def _parents(self):
540 540 repo = self._repo
541 541 p1, p2 = repo.changelog.parentrevs(self._rev)
542 542 if p2 == nullrev:
543 543 return [changectx(repo, p1)]
544 544 return [changectx(repo, p1), changectx(repo, p2)]
545 545
546 546 def changeset(self):
547 547 c = self._changeset
548 548 return (
549 549 c.manifest,
550 550 c.user,
551 551 c.date,
552 552 c.files,
553 553 c.description,
554 554 c.extra,
555 555 )
556 556 def manifestnode(self):
557 557 return self._changeset.manifest
558 558
559 559 def user(self):
560 560 return self._changeset.user
561 561 def date(self):
562 562 return self._changeset.date
563 563 def files(self):
564 564 return self._changeset.files
565 565 def description(self):
566 566 return self._changeset.description
567 567 def branch(self):
568 568 return encoding.tolocal(self._changeset.extra.get("branch"))
569 569 def closesbranch(self):
570 570 return 'close' in self._changeset.extra
571 571 def extra(self):
572 572 return self._changeset.extra
573 573 def tags(self):
574 574 return self._repo.nodetags(self._node)
575 575 def bookmarks(self):
576 576 return self._repo.nodebookmarks(self._node)
577 577 def phase(self):
578 578 return self._repo._phasecache.phase(self._repo, self._rev)
579 579 def hidden(self):
580 580 return self._rev in repoview.filterrevs(self._repo, 'visible')
581 581
582 582 def children(self):
583 583 """return contexts for each child changeset"""
584 584 c = self._repo.changelog.children(self._node)
585 585 return [changectx(self._repo, x) for x in c]
586 586
587 587 def ancestors(self):
588 588 for a in self._repo.changelog.ancestors([self._rev]):
589 589 yield changectx(self._repo, a)
590 590
591 591 def descendants(self):
592 592 for d in self._repo.changelog.descendants([self._rev]):
593 593 yield changectx(self._repo, d)
594 594
595 595 def filectx(self, path, fileid=None, filelog=None):
596 596 """get a file context from this changeset"""
597 597 if fileid is None:
598 598 fileid = self.filenode(path)
599 599 return filectx(self._repo, path, fileid=fileid,
600 600 changectx=self, filelog=filelog)
601 601
602 602 def ancestor(self, c2, warn=False):
603 603 """return the "best" ancestor context of self and c2
604 604
605 605 If there are multiple candidates, it will show a message and check
606 606 merge.preferancestor configuration before falling back to the
607 607 revlog ancestor."""
608 608 # deal with workingctxs
609 609 n2 = c2._node
610 610 if n2 is None:
611 611 n2 = c2._parents[0]._node
612 612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
613 613 if not cahs:
614 614 anc = nullid
615 615 elif len(cahs) == 1:
616 616 anc = cahs[0]
617 617 else:
618 618 # experimental config: merge.preferancestor
619 619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
620 620 try:
621 621 ctx = changectx(self._repo, r)
622 622 except error.RepoLookupError:
623 623 continue
624 624 anc = ctx.node()
625 625 if anc in cahs:
626 626 break
627 627 else:
628 628 anc = self._repo.changelog.ancestor(self._node, n2)
629 629 if warn:
630 630 self._repo.ui.status(
631 631 (_("note: using %s as ancestor of %s and %s\n") %
632 632 (short(anc), short(self._node), short(n2))) +
633 633 ''.join(_(" alternatively, use --config "
634 634 "merge.preferancestor=%s\n") %
635 635 short(n) for n in sorted(cahs) if n != anc))
636 636 return changectx(self._repo, anc)
637 637
638 638 def descendant(self, other):
639 639 """True if other is descendant of this changeset"""
640 640 return self._repo.changelog.descendant(self._rev, other._rev)
641 641
642 642 def walk(self, match):
643 643 '''Generates matching file names.'''
644 644
645 645 # Wrap match.bad method to have message with nodeid
646 646 def bad(fn, msg):
647 647 # The manifest doesn't know about subrepos, so don't complain about
648 648 # paths into valid subrepos.
649 649 if any(fn == s or fn.startswith(s + '/')
650 650 for s in self.substate):
651 651 return
652 652 match.bad(fn, _('no such file in rev %s') % self)
653 653
654 654 m = matchmod.badmatch(match, bad)
655 655 return self._manifest.walk(m)
656 656
657 657 def matches(self, match):
658 658 return self.walk(match)
659 659
660 660 class basefilectx(object):
661 661 """A filecontext object represents the common logic for its children:
662 662 filectx: read-only access to a filerevision that is already present
663 663 in the repo,
664 664 workingfilectx: a filecontext that represents files from the working
665 665 directory,
666 666 memfilectx: a filecontext that represents files in-memory."""
667 667 def __new__(cls, repo, path, *args, **kwargs):
668 668 return super(basefilectx, cls).__new__(cls)
669 669
670 670 @propertycache
671 671 def _filelog(self):
672 672 return self._repo.file(self._path)
673 673
674 674 @propertycache
675 675 def _changeid(self):
676 676 if '_changeid' in self.__dict__:
677 677 return self._changeid
678 678 elif '_changectx' in self.__dict__:
679 679 return self._changectx.rev()
680 680 elif '_descendantrev' in self.__dict__:
681 681 # this file context was created from a revision with a known
682 682 # descendant, we can (lazily) correct for linkrev aliases
683 return self._adjustlinkrev(self._path, self._filelog,
684 self._filenode, self._descendantrev)
683 return self._adjustlinkrev(self._descendantrev)
685 684 else:
686 685 return self._filelog.linkrev(self._filerev)
687 686
688 687 @propertycache
689 688 def _filenode(self):
690 689 if '_fileid' in self.__dict__:
691 690 return self._filelog.lookup(self._fileid)
692 691 else:
693 692 return self._changectx.filenode(self._path)
694 693
695 694 @propertycache
696 695 def _filerev(self):
697 696 return self._filelog.rev(self._filenode)
698 697
699 698 @propertycache
700 699 def _repopath(self):
701 700 return self._path
702 701
703 702 def __nonzero__(self):
704 703 try:
705 704 self._filenode
706 705 return True
707 706 except error.LookupError:
708 707 # file is missing
709 708 return False
710 709
711 710 def __str__(self):
712 711 try:
713 712 return "%s@%s" % (self.path(), self._changectx)
714 713 except error.LookupError:
715 714 return "%s@???" % self.path()
716 715
717 716 def __repr__(self):
718 717 return "<%s %s>" % (type(self).__name__, str(self))
719 718
720 719 def __hash__(self):
721 720 try:
722 721 return hash((self._path, self._filenode))
723 722 except AttributeError:
724 723 return id(self)
725 724
726 725 def __eq__(self, other):
727 726 try:
728 727 return (type(self) == type(other) and self._path == other._path
729 728 and self._filenode == other._filenode)
730 729 except AttributeError:
731 730 return False
732 731
733 732 def __ne__(self, other):
734 733 return not (self == other)
735 734
736 735 def filerev(self):
737 736 return self._filerev
738 737 def filenode(self):
739 738 return self._filenode
740 739 def flags(self):
741 740 return self._changectx.flags(self._path)
742 741 def filelog(self):
743 742 return self._filelog
744 743 def rev(self):
745 744 return self._changeid
746 745 def linkrev(self):
747 746 return self._filelog.linkrev(self._filerev)
748 747 def node(self):
749 748 return self._changectx.node()
750 749 def hex(self):
751 750 return self._changectx.hex()
752 751 def user(self):
753 752 return self._changectx.user()
754 753 def date(self):
755 754 return self._changectx.date()
756 755 def files(self):
757 756 return self._changectx.files()
758 757 def description(self):
759 758 return self._changectx.description()
760 759 def branch(self):
761 760 return self._changectx.branch()
762 761 def extra(self):
763 762 return self._changectx.extra()
764 763 def phase(self):
765 764 return self._changectx.phase()
766 765 def phasestr(self):
767 766 return self._changectx.phasestr()
768 767 def manifest(self):
769 768 return self._changectx.manifest()
770 769 def changectx(self):
771 770 return self._changectx
772 771 def repo(self):
773 772 return self._repo
774 773
775 774 def path(self):
776 775 return self._path
777 776
778 777 def isbinary(self):
779 778 try:
780 779 return util.binary(self.data())
781 780 except IOError:
782 781 return False
783 782 def isexec(self):
784 783 return 'x' in self.flags()
785 784 def islink(self):
786 785 return 'l' in self.flags()
787 786
788 787 def isabsent(self):
789 788 """whether this filectx represents a file not in self._changectx
790 789
791 790 This is mainly for merge code to detect change/delete conflicts. This is
792 791 expected to be True for all subclasses of basectx."""
793 792 return False
794 793
795 794 _customcmp = False
796 795 def cmp(self, fctx):
797 796 """compare with other file context
798 797
799 798 returns True if different than fctx.
800 799 """
801 800 if fctx._customcmp:
802 801 return fctx.cmp(self)
803 802
804 803 if (fctx._filenode is None
805 804 and (self._repo._encodefilterpats
806 805 # if file data starts with '\1\n', empty metadata block is
807 806 # prepended, which adds 4 bytes to filelog.size().
808 807 or self.size() - 4 == fctx.size())
809 808 or self.size() == fctx.size()):
810 809 return self._filelog.cmp(self._filenode, fctx.data())
811 810
812 811 return True
813 812
814 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
813 def _adjustlinkrev(self, srcrev, inclusive=False):
815 814 """return the first ancestor of <srcrev> introducing <fnode>
816 815
817 816 If the linkrev of the file revision does not point to an ancestor of
818 817 srcrev, we'll walk down the ancestors until we find one introducing
819 818 this file revision.
820 819
821 :repo: a localrepository object (used to access changelog and manifest)
822 :path: the file path
823 :fnode: the nodeid of the file revision
824 :filelog: the filelog of this path
825 820 :srcrev: the changeset revision we search ancestors from
826 821 :inclusive: if true, the src revision will also be checked
827 822 """
828 823 repo = self._repo
829 824 cl = repo.unfiltered().changelog
830 825 mfl = repo.manifestlog
831 826 # fetch the linkrev
832 fr = filelog.rev(fnode)
833 lkr = filelog.linkrev(fr)
827 lkr = self.linkrev()
834 828 # hack to reuse ancestor computation when searching for renames
835 829 memberanc = getattr(self, '_ancestrycontext', None)
836 830 iteranc = None
837 831 if srcrev is None:
838 832 # wctx case, used by workingfilectx during mergecopy
839 833 revs = [p.rev() for p in self._repo[None].parents()]
840 834 inclusive = True # we skipped the real (revless) source
841 835 else:
842 836 revs = [srcrev]
843 837 if memberanc is None:
844 838 memberanc = iteranc = cl.ancestors(revs, lkr,
845 839 inclusive=inclusive)
846 840 # check if this linkrev is an ancestor of srcrev
847 841 if lkr not in memberanc:
848 842 if iteranc is None:
849 843 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
844 fnode = self._filenode
845 path = self._path
850 846 for a in iteranc:
851 847 ac = cl.read(a) # get changeset data (we avoid object creation)
852 848 if path in ac[3]: # checking the 'files' field.
853 849 # The file has been touched, check if the content is
854 850 # similar to the one we search for.
855 851 if fnode == mfl[ac[0]].readfast().get(path):
856 852 return a
857 853 # In theory, we should never get out of that loop without a result.
858 854 # But if the manifest uses a buggy file revision (not a child of the
859 855 # one it replaces) we could. Such a buggy situation will likely
860 856 # result in a crash somewhere else at some point.
861 857 return lkr
862 858
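# Editorial sketch (hypothetical revisions, not part of context.py): why the
# ancestor walk above is needed. If another branch committed identical content
# for the same path earlier, the shared filelog revision's linkrev points at
# that other changeset, which need not be an ancestor of the revision we are
# viewing:
#
#   fctx = repo[5]['some/file']   # hypothetical file context
#   fctx.linkrev()                # -> 3, the stored linkrev, possibly on
#                                 #    another branch
#   fctx.introrev()               # -> calls _adjustlinkrev(5, inclusive=True),
#                                 #    which walks the ancestors of 5 to find
#                                 #    the changeset that actually introduced
#                                 #    this file node here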
863 859 def introrev(self):
864 860 """return the rev of the changeset which introduced this file revision
865 861
866 862 This method is different from linkrev because it takes into account the
867 863 changeset the filectx was created from. It ensures the returned
868 864 revision is one of its ancestors. This prevents bugs from
869 865 'linkrev-shadowing' when a file revision is used by multiple
870 866 changesets.
871 867 """
872 868 lkr = self.linkrev()
873 869 attrs = vars(self)
874 870 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
875 871 if noctx or self.rev() == lkr:
876 872 return self.linkrev()
877 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
878 self.rev(), inclusive=True)
873 return self._adjustlinkrev(self.rev(), inclusive=True)
879 874
880 875 def _parentfilectx(self, path, fileid, filelog):
881 876 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
882 877 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
883 878 if '_changeid' in vars(self) or '_changectx' in vars(self):
884 879 # If self is associated with a changeset (probably explicitly
885 880 # fed), ensure the created filectx is associated with a
886 881 # changeset that is an ancestor of self.changectx.
887 882 # This lets us later use _adjustlinkrev to get a correct link.
888 883 fctx._descendantrev = self.rev()
889 884 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 885 elif '_descendantrev' in vars(self):
891 886 # Otherwise propagate _descendantrev if we have one associated.
892 887 fctx._descendantrev = self._descendantrev
893 888 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 889 return fctx
895 890
896 891 def parents(self):
897 892 _path = self._path
898 893 fl = self._filelog
899 894 parents = self._filelog.parents(self._filenode)
900 895 pl = [(_path, node, fl) for node in parents if node != nullid]
901 896
902 897 r = fl.renamed(self._filenode)
903 898 if r:
904 899 # - In the simple rename case, both parents are nullid, pl is empty.
905 900 # - In case of merge, only one of the parents is nullid and should
906 901 # be replaced with the rename information. This parent is -always-
907 902 # the first one.
908 903 #
909 904 # As null ids have always been filtered out in the previous list
910 905 # comprehension, inserting at 0 will always result in replacing the
911 906 # first nullid parent with the rename information.
912 907 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
913 908
914 909 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915 910
916 911 def p1(self):
917 912 return self.parents()[0]
918 913
919 914 def p2(self):
920 915 p = self.parents()
921 916 if len(p) == 2:
922 917 return p[1]
923 918 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
924 919
925 920 def annotate(self, follow=False, linenumber=False, diffopts=None):
926 921 '''returns a list of tuples of ((ctx, number), line) for each line
927 922 in the file, where ctx is the filectx of the node where
928 923 that line was last changed; if linenumber parameter is true, number is
929 924 the line number at the first appearance in the managed file, otherwise,
930 925 number has a fixed value of False.
931 926 '''
932 927
933 928 def lines(text):
934 929 if text.endswith("\n"):
935 930 return text.count("\n")
936 931 return text.count("\n") + int(bool(text))
937 932
938 933 if linenumber:
939 934 def decorate(text, rev):
940 935 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
941 936 else:
942 937 def decorate(text, rev):
943 938 return ([(rev, False)] * lines(text), text)
944 939
945 940 def pair(parent, child):
946 941 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
947 942 for (a1, a2, b1, b2), t in blocks:
948 943 # Changed blocks ('!') or blocks made only of blank lines ('~')
949 944 # belong to the child.
950 945 if t == '=':
951 946 child[0][b1:b2] = parent[0][a1:a2]
952 947 return child
953 948
954 949 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
955 950
956 951 def parents(f):
957 952 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
958 953 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
959 954 # from the topmost introrev (= srcrev) down to p.linkrev() if it
960 955 # isn't an ancestor of the srcrev.
961 956 f._changeid
962 957 pl = f.parents()
963 958
964 959 # Don't return renamed parents if we aren't following.
965 960 if not follow:
966 961 pl = [p for p in pl if p.path() == f.path()]
967 962
968 963 # renamed filectx won't have a filelog yet, so set it
969 964 # from the cache to save time
970 965 for p in pl:
971 966 if not '_filelog' in p.__dict__:
972 967 p._filelog = getlog(p.path())
973 968
974 969 return pl
975 970
976 971 # use linkrev to find the first changeset where self appeared
977 972 base = self
978 973 introrev = self.introrev()
979 974 if self.rev() != introrev:
980 975 base = self.filectx(self.filenode(), changeid=introrev)
981 976 if getattr(base, '_ancestrycontext', None) is None:
982 977 cl = self._repo.changelog
983 978 if introrev is None:
984 979 # wctx is not inclusive, but works because _ancestrycontext
985 980 # is used to test filelog revisions
986 981 ac = cl.ancestors([p.rev() for p in base.parents()],
987 982 inclusive=True)
988 983 else:
989 984 ac = cl.ancestors([introrev], inclusive=True)
990 985 base._ancestrycontext = ac
991 986
992 987 # This algorithm would prefer to be recursive, but Python is a
993 988 # bit recursion-hostile. Instead we do an iterative
994 989 # depth-first search.
995 990
996 991 # 1st DFS pre-calculates pcache and needed
997 992 visit = [base]
998 993 pcache = {}
999 994 needed = {base: 1}
1000 995 while visit:
1001 996 f = visit.pop()
1002 997 if f in pcache:
1003 998 continue
1004 999 pl = parents(f)
1005 1000 pcache[f] = pl
1006 1001 for p in pl:
1007 1002 needed[p] = needed.get(p, 0) + 1
1008 1003 if p not in pcache:
1009 1004 visit.append(p)
1010 1005
1011 1006 # 2nd DFS does the actual annotate
1012 1007 visit[:] = [base]
1013 1008 hist = {}
1014 1009 while visit:
1015 1010 f = visit[-1]
1016 1011 if f in hist:
1017 1012 visit.pop()
1018 1013 continue
1019 1014
1020 1015 ready = True
1021 1016 pl = pcache[f]
1022 1017 for p in pl:
1023 1018 if p not in hist:
1024 1019 ready = False
1025 1020 visit.append(p)
1026 1021 if ready:
1027 1022 visit.pop()
1028 1023 curr = decorate(f.data(), f)
1029 1024 for p in pl:
1030 1025 curr = pair(hist[p], curr)
1031 1026 if needed[p] == 1:
1032 1027 del hist[p]
1033 1028 del needed[p]
1034 1029 else:
1035 1030 needed[p] -= 1
1036 1031
1037 1032 hist[f] = curr
1038 1033 del pcache[f]
1039 1034
1040 1035 return zip(hist[base][0], hist[base][1].splitlines(True))
1041 1036
1042 1037 def ancestors(self, followfirst=False):
1043 1038 visit = {}
1044 1039 c = self
1045 1040 if followfirst:
1046 1041 cut = 1
1047 1042 else:
1048 1043 cut = None
1049 1044
1050 1045 while True:
1051 1046 for parent in c.parents()[:cut]:
1052 1047 visit[(parent.linkrev(), parent.filenode())] = parent
1053 1048 if not visit:
1054 1049 break
1055 1050 c = visit.pop(max(visit))
1056 1051 yield c
1057 1052
1058 1053 class filectx(basefilectx):
1059 1054 """A filecontext object makes access to data related to a particular
1060 1055 filerevision convenient."""
1061 1056 def __init__(self, repo, path, changeid=None, fileid=None,
1062 1057 filelog=None, changectx=None):
1063 1058 """changeid can be a changeset revision, node, or tag.
1064 1059 fileid can be a file revision or node."""
1065 1060 self._repo = repo
1066 1061 self._path = path
1067 1062
1068 1063 assert (changeid is not None
1069 1064 or fileid is not None
1070 1065 or changectx is not None), \
1071 1066 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1072 1067 % (changeid, fileid, changectx))
1073 1068
1074 1069 if filelog is not None:
1075 1070 self._filelog = filelog
1076 1071
1077 1072 if changeid is not None:
1078 1073 self._changeid = changeid
1079 1074 if changectx is not None:
1080 1075 self._changectx = changectx
1081 1076 if fileid is not None:
1082 1077 self._fileid = fileid
1083 1078
1084 1079 @propertycache
1085 1080 def _changectx(self):
1086 1081 try:
1087 1082 return changectx(self._repo, self._changeid)
1088 1083 except error.FilteredRepoLookupError:
1089 1084 # Linkrev may point to any revision in the repository. When the
1090 1085 # repository is filtered this may lead to `filectx` trying to build
1091 1086 # `changectx` for filtered revision. In such case we fallback to
1092 1087 # creating `changectx` on the unfiltered version of the repository.
1093 1088 # This fallback should not be an issue because `changectx` from
1094 1089 # `filectx` are not used in complex operations that care about
1095 1090 # filtering.
1096 1091 #
1097 1092 # This fallback is a cheap and dirty fix that prevents several
1098 1093 # crashes. It does not ensure the behavior is correct. However the
1099 1094 # behavior was not correct before filtering either and "incorrect
1100 1095 # behavior" is seen as better as "crash"
1101 1096 #
1102 1097 # Linkrevs have several serious troubles with filtering that are
1103 1098 # complicated to solve. Proper handling of the issue here should be
1104 1099 # considered when solving the linkrev issues is on the table.
1105 1100 return changectx(self._repo.unfiltered(), self._changeid)
1106 1101
1107 1102 def filectx(self, fileid, changeid=None):
1108 1103 '''opens an arbitrary revision of the file without
1109 1104 opening a new filelog'''
1110 1105 return filectx(self._repo, self._path, fileid=fileid,
1111 1106 filelog=self._filelog, changeid=changeid)
1112 1107
1113 1108 def data(self):
1114 1109 try:
1115 1110 return self._filelog.read(self._filenode)
1116 1111 except error.CensoredNodeError:
1117 1112 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1118 1113 return ""
1119 1114 raise error.Abort(_("censored node: %s") % short(self._filenode),
1120 1115 hint=_("set censor.policy to ignore errors"))
1121 1116
1122 1117 def size(self):
1123 1118 return self._filelog.size(self._filerev)
1124 1119
1125 1120 def renamed(self):
1126 1121 """check if file was actually renamed in this changeset revision
1127 1122
1128 1123 If a rename is logged in the file revision, we report the copy for the
1129 1124 changeset only if the file revision's linkrev points back to the changeset
1130 1125 in question or both changeset parents contain different file revisions.
1131 1126 """
1132 1127
1133 1128 renamed = self._filelog.renamed(self._filenode)
1134 1129 if not renamed:
1135 1130 return renamed
1136 1131
1137 1132 if self.rev() == self.linkrev():
1138 1133 return renamed
1139 1134
1140 1135 name = self.path()
1141 1136 fnode = self._filenode
1142 1137 for p in self._changectx.parents():
1143 1138 try:
1144 1139 if fnode == p.filenode(name):
1145 1140 return None
1146 1141 except error.LookupError:
1147 1142 pass
1148 1143 return renamed
1149 1144
1150 1145 def children(self):
1151 1146 # hard for renames
1152 1147 c = self._filelog.children(self._filenode)
1153 1148 return [filectx(self._repo, self._path, fileid=x,
1154 1149 filelog=self._filelog) for x in c]
1155 1150
1156 1151 class committablectx(basectx):
1157 1152 """A committablectx object provides common functionality for a context that
1158 1153 wants the ability to commit, e.g. workingctx or memctx."""
1159 1154 def __init__(self, repo, text="", user=None, date=None, extra=None,
1160 1155 changes=None):
1161 1156 self._repo = repo
1162 1157 self._rev = None
1163 1158 self._node = None
1164 1159 self._text = text
1165 1160 if date:
1166 1161 self._date = util.parsedate(date)
1167 1162 if user:
1168 1163 self._user = user
1169 1164 if changes:
1170 1165 self._status = changes
1171 1166
1172 1167 self._extra = {}
1173 1168 if extra:
1174 1169 self._extra = extra.copy()
1175 1170 if 'branch' not in self._extra:
1176 1171 try:
1177 1172 branch = encoding.fromlocal(self._repo.dirstate.branch())
1178 1173 except UnicodeDecodeError:
1179 1174 raise error.Abort(_('branch name not in UTF-8!'))
1180 1175 self._extra['branch'] = branch
1181 1176 if self._extra['branch'] == '':
1182 1177 self._extra['branch'] = 'default'
1183 1178
1184 1179 def __str__(self):
1185 1180 return str(self._parents[0]) + "+"
1186 1181
1187 1182 def __nonzero__(self):
1188 1183 return True
1189 1184
1190 1185 def _buildflagfunc(self):
1191 1186 # Create a fallback function for getting file flags when the
1192 1187 # filesystem doesn't support them
1193 1188
1194 1189 copiesget = self._repo.dirstate.copies().get
1195 1190 parents = self.parents()
1196 1191 if len(parents) < 2:
1197 1192 # when we have one parent, it's easy: copy from parent
1198 1193 man = parents[0].manifest()
1199 1194 def func(f):
1200 1195 f = copiesget(f, f)
1201 1196 return man.flags(f)
1202 1197 else:
1203 1198 # merges are tricky: we try to reconstruct the unstored
1204 1199 # result from the merge (issue1802)
1205 1200 p1, p2 = parents
1206 1201 pa = p1.ancestor(p2)
1207 1202 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1208 1203
1209 1204 def func(f):
1210 1205 f = copiesget(f, f) # may be wrong for merges with copies
1211 1206 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1212 1207 if fl1 == fl2:
1213 1208 return fl1
1214 1209 if fl1 == fla:
1215 1210 return fl2
1216 1211 if fl2 == fla:
1217 1212 return fl1
1218 1213 return '' # punt for conflicts
1219 1214
1220 1215 return func
1221 1216
1222 1217 @propertycache
1223 1218 def _flagfunc(self):
1224 1219 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1225 1220
1226 1221 @propertycache
1227 1222 def _manifest(self):
1228 1223 """generate a manifest corresponding to the values in self._status
1229 1224
1230 1225 This reuses the file nodeid from the parent, but appends an extra letter
1231 1226 when modified. Modified files get an extra 'm' while added files get
1232 1227 an extra 'a'. This is used by manifest merge to see that files
1233 1228 are different and by update logic to avoid deleting newly added files.
1234 1229 """
1235 1230 parents = self.parents()
1236 1231
1237 1232 man1 = parents[0].manifest()
1238 1233 man = man1.copy()
1239 1234 if len(parents) > 1:
1240 1235 man2 = self.p2().manifest()
1241 1236 def getman(f):
1242 1237 if f in man1:
1243 1238 return man1
1244 1239 return man2
1245 1240 else:
1246 1241 getman = lambda f: man1
1247 1242
1248 1243 copied = self._repo.dirstate.copies()
1249 1244 ff = self._flagfunc
1250 1245 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1251 1246 for f in l:
1252 1247 orig = copied.get(f, f)
1253 1248 man[f] = getman(orig).get(orig, nullid) + i
1254 1249 try:
1255 1250 man.setflag(f, ff(f))
1256 1251 except OSError:
1257 1252 pass
1258 1253
1259 1254 for f in self._status.deleted + self._status.removed:
1260 1255 if f in man:
1261 1256 del man[f]
1262 1257
1263 1258 return man
1264 1259
1265 1260 @propertycache
1266 1261 def _status(self):
1267 1262 return self._repo.status()
1268 1263
1269 1264 @propertycache
1270 1265 def _user(self):
1271 1266 return self._repo.ui.username()
1272 1267
1273 1268 @propertycache
1274 1269 def _date(self):
1275 1270 return util.makedate()
1276 1271
1277 1272 def subrev(self, subpath):
1278 1273 return None
1279 1274
1280 1275 def manifestnode(self):
1281 1276 return None
1282 1277 def user(self):
1283 1278 return self._user or self._repo.ui.username()
1284 1279 def date(self):
1285 1280 return self._date
1286 1281 def description(self):
1287 1282 return self._text
1288 1283 def files(self):
1289 1284 return sorted(self._status.modified + self._status.added +
1290 1285 self._status.removed)
1291 1286
1292 1287 def modified(self):
1293 1288 return self._status.modified
1294 1289 def added(self):
1295 1290 return self._status.added
1296 1291 def removed(self):
1297 1292 return self._status.removed
1298 1293 def deleted(self):
1299 1294 return self._status.deleted
1300 1295 def branch(self):
1301 1296 return encoding.tolocal(self._extra['branch'])
1302 1297 def closesbranch(self):
1303 1298 return 'close' in self._extra
1304 1299 def extra(self):
1305 1300 return self._extra
1306 1301
1307 1302 def tags(self):
1308 1303 return []
1309 1304
1310 1305 def bookmarks(self):
1311 1306 b = []
1312 1307 for p in self.parents():
1313 1308 b.extend(p.bookmarks())
1314 1309 return b
1315 1310
1316 1311 def phase(self):
1317 1312 phase = phases.draft # default phase to draft
1318 1313 for p in self.parents():
1319 1314 phase = max(phase, p.phase())
1320 1315 return phase
1321 1316
1322 1317 def hidden(self):
1323 1318 return False
1324 1319
1325 1320 def children(self):
1326 1321 return []
1327 1322
1328 1323 def flags(self, path):
1329 1324 if '_manifest' in self.__dict__:
1330 1325 try:
1331 1326 return self._manifest.flags(path)
1332 1327 except KeyError:
1333 1328 return ''
1334 1329
1335 1330 try:
1336 1331 return self._flagfunc(path)
1337 1332 except OSError:
1338 1333 return ''
1339 1334
1340 1335 def ancestor(self, c2):
1341 1336 """return the "best" ancestor context of self and c2"""
1342 1337 return self._parents[0].ancestor(c2) # punt on two parents for now
1343 1338
1344 1339 def walk(self, match):
1345 1340 '''Generates matching file names.'''
1346 1341 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1347 1342 True, False))
1348 1343
1349 1344 def matches(self, match):
1350 1345 return sorted(self._repo.dirstate.matches(match))
1351 1346
1352 1347 def ancestors(self):
1353 1348 for p in self._parents:
1354 1349 yield p
1355 1350 for a in self._repo.changelog.ancestors(
1356 1351 [p.rev() for p in self._parents]):
1357 1352 yield changectx(self._repo, a)
1358 1353
1359 1354 def markcommitted(self, node):
1360 1355 """Perform post-commit cleanup necessary after committing this ctx
1361 1356
1362 1357 Specifically, this updates backing stores this working context
1363 1358 wraps to reflect the fact that the changes reflected by this
1364 1359 workingctx have been committed. For example, it marks
1365 1360 modified and added files as normal in the dirstate.
1366 1361
1367 1362 """
1368 1363
1369 1364 self._repo.dirstate.beginparentchange()
1370 1365 for f in self.modified() + self.added():
1371 1366 self._repo.dirstate.normal(f)
1372 1367 for f in self.removed():
1373 1368 self._repo.dirstate.drop(f)
1374 1369 self._repo.dirstate.setparents(node)
1375 1370 self._repo.dirstate.endparentchange()
1376 1371
1377 1372 # write changes out explicitly, because nesting wlock at
1378 1373 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1379 1374 # from immediately doing so for subsequent changing files
1380 1375 self._repo.dirstate.write(self._repo.currenttransaction())
1381 1376
1382 1377 class workingctx(committablectx):
1383 1378 """A workingctx object makes access to data related to
1384 1379 the current working directory convenient.
1385 1380 date - any valid date string or (unixtime, offset), or None.
1386 1381 user - username string, or None.
1387 1382 extra - a dictionary of extra values, or None.
1388 1383 changes - a list of file lists as returned by localrepo.status()
1389 1384 or None to use the repository status.
1390 1385 """
1391 1386 def __init__(self, repo, text="", user=None, date=None, extra=None,
1392 1387 changes=None):
1393 1388 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1394 1389
1395 1390 def __iter__(self):
1396 1391 d = self._repo.dirstate
1397 1392 for f in d:
1398 1393 if d[f] != 'r':
1399 1394 yield f
1400 1395
1401 1396 def __contains__(self, key):
1402 1397 return self._repo.dirstate[key] not in "?r"
1403 1398
1404 1399 def hex(self):
1405 1400 return hex(wdirid)
1406 1401
1407 1402 @propertycache
1408 1403 def _parents(self):
1409 1404 p = self._repo.dirstate.parents()
1410 1405 if p[1] == nullid:
1411 1406 p = p[:-1]
1412 1407 return [changectx(self._repo, x) for x in p]
1413 1408
1414 1409 def filectx(self, path, filelog=None):
1415 1410 """get a file context from the working directory"""
1416 1411 return workingfilectx(self._repo, path, workingctx=self,
1417 1412 filelog=filelog)
1418 1413
1419 1414 def dirty(self, missing=False, merge=True, branch=True):
1420 1415 "check whether a working directory is modified"
1421 1416 # check subrepos first
1422 1417 for s in sorted(self.substate):
1423 1418 if self.sub(s).dirty():
1424 1419 return True
1425 1420 # check current working dir
1426 1421 return ((merge and self.p2()) or
1427 1422 (branch and self.branch() != self.p1().branch()) or
1428 1423 self.modified() or self.added() or self.removed() or
1429 1424 (missing and self.deleted()))
1430 1425
1431 1426 def add(self, list, prefix=""):
1432 1427 join = lambda f: os.path.join(prefix, f)
1433 1428 with self._repo.wlock():
1434 1429 ui, ds = self._repo.ui, self._repo.dirstate
1435 1430 rejected = []
1436 1431 lstat = self._repo.wvfs.lstat
1437 1432 for f in list:
1438 1433 scmutil.checkportable(ui, join(f))
1439 1434 try:
1440 1435 st = lstat(f)
1441 1436 except OSError:
1442 1437 ui.warn(_("%s does not exist!\n") % join(f))
1443 1438 rejected.append(f)
1444 1439 continue
1445 1440 if st.st_size > 10000000:
1446 1441 ui.warn(_("%s: up to %d MB of RAM may be required "
1447 1442 "to manage this file\n"
1448 1443 "(use 'hg revert %s' to cancel the "
1449 1444 "pending addition)\n")
1450 1445 % (f, 3 * st.st_size // 1000000, join(f)))
1451 1446 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1452 1447 ui.warn(_("%s not added: only files and symlinks "
1453 1448 "supported currently\n") % join(f))
1454 1449 rejected.append(f)
1455 1450 elif ds[f] in 'amn':
1456 1451 ui.warn(_("%s already tracked!\n") % join(f))
1457 1452 elif ds[f] == 'r':
1458 1453 ds.normallookup(f)
1459 1454 else:
1460 1455 ds.add(f)
1461 1456 return rejected
1462 1457
1463 1458 def forget(self, files, prefix=""):
1464 1459 join = lambda f: os.path.join(prefix, f)
1465 1460 with self._repo.wlock():
1466 1461 rejected = []
1467 1462 for f in files:
1468 1463 if f not in self._repo.dirstate:
1469 1464 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1470 1465 rejected.append(f)
1471 1466 elif self._repo.dirstate[f] != 'a':
1472 1467 self._repo.dirstate.remove(f)
1473 1468 else:
1474 1469 self._repo.dirstate.drop(f)
1475 1470 return rejected
1476 1471
1477 1472 def undelete(self, list):
1478 1473 pctxs = self.parents()
1479 1474 with self._repo.wlock():
1480 1475 for f in list:
1481 1476 if self._repo.dirstate[f] != 'r':
1482 1477 self._repo.ui.warn(_("%s not removed!\n") % f)
1483 1478 else:
1484 1479 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1485 1480 t = fctx.data()
1486 1481 self._repo.wwrite(f, t, fctx.flags())
1487 1482 self._repo.dirstate.normal(f)
1488 1483
1489 1484 def copy(self, source, dest):
1490 1485 try:
1491 1486 st = self._repo.wvfs.lstat(dest)
1492 1487 except OSError as err:
1493 1488 if err.errno != errno.ENOENT:
1494 1489 raise
1495 1490 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1496 1491 return
1497 1492 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1498 1493 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1499 1494 "symbolic link\n") % dest)
1500 1495 else:
1501 1496 with self._repo.wlock():
1502 1497 if self._repo.dirstate[dest] in '?':
1503 1498 self._repo.dirstate.add(dest)
1504 1499 elif self._repo.dirstate[dest] in 'r':
1505 1500 self._repo.dirstate.normallookup(dest)
1506 1501 self._repo.dirstate.copy(source, dest)
1507 1502
1508 1503 def match(self, pats=[], include=None, exclude=None, default='glob',
1509 1504 listsubrepos=False, badfn=None):
1510 1505 r = self._repo
1511 1506
1512 1507 # Only a case insensitive filesystem needs magic to translate user input
1513 1508 # to actual case in the filesystem.
1514 1509 if not util.fscasesensitive(r.root):
1515 1510 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1516 1511 exclude, default, r.auditor, self,
1517 1512 listsubrepos=listsubrepos,
1518 1513 badfn=badfn)
1519 1514 return matchmod.match(r.root, r.getcwd(), pats,
1520 1515 include, exclude, default,
1521 1516 auditor=r.auditor, ctx=self,
1522 1517 listsubrepos=listsubrepos, badfn=badfn)
1523 1518
1524 1519 def _filtersuspectsymlink(self, files):
1525 1520 if not files or self._repo.dirstate._checklink:
1526 1521 return files
1527 1522
1528 1523 # Symlink placeholders may get non-symlink-like contents
1529 1524 # via user error or dereferencing by NFS or Samba servers,
1530 1525 # so we filter out any placeholders that don't look like a
1531 1526 # symlink
1532 1527 sane = []
1533 1528 for f in files:
1534 1529 if self.flags(f) == 'l':
1535 1530 d = self[f].data()
1536 1531 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1537 1532 self._repo.ui.debug('ignoring suspect symlink placeholder'
1538 1533 ' "%s"\n' % f)
1539 1534 continue
1540 1535 sane.append(f)
1541 1536 return sane
1542 1537
1543 1538 def _checklookup(self, files):
1544 1539 # check for any possibly clean files
1545 1540 if not files:
1546 1541 return [], []
1547 1542
1548 1543 modified = []
1549 1544 fixup = []
1550 1545 pctx = self._parents[0]
1551 1546 # do a full compare of any files that might have changed
1552 1547 for f in sorted(files):
1553 1548 if (f not in pctx or self.flags(f) != pctx.flags(f)
1554 1549 or pctx[f].cmp(self[f])):
1555 1550 modified.append(f)
1556 1551 else:
1557 1552 fixup.append(f)
1558 1553
1559 1554 # update dirstate for files that are actually clean
1560 1555 if fixup:
1561 1556 try:
1562 1557 # updating the dirstate is optional
1563 1558 # so we don't wait on the lock
1564 1559 # wlock can invalidate the dirstate, so cache normal _after_
1565 1560 # taking the lock
1566 1561 with self._repo.wlock(False):
1567 1562 normal = self._repo.dirstate.normal
1568 1563 for f in fixup:
1569 1564 normal(f)
1570 1565 # write changes out explicitly, because nesting
1571 1566 # wlock at runtime may prevent 'wlock.release()'
1572 1567 # after this block from doing so for subsequent
1573 1568 # changing files
1574 1569 self._repo.dirstate.write(self._repo.currenttransaction())
1575 1570 except error.LockError:
1576 1571 pass
1577 1572 return modified, fixup
1578 1573
1579 1574 def _manifestmatches(self, match, s):
1580 1575 """Slow path for workingctx
1581 1576
1582 1577 The fast path is when we compare the working directory to its parent
1583 1578 which means this function is comparing with a non-parent; therefore we
1584 1579 need to build a manifest and return what matches.
1585 1580 """
1586 1581 mf = self._repo['.']._manifestmatches(match, s)
1587 1582 for f in s.modified + s.added:
1588 1583 mf[f] = _newnode
1589 1584 mf.setflag(f, self.flags(f))
1590 1585 for f in s.removed:
1591 1586 if f in mf:
1592 1587 del mf[f]
1593 1588 return mf
1594 1589
1595 1590 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1596 1591 unknown=False):
1597 1592 '''Gets the status from the dirstate -- internal use only.'''
1598 1593 listignored, listclean, listunknown = ignored, clean, unknown
1599 1594 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1600 1595 subrepos = []
1601 1596 if '.hgsub' in self:
1602 1597 subrepos = sorted(self.substate)
1603 1598 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1604 1599 listclean, listunknown)
1605 1600
1606 1601 # check for any possibly clean files
1607 1602 if cmp:
1608 1603 modified2, fixup = self._checklookup(cmp)
1609 1604 s.modified.extend(modified2)
1610 1605
1611 1606 # update dirstate for files that are actually clean
1612 1607 if fixup and listclean:
1613 1608 s.clean.extend(fixup)
1614 1609
1615 1610 if match.always():
1616 1611 # cache for performance
1617 1612 if s.unknown or s.ignored or s.clean:
1618 1613 # "_status" is cached with list*=False in the normal route
1619 1614 self._status = scmutil.status(s.modified, s.added, s.removed,
1620 1615 s.deleted, [], [], [])
1621 1616 else:
1622 1617 self._status = s
1623 1618
1624 1619 return s
1625 1620
1626 1621 def _buildstatus(self, other, s, match, listignored, listclean,
1627 1622 listunknown):
1628 1623 """build a status with respect to another context
1629 1624
1630 1625 This includes logic for maintaining the fast path of status when
1631 1626 comparing the working directory against its parent: a new manifest is
1632 1627 built only when self (the working directory) is being compared against
1633 1628 something other than its parent (repo['.']).
1634 1629 """
1635 1630 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1636 1631 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1637 1632 # might have accidentally ended up with the entire contents of the file
1638 1633 # they are supposed to be linking to.
1639 1634 s.modified[:] = self._filtersuspectsymlink(s.modified)
1640 1635 if other != self._repo['.']:
1641 1636 s = super(workingctx, self)._buildstatus(other, s, match,
1642 1637 listignored, listclean,
1643 1638 listunknown)
1644 1639 return s
1645 1640
1646 1641 def _matchstatus(self, other, match):
1647 1642 """override the match method with a filter for directory patterns
1648 1643
1649 1644 We use inheritance to customize the match.bad method only for
1650 1645 workingctx, and only when the working directory is being compared
1651 1646 against something other than its parent changeset.
1652 1647
1653 1648 When we are comparing against the working directory's parent, we just
1654 1649 use the default match object sent to us.
1655 1650 """
1656 1651 superself = super(workingctx, self)
1657 1652 match = superself._matchstatus(other, match)
1658 1653 if other != self._repo['.']:
1659 1654 def bad(f, msg):
1660 1655 # 'f' may be a directory pattern from 'match.files()',
1661 1656 # so 'f not in ctx1' is not enough
1662 1657 if f not in other and not other.hasdir(f):
1663 1658 self._repo.ui.warn('%s: %s\n' %
1664 1659 (self._repo.dirstate.pathto(f), msg))
1665 1660 match.bad = bad
1666 1661 return match
1667 1662
1668 1663 class committablefilectx(basefilectx):
1669 1664 """A committablefilectx provides common functionality for a file context
1670 1665 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1671 1666 def __init__(self, repo, path, filelog=None, ctx=None):
1672 1667 self._repo = repo
1673 1668 self._path = path
1674 1669 self._changeid = None
1675 1670 self._filerev = self._filenode = None
1676 1671
1677 1672 if filelog is not None:
1678 1673 self._filelog = filelog
1679 1674 if ctx:
1680 1675 self._changectx = ctx
1681 1676
1682 1677 def __nonzero__(self):
1683 1678 return True
1684 1679
1685 1680 def linkrev(self):
1686 1681 # linked to self._changectx whether or not the file is modified
1687 1682 return self.rev()
1688 1683
1689 1684 def parents(self):
1690 1685 '''return parent filectxs, following copies if necessary'''
1691 1686 def filenode(ctx, path):
1692 1687 return ctx._manifest.get(path, nullid)
1693 1688
1694 1689 path = self._path
1695 1690 fl = self._filelog
1696 1691 pcl = self._changectx._parents
1697 1692 renamed = self.renamed()
1698 1693
1699 1694 if renamed:
1700 1695 pl = [renamed + (None,)]
1701 1696 else:
1702 1697 pl = [(path, filenode(pcl[0], path), fl)]
1703 1698
1704 1699 for pc in pcl[1:]:
1705 1700 pl.append((path, filenode(pc, path), fl))
1706 1701
1707 1702 return [self._parentfilectx(p, fileid=n, filelog=l)
1708 1703 for p, n, l in pl if n != nullid]
1709 1704
1710 1705 def children(self):
1711 1706 return []
1712 1707
1713 1708 class workingfilectx(committablefilectx):
1714 1709 """A workingfilectx object makes access to data related to a particular
1715 1710 file in the working directory convenient."""
1716 1711 def __init__(self, repo, path, filelog=None, workingctx=None):
1717 1712 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1718 1713
1719 1714 @propertycache
1720 1715 def _changectx(self):
1721 1716 return workingctx(self._repo)
1722 1717
1723 1718 def data(self):
1724 1719 return self._repo.wread(self._path)
1725 1720 def renamed(self):
1726 1721 rp = self._repo.dirstate.copied(self._path)
1727 1722 if not rp:
1728 1723 return None
1729 1724 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1730 1725
1731 1726 def size(self):
1732 1727 return self._repo.wvfs.lstat(self._path).st_size
1733 1728 def date(self):
1734 1729 t, tz = self._changectx.date()
1735 1730 try:
1736 1731 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1737 1732 except OSError as err:
1738 1733 if err.errno != errno.ENOENT:
1739 1734 raise
1740 1735 return (t, tz)
1741 1736
1742 1737 def cmp(self, fctx):
1743 1738 """compare with other file context
1744 1739
1745 1740 returns True if different from fctx.
1746 1741 """
1747 1742 # fctx should be a filectx (not a workingfilectx)
1748 1743 # invert comparison to reuse the same code path
1749 1744 return fctx.cmp(self)
1750 1745
1751 1746 def remove(self, ignoremissing=False):
1752 1747 """wraps unlink for a repo's working directory"""
1753 1748 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1754 1749
1755 1750 def write(self, data, flags):
1756 1751 """wraps repo.wwrite"""
1757 1752 self._repo.wwrite(self._path, data, flags)
1758 1753
1759 1754 class workingcommitctx(workingctx):
1760 1755 """A workingcommitctx object makes access to data related to
1761 1756 the revision being committed convenient.
1762 1757
1763 1758 This hides changes in the working directory, if they aren't
1764 1759 committed in this context.
1765 1760 """
1766 1761 def __init__(self, repo, changes,
1767 1762 text="", user=None, date=None, extra=None):
1768 1763 super(workingctx, self).__init__(repo, text, user, date, extra,
1769 1764 changes)
1770 1765
1771 1766 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1772 1767 unknown=False):
1773 1768 """Return matched files only in ``self._status``
1774 1769
1775 1770 Uncommitted files appear "clean" via this context, even if
1776 1771 they aren't actually so in the working directory.
1777 1772 """
1778 1773 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1779 1774 if clean:
1780 1775 clean = [f for f in self._manifest if f not in self._changedset]
1781 1776 else:
1782 1777 clean = []
1783 1778 return scmutil.status([f for f in self._status.modified if match(f)],
1784 1779 [f for f in self._status.added if match(f)],
1785 1780 [f for f in self._status.removed if match(f)],
1786 1781 [], [], [], clean)
1787 1782
1788 1783 @propertycache
1789 1784 def _changedset(self):
1790 1785 """Return the set of files changed in this context
1791 1786 """
1792 1787 changed = set(self._status.modified)
1793 1788 changed.update(self._status.added)
1794 1789 changed.update(self._status.removed)
1795 1790 return changed
1796 1791
1797 1792 def makecachingfilectxfn(func):
1798 1793 """Create a filectxfn that caches based on the path.
1799 1794
1800 1795 We can't use util.cachefunc because it uses all arguments as the cache
1801 1796 key and this creates a cycle since the arguments include the repo and
1802 1797 memctx.
1803 1798 """
1804 1799 cache = {}
1805 1800
1806 1801 def getfilectx(repo, memctx, path):
1807 1802 if path not in cache:
1808 1803 cache[path] = func(repo, memctx, path)
1809 1804 return cache[path]
1810 1805
1811 1806 return getfilectx
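# Hypothetical usage sketch, not in the original source. The wrapper caches
# purely on 'path', so repeated requests for the same file reuse the first
# result; 'expensivefn' stands in for any callable with the filectxfn
# signature:
#
#   cachedfn = makecachingfilectxfn(expensivefn)
#   fctx1 = cachedfn(repo, mctx, 'a.txt')   # computes via expensivefn
#   fctx2 = cachedfn(repo, mctx, 'a.txt')   # served from the cache
#   assert fctx1 is fctx2
#
# memctx below applies this wrapper automatically whenever it is given a
# callable filectxfn.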
1812 1807
1813 1808 class memctx(committablectx):
1814 1809 """Use memctx to perform in-memory commits via localrepo.commitctx().
1815 1810
1816 1811 Revision information is supplied at initialization time, while the
1817 1812 related files' data is made available through a callback
1818 1813 mechanism. 'repo' is the current localrepo, 'parents' is a
1819 1814 sequence of two parent revisions identifiers (pass None for every
1820 1815 missing parent), 'text' is the commit message and 'files' lists
1821 1816 names of files touched by the revision (normalized and relative to
1822 1817 repository root).
1823 1818
1824 1819 filectxfn(repo, memctx, path) is a callable receiving the
1825 1820 repository, the current memctx object and the normalized path of
1826 1821 requested file, relative to repository root. It is fired by the
1827 1822 commit function for every file in 'files', but the order of calls is
1828 1823 undefined. If the file is available in the revision being
1829 1824 committed (updated or added), filectxfn returns a memfilectx
1830 1825 object. If the file was removed, filectxfn raises an
1831 1826 IOError. Moved files are represented by marking the source file
1832 1827 removed and the new file added with copy information (see
1833 1828 memfilectx).
1834 1829
1835 1830 'user' is the committer name and defaults to the current
1836 1831 repository username; 'date' is the commit date, in any format
1837 1832 supported by util.parsedate(), and defaults to the current date;
1838 1833 'extra' is a dictionary of metadata, or is left empty.
1839 1834 """
1840 1835
1841 1836 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1842 1837 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1843 1838 # this field to determine what to do in filectxfn.
1844 1839 _returnnoneformissingfiles = True
1845 1840
1846 1841 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1847 1842 date=None, extra=None, editor=False):
1848 1843 super(memctx, self).__init__(repo, text, user, date, extra)
1849 1844 self._rev = None
1850 1845 self._node = None
1851 1846 parents = [(p or nullid) for p in parents]
1852 1847 p1, p2 = parents
1853 1848 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1854 1849 files = sorted(set(files))
1855 1850 self._files = files
1856 1851 self.substate = {}
1857 1852
1858 1853 # if filectxfn is not callable, it is a store mapping paths to filectxs; wrap it in a function
1859 1854 if not callable(filectxfn):
1860 1855 def getfilectx(repo, memctx, path):
1861 1856 fctx = filectxfn[path]
1862 1857 # this is weird but apparently we only keep track of one parent
1863 1858 # (why not only store that instead of a tuple?)
1864 1859 copied = fctx.renamed()
1865 1860 if copied:
1866 1861 copied = copied[0]
1867 1862 return memfilectx(repo, path, fctx.data(),
1868 1863 islink=fctx.islink(), isexec=fctx.isexec(),
1869 1864 copied=copied, memctx=memctx)
1870 1865 self._filectxfn = getfilectx
1871 1866 else:
1872 1867 # memoizing increases performance for e.g. vcs convert scenarios.
1873 1868 self._filectxfn = makecachingfilectxfn(filectxfn)
1874 1869
1875 1870 if extra:
1876 1871 self._extra = extra.copy()
1877 1872 else:
1878 1873 self._extra = {}
1879 1874
1880 1875 if self._extra.get('branch', '') == '':
1881 1876 self._extra['branch'] = 'default'
1882 1877
1883 1878 if editor:
1884 1879 self._text = editor(self._repo, self, [])
1885 1880 self._repo.savecommitmessage(self._text)
1886 1881
1887 1882 def filectx(self, path, filelog=None):
1888 1883 """get a file context from the working directory
1889 1884
1890 1885 Returns None if file doesn't exist and should be removed."""
1891 1886 return self._filectxfn(self._repo, self, path)
1892 1887
1893 1888 def commit(self):
1894 1889 """commit context to the repo"""
1895 1890 return self._repo.commitctx(self)
1896 1891
1897 1892 @propertycache
1898 1893 def _manifest(self):
1899 1894 """generate a manifest based on the return values of filectxfn"""
1900 1895
1901 1896 # keep this simple for now; just worry about p1
1902 1897 pctx = self._parents[0]
1903 1898 man = pctx.manifest().copy()
1904 1899
1905 1900 for f in self._status.modified:
1906 1901 p1node = nullid
1907 1902 p2node = nullid
1908 1903 p = pctx[f].parents() # if file isn't in pctx, check p2?
1909 1904 if len(p) > 0:
1910 1905 p1node = p[0].filenode()
1911 1906 if len(p) > 1:
1912 1907 p2node = p[1].filenode()
1913 1908 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1914 1909
1915 1910 for f in self._status.added:
1916 1911 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1917 1912
1918 1913 for f in self._status.removed:
1919 1914 if f in man:
1920 1915 del man[f]
1921 1916
1922 1917 return man
1923 1918
1924 1919 @propertycache
1925 1920 def _status(self):
1926 1921 """Calculate exact status from ``files`` specified at construction
1927 1922 """
1928 1923 man1 = self.p1().manifest()
1929 1924 p2 = self._parents[1]
1930 1925 # "1 < len(self._parents)" can't be used for checking
1931 1926 # existence of the 2nd parent, because "memctx._parents" is
1932 1927 # explicitly initialized by the list, of which length is 2.
1933 1928 if p2.node() != nullid:
1934 1929 man2 = p2.manifest()
1935 1930 managing = lambda f: f in man1 or f in man2
1936 1931 else:
1937 1932 managing = lambda f: f in man1
1938 1933
1939 1934 modified, added, removed = [], [], []
1940 1935 for f in self._files:
1941 1936 if not managing(f):
1942 1937 added.append(f)
1943 1938 elif self[f]:
1944 1939 modified.append(f)
1945 1940 else:
1946 1941 removed.append(f)
1947 1942
1948 1943 return scmutil.status(modified, added, removed, [], [], [], [])
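# Clarifying note, not in the original source: the 'elif self[f]' test above
# relies on filectxfn returning a falsy value (None) for removed files, so a
# path not present in the parent manifest(s) is reported as added, a truthy
# memfilectx as modified, and a falsy result as removed.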
1949 1944
1950 1945 class memfilectx(committablefilectx):
1951 1946 """memfilectx represents an in-memory file to commit.
1952 1947
1953 1948 See memctx and committablefilectx for more details.
1954 1949 """
1955 1950 def __init__(self, repo, path, data, islink=False,
1956 1951 isexec=False, copied=None, memctx=None):
1957 1952 """
1958 1953 path is the normalized file path relative to repository root.
1959 1954 data is the file content as a string.
1960 1955 islink is True if the file is a symbolic link.
1961 1956 isexec is True if the file is executable.
1962 1957 copied is the source file path if current file was copied in the
1963 1958 revision being committed, or None."""
1964 1959 super(memfilectx, self).__init__(repo, path, None, memctx)
1965 1960 self._data = data
1966 1961 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1967 1962 self._copied = None
1968 1963 if copied:
1969 1964 self._copied = (copied, nullid)
1970 1965
1971 1966 def data(self):
1972 1967 return self._data
1973 1968 def size(self):
1974 1969 return len(self.data())
1975 1970 def flags(self):
1976 1971 return self._flags
1977 1972 def renamed(self):
1978 1973 return self._copied
1979 1974
1980 1975 def remove(self, ignoremissing=False):
1981 1976 """wraps unlink for a repo's working directory"""
1982 1977 # need to figure out what to do here
1983 1978 del self._changectx[self._path]
1984 1979
1985 1980 def write(self, data, flags):
1986 1981 """wraps repo.wwrite"""
1987 1982 self._data = data