context: add manifestctx property on changectx...
Durham Goode
r30344:362f6f65 (branch: default)
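This changeset gives changectx a cached _manifestctx property plus a public manifestctx() accessor, and reroutes the existing _manifest and _manifestdelta properties through it, so both share a single manifestlog lookup per changeset. A minimal usage sketch (not part of the diff below; it assumes `repo` is a localrepo instance, e.g. one opened with mercurial.hg.repository):

    ctx = repo['tip']            # any changectx
    mctx = ctx.manifestctx()     # new accessor: the cached manifestctx for ctx's manifest node
    mf = mctx.read()             # the full manifest, which is what ctx.manifest() now returns
    delta = mctx.readdelta()     # manifest delta, which is what ctx._manifestdelta now uses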
@@ -1,1984 +1,1989 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 wdirid,
23 23 )
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 fileset,
28 28 match as matchmod,
29 29 mdiff,
30 30 obsolete as obsmod,
31 31 patch,
32 32 phases,
33 33 repoview,
34 34 revlog,
35 35 scmutil,
36 36 subrepo,
37 37 util,
38 38 )
39 39
40 40 propertycache = util.propertycache
41 41
42 42 # Phony node value to stand in for new files in some uses of
43 43 # manifests. Manifests support 21-byte hashes for nodes which are
44 44 # dirty in the working copy.
45 45 _newnode = '!' * 21
46 46
47 47 nonascii = re.compile(r'[^\x21-\x7f]').search
48 48
49 49 class basectx(object):
50 50 """A basectx object represents the common logic for its children:
51 51 changectx: read-only context that is already present in the repo,
52 52 workingctx: a context that represents the working directory and can
53 53 be committed,
54 54 memctx: a context that represents changes in-memory and can also
55 55 be committed."""
56 56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 57 if isinstance(changeid, basectx):
58 58 return changeid
59 59
60 60 o = super(basectx, cls).__new__(cls)
61 61
62 62 o._repo = repo
63 63 o._rev = nullrev
64 64 o._node = nullid
65 65
66 66 return o
67 67
68 68 def __str__(self):
69 69 return short(self.node())
70 70
71 71 def __int__(self):
72 72 return self.rev()
73 73
74 74 def __repr__(self):
75 75 return "<%s %s>" % (type(self).__name__, str(self))
76 76
77 77 def __eq__(self, other):
78 78 try:
79 79 return type(self) == type(other) and self._rev == other._rev
80 80 except AttributeError:
81 81 return False
82 82
83 83 def __ne__(self, other):
84 84 return not (self == other)
85 85
86 86 def __contains__(self, key):
87 87 return key in self._manifest
88 88
89 89 def __getitem__(self, key):
90 90 return self.filectx(key)
91 91
92 92 def __iter__(self):
93 93 return iter(self._manifest)
94 94
95 95 def _manifestmatches(self, match, s):
96 96 """generate a new manifest filtered by the match argument
97 97
98 98 This method is for internal use only and mainly exists to provide an
99 99 object oriented way for other contexts to customize the manifest
100 100 generation.
101 101 """
102 102 return self.manifest().matches(match)
103 103
104 104 def _matchstatus(self, other, match):
105 105 """return match.always if match is none
106 106
107 107 This internal method provides a way for child objects to override the
108 108 match operator.
109 109 """
110 110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111 111
112 112 def _buildstatus(self, other, s, match, listignored, listclean,
113 113 listunknown):
114 114 """build a status with respect to another context"""
115 115 # Load earliest manifest first for caching reasons. More specifically,
116 116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 119 # delta to what's in the cache. So that's one full reconstruction + one
120 120 # delta application.
121 121 if self.rev() is not None and self.rev() < other.rev():
122 122 self.manifest()
123 123 mf1 = other._manifestmatches(match, s)
124 124 mf2 = self._manifestmatches(match, s)
125 125
126 126 modified, added = [], []
127 127 removed = []
128 128 clean = []
129 129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 130 deletedset = set(deleted)
131 131 d = mf1.diff(mf2, clean=listclean)
132 132 for fn, value in d.iteritems():
133 133 if fn in deletedset:
134 134 continue
135 135 if value is None:
136 136 clean.append(fn)
137 137 continue
138 138 (node1, flag1), (node2, flag2) = value
139 139 if node1 is None:
140 140 added.append(fn)
141 141 elif node2 is None:
142 142 removed.append(fn)
143 143 elif flag1 != flag2:
144 144 modified.append(fn)
145 145 elif node2 != _newnode:
146 146 # When comparing files between two commits, we save time by
147 147 # not comparing the file contents when the nodeids differ.
148 148 # Note that this means we incorrectly report a reverted change
149 149 # to a file as a modification.
150 150 modified.append(fn)
151 151 elif self[fn].cmp(other[fn]):
152 152 modified.append(fn)
153 153 else:
154 154 clean.append(fn)
155 155
156 156 if removed:
157 157 # need to filter files if they are already reported as removed
158 158 unknown = [fn for fn in unknown if fn not in mf1]
159 159 ignored = [fn for fn in ignored if fn not in mf1]
160 160 # if they're deleted, don't report them as removed
161 161 removed = [fn for fn in removed if fn not in deletedset]
162 162
163 163 return scmutil.status(modified, added, removed, deleted, unknown,
164 164 ignored, clean)
165 165
166 166 @propertycache
167 167 def substate(self):
168 168 return subrepo.state(self, self._repo.ui)
169 169
170 170 def subrev(self, subpath):
171 171 return self.substate[subpath][1]
172 172
173 173 def rev(self):
174 174 return self._rev
175 175 def node(self):
176 176 return self._node
177 177 def hex(self):
178 178 return hex(self.node())
179 179 def manifest(self):
180 180 return self._manifest
181 def manifestctx(self):
182 return self._manifestctx
181 183 def repo(self):
182 184 return self._repo
183 185 def phasestr(self):
184 186 return phases.phasenames[self.phase()]
185 187 def mutable(self):
186 188 return self.phase() > phases.public
187 189
188 190 def getfileset(self, expr):
189 191 return fileset.getfileset(self, expr)
190 192
191 193 def obsolete(self):
192 194 """True if the changeset is obsolete"""
193 195 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194 196
195 197 def extinct(self):
196 198 """True if the changeset is extinct"""
197 199 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198 200
199 201 def unstable(self):
200 202 """True if the changeset is not obsolete but it's ancestor are"""
201 203 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202 204
203 205 def bumped(self):
204 206 """True if the changeset try to be a successor of a public changeset
205 207
206 208 Only non-public and non-obsolete changesets may be bumped.
207 209 """
208 210 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209 211
210 212 def divergent(self):
211 213 """Is a successors of a changeset with multiple possible successors set
212 214
213 215 Only non-public and non-obsolete changesets may be divergent.
214 216 """
215 217 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216 218
217 219 def troubled(self):
218 220 """True if the changeset is either unstable, bumped or divergent"""
219 221 return self.unstable() or self.bumped() or self.divergent()
220 222
221 223 def troubles(self):
222 224 """return the list of troubles affecting this changesets.
223 225
224 226 Troubles are returned as strings. Possible values are:
225 227 - unstable,
226 228 - bumped,
227 229 - divergent.
228 230 """
229 231 troubles = []
230 232 if self.unstable():
231 233 troubles.append('unstable')
232 234 if self.bumped():
233 235 troubles.append('bumped')
234 236 if self.divergent():
235 237 troubles.append('divergent')
236 238 return troubles
237 239
238 240 def parents(self):
239 241 """return contexts for each parent changeset"""
240 242 return self._parents
241 243
242 244 def p1(self):
243 245 return self._parents[0]
244 246
245 247 def p2(self):
246 248 parents = self._parents
247 249 if len(parents) == 2:
248 250 return parents[1]
249 251 return changectx(self._repo, nullrev)
250 252
251 253 def _fileinfo(self, path):
252 254 if '_manifest' in self.__dict__:
253 255 try:
254 256 return self._manifest[path], self._manifest.flags(path)
255 257 except KeyError:
256 258 raise error.ManifestLookupError(self._node, path,
257 259 _('not found in manifest'))
258 260 if '_manifestdelta' in self.__dict__ or path in self.files():
259 261 if path in self._manifestdelta:
260 262 return (self._manifestdelta[path],
261 263 self._manifestdelta.flags(path))
262 264 mfl = self._repo.manifestlog
263 265 try:
264 266 node, flag = mfl[self._changeset.manifest].find(path)
265 267 except KeyError:
266 268 raise error.ManifestLookupError(self._node, path,
267 269 _('not found in manifest'))
268 270
269 271 return node, flag
270 272
271 273 def filenode(self, path):
272 274 return self._fileinfo(path)[0]
273 275
274 276 def flags(self, path):
275 277 try:
276 278 return self._fileinfo(path)[1]
277 279 except error.LookupError:
278 280 return ''
279 281
280 282 def sub(self, path, allowcreate=True):
281 283 '''return a subrepo for the stored revision of path, never wdir()'''
282 284 return subrepo.subrepo(self, path, allowcreate=allowcreate)
283 285
284 286 def nullsub(self, path, pctx):
285 287 return subrepo.nullsubrepo(self, path, pctx)
286 288
287 289 def workingsub(self, path):
288 290 '''return a subrepo for the stored revision, or wdir if this is a wdir
289 291 context.
290 292 '''
291 293 return subrepo.subrepo(self, path, allowwdir=True)
292 294
293 295 def match(self, pats=[], include=None, exclude=None, default='glob',
294 296 listsubrepos=False, badfn=None):
295 297 r = self._repo
296 298 return matchmod.match(r.root, r.getcwd(), pats,
297 299 include, exclude, default,
298 300 auditor=r.nofsauditor, ctx=self,
299 301 listsubrepos=listsubrepos, badfn=badfn)
300 302
301 303 def diff(self, ctx2=None, match=None, **opts):
302 304 """Returns a diff generator for the given contexts and matcher"""
303 305 if ctx2 is None:
304 306 ctx2 = self.p1()
305 307 if ctx2 is not None:
306 308 ctx2 = self._repo[ctx2]
307 309 diffopts = patch.diffopts(self._repo.ui, opts)
308 310 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
309 311
310 312 def dirs(self):
311 313 return self._manifest.dirs()
312 314
313 315 def hasdir(self, dir):
314 316 return self._manifest.hasdir(dir)
315 317
316 318 def dirty(self, missing=False, merge=True, branch=True):
317 319 return False
318 320
319 321 def status(self, other=None, match=None, listignored=False,
320 322 listclean=False, listunknown=False, listsubrepos=False):
321 323 """return status of files between two nodes or node and working
322 324 directory.
323 325
324 326 If other is None, compare this node with working directory.
325 327
326 328 returns (modified, added, removed, deleted, unknown, ignored, clean)
327 329 """
328 330
329 331 ctx1 = self
330 332 ctx2 = self._repo[other]
331 333
332 334 # This next code block is, admittedly, fragile logic that tests for
333 335 # reversing the contexts and wouldn't need to exist if it weren't for
334 336 # the fast (and common) code path of comparing the working directory
335 337 # with its first parent.
336 338 #
337 339 # What we're aiming for here is the ability to call:
338 340 #
339 341 # workingctx.status(parentctx)
340 342 #
341 343 # If we always built the manifest for each context and compared those,
342 344 # then we'd be done. But the special case of the above call means we
343 345 # just copy the manifest of the parent.
344 346 reversed = False
345 347 if (not isinstance(ctx1, changectx)
346 348 and isinstance(ctx2, changectx)):
347 349 reversed = True
348 350 ctx1, ctx2 = ctx2, ctx1
349 351
350 352 match = ctx2._matchstatus(ctx1, match)
351 353 r = scmutil.status([], [], [], [], [], [], [])
352 354 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
353 355 listunknown)
354 356
355 357 if reversed:
356 358 # Reverse added and removed. Clear deleted, unknown and ignored as
357 359 # these make no sense to reverse.
358 360 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
359 361 r.clean)
360 362
361 363 if listsubrepos:
362 364 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
363 365 try:
364 366 rev2 = ctx2.subrev(subpath)
365 367 except KeyError:
366 368 # A subrepo that existed in node1 was deleted between
367 369 # node1 and node2 (inclusive). Thus, ctx2's substate
368 370 # won't contain that subpath. The best we can do is ignore it.
369 371 rev2 = None
370 372 submatch = matchmod.subdirmatcher(subpath, match)
371 373 s = sub.status(rev2, match=submatch, ignored=listignored,
372 374 clean=listclean, unknown=listunknown,
373 375 listsubrepos=True)
374 376 for rfiles, sfiles in zip(r, s):
375 377 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
376 378
377 379 for l in r:
378 380 l.sort()
379 381
380 382 return r
381 383
382 384
383 385 def makememctx(repo, parents, text, user, date, branch, files, store,
384 386 editor=None, extra=None):
385 387 def getfilectx(repo, memctx, path):
386 388 data, mode, copied = store.getfile(path)
387 389 if data is None:
388 390 return None
389 391 islink, isexec = mode
390 392 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
391 393 copied=copied, memctx=memctx)
392 394 if extra is None:
393 395 extra = {}
394 396 if branch:
395 397 extra['branch'] = encoding.fromlocal(branch)
396 398 ctx = memctx(repo, parents, text, files, getfilectx, user,
397 399 date, extra, editor)
398 400 return ctx
399 401
400 402 class changectx(basectx):
401 403 """A changecontext object makes access to data related to a particular
402 404 changeset convenient. It represents a read-only context already present in
403 405 the repo."""
404 406 def __init__(self, repo, changeid=''):
405 407 """changeid is a revision number, node, or tag"""
406 408
407 409 # since basectx.__new__ already took care of copying the object, we
408 410 # don't need to do anything in __init__, so we just exit here
409 411 if isinstance(changeid, basectx):
410 412 return
411 413
412 414 if changeid == '':
413 415 changeid = '.'
414 416 self._repo = repo
415 417
416 418 try:
417 419 if isinstance(changeid, int):
418 420 self._node = repo.changelog.node(changeid)
419 421 self._rev = changeid
420 422 return
421 423 if isinstance(changeid, long):
422 424 changeid = str(changeid)
423 425 if changeid == 'null':
424 426 self._node = nullid
425 427 self._rev = nullrev
426 428 return
427 429 if changeid == 'tip':
428 430 self._node = repo.changelog.tip()
429 431 self._rev = repo.changelog.rev(self._node)
430 432 return
431 433 if changeid == '.' or changeid == repo.dirstate.p1():
432 434 # this is a hack to delay/avoid loading obsmarkers
433 435 # when we know that '.' won't be hidden
434 436 self._node = repo.dirstate.p1()
435 437 self._rev = repo.unfiltered().changelog.rev(self._node)
436 438 return
437 439 if len(changeid) == 20:
438 440 try:
439 441 self._node = changeid
440 442 self._rev = repo.changelog.rev(changeid)
441 443 return
442 444 except error.FilteredRepoLookupError:
443 445 raise
444 446 except LookupError:
445 447 pass
446 448
447 449 try:
448 450 r = int(changeid)
449 451 if str(r) != changeid:
450 452 raise ValueError
451 453 l = len(repo.changelog)
452 454 if r < 0:
453 455 r += l
454 456 if r < 0 or r >= l:
455 457 raise ValueError
456 458 self._rev = r
457 459 self._node = repo.changelog.node(r)
458 460 return
459 461 except error.FilteredIndexError:
460 462 raise
461 463 except (ValueError, OverflowError, IndexError):
462 464 pass
463 465
464 466 if len(changeid) == 40:
465 467 try:
466 468 self._node = bin(changeid)
467 469 self._rev = repo.changelog.rev(self._node)
468 470 return
469 471 except error.FilteredLookupError:
470 472 raise
471 473 except (TypeError, LookupError):
472 474 pass
473 475
474 476 # lookup bookmarks through the name interface
475 477 try:
476 478 self._node = repo.names.singlenode(repo, changeid)
477 479 self._rev = repo.changelog.rev(self._node)
478 480 return
479 481 except KeyError:
480 482 pass
481 483 except error.FilteredRepoLookupError:
482 484 raise
483 485 except error.RepoLookupError:
484 486 pass
485 487
486 488 self._node = repo.unfiltered().changelog._partialmatch(changeid)
487 489 if self._node is not None:
488 490 self._rev = repo.changelog.rev(self._node)
489 491 return
490 492
491 493 # lookup failed
492 494 # check if it might have come from damaged dirstate
493 495 #
494 496 # XXX we could avoid the unfiltered if we had a recognizable
495 497 # exception for filtered changeset access
496 498 if changeid in repo.unfiltered().dirstate.parents():
497 499 msg = _("working directory has unknown parent '%s'!")
498 500 raise error.Abort(msg % short(changeid))
499 501 try:
500 502 if len(changeid) == 20 and nonascii(changeid):
501 503 changeid = hex(changeid)
502 504 except TypeError:
503 505 pass
504 506 except (error.FilteredIndexError, error.FilteredLookupError,
505 507 error.FilteredRepoLookupError):
506 508 if repo.filtername.startswith('visible'):
507 509 msg = _("hidden revision '%s'") % changeid
508 510 hint = _('use --hidden to access hidden revisions')
509 511 raise error.FilteredRepoLookupError(msg, hint=hint)
510 512 msg = _("filtered revision '%s' (not in '%s' subset)")
511 513 msg %= (changeid, repo.filtername)
512 514 raise error.FilteredRepoLookupError(msg)
513 515 except IndexError:
514 516 pass
515 517 raise error.RepoLookupError(
516 518 _("unknown revision '%s'") % changeid)
517 519
518 520 def __hash__(self):
519 521 try:
520 522 return hash(self._rev)
521 523 except AttributeError:
522 524 return id(self)
523 525
524 526 def __nonzero__(self):
525 527 return self._rev != nullrev
526 528
527 529 @propertycache
528 530 def _changeset(self):
529 531 return self._repo.changelog.changelogrevision(self.rev())
530 532
531 533 @propertycache
532 534 def _manifest(self):
533 return self._repo.manifestlog[self._changeset.manifest].read()
535 return self._manifestctx.read()
536
537 @propertycache
538 def _manifestctx(self):
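# Cached via @propertycache so that _manifest (read()) and _manifestdelta
# (readdelta()) share a single manifestlog lookup for this changeset's
# manifest node.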
539 return self._repo.manifestlog[self._changeset.manifest]
534 540
535 541 @propertycache
536 542 def _manifestdelta(self):
537 mfnode = self._changeset.manifest
538 return self._repo.manifestlog[mfnode].readdelta()
543 return self._manifestctx.readdelta()
539 544
540 545 @propertycache
541 546 def _parents(self):
542 547 repo = self._repo
543 548 p1, p2 = repo.changelog.parentrevs(self._rev)
544 549 if p2 == nullrev:
545 550 return [changectx(repo, p1)]
546 551 return [changectx(repo, p1), changectx(repo, p2)]
547 552
548 553 def changeset(self):
549 554 c = self._changeset
550 555 return (
551 556 c.manifest,
552 557 c.user,
553 558 c.date,
554 559 c.files,
555 560 c.description,
556 561 c.extra,
557 562 )
558 563 def manifestnode(self):
559 564 return self._changeset.manifest
560 565
561 566 def user(self):
562 567 return self._changeset.user
563 568 def date(self):
564 569 return self._changeset.date
565 570 def files(self):
566 571 return self._changeset.files
567 572 def description(self):
568 573 return self._changeset.description
569 574 def branch(self):
570 575 return encoding.tolocal(self._changeset.extra.get("branch"))
571 576 def closesbranch(self):
572 577 return 'close' in self._changeset.extra
573 578 def extra(self):
574 579 return self._changeset.extra
575 580 def tags(self):
576 581 return self._repo.nodetags(self._node)
577 582 def bookmarks(self):
578 583 return self._repo.nodebookmarks(self._node)
579 584 def phase(self):
580 585 return self._repo._phasecache.phase(self._repo, self._rev)
581 586 def hidden(self):
582 587 return self._rev in repoview.filterrevs(self._repo, 'visible')
583 588
584 589 def children(self):
585 590 """return contexts for each child changeset"""
586 591 c = self._repo.changelog.children(self._node)
587 592 return [changectx(self._repo, x) for x in c]
588 593
589 594 def ancestors(self):
590 595 for a in self._repo.changelog.ancestors([self._rev]):
591 596 yield changectx(self._repo, a)
592 597
593 598 def descendants(self):
594 599 for d in self._repo.changelog.descendants([self._rev]):
595 600 yield changectx(self._repo, d)
596 601
597 602 def filectx(self, path, fileid=None, filelog=None):
598 603 """get a file context from this changeset"""
599 604 if fileid is None:
600 605 fileid = self.filenode(path)
601 606 return filectx(self._repo, path, fileid=fileid,
602 607 changectx=self, filelog=filelog)
603 608
604 609 def ancestor(self, c2, warn=False):
605 610 """return the "best" ancestor context of self and c2
606 611
607 612 If there are multiple candidates, it will show a message and check
608 613 merge.preferancestor configuration before falling back to the
609 614 revlog ancestor."""
610 615 # deal with workingctxs
611 616 n2 = c2._node
612 617 if n2 is None:
613 618 n2 = c2._parents[0]._node
614 619 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
615 620 if not cahs:
616 621 anc = nullid
617 622 elif len(cahs) == 1:
618 623 anc = cahs[0]
619 624 else:
620 625 # experimental config: merge.preferancestor
621 626 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
622 627 try:
623 628 ctx = changectx(self._repo, r)
624 629 except error.RepoLookupError:
625 630 continue
626 631 anc = ctx.node()
627 632 if anc in cahs:
628 633 break
629 634 else:
630 635 anc = self._repo.changelog.ancestor(self._node, n2)
631 636 if warn:
632 637 self._repo.ui.status(
633 638 (_("note: using %s as ancestor of %s and %s\n") %
634 639 (short(anc), short(self._node), short(n2))) +
635 640 ''.join(_(" alternatively, use --config "
636 641 "merge.preferancestor=%s\n") %
637 642 short(n) for n in sorted(cahs) if n != anc))
638 643 return changectx(self._repo, anc)
639 644
640 645 def descendant(self, other):
641 646 """True if other is descendant of this changeset"""
642 647 return self._repo.changelog.descendant(self._rev, other._rev)
643 648
644 649 def walk(self, match):
645 650 '''Generates matching file names.'''
646 651
647 652 # Wrap match.bad method to have message with nodeid
648 653 def bad(fn, msg):
649 654 # The manifest doesn't know about subrepos, so don't complain about
650 655 # paths into valid subrepos.
651 656 if any(fn == s or fn.startswith(s + '/')
652 657 for s in self.substate):
653 658 return
654 659 match.bad(fn, _('no such file in rev %s') % self)
655 660
656 661 m = matchmod.badmatch(match, bad)
657 662 return self._manifest.walk(m)
658 663
659 664 def matches(self, match):
660 665 return self.walk(match)
661 666
662 667 class basefilectx(object):
663 668 """A filecontext object represents the common logic for its children:
664 669 filectx: read-only access to a filerevision that is already present
665 670 in the repo,
666 671 workingfilectx: a filecontext that represents files from the working
667 672 directory,
668 673 memfilectx: a filecontext that represents files in-memory."""
669 674 def __new__(cls, repo, path, *args, **kwargs):
670 675 return super(basefilectx, cls).__new__(cls)
671 676
672 677 @propertycache
673 678 def _filelog(self):
674 679 return self._repo.file(self._path)
675 680
676 681 @propertycache
677 682 def _changeid(self):
678 683 if '_changeid' in self.__dict__:
679 684 return self._changeid
680 685 elif '_changectx' in self.__dict__:
681 686 return self._changectx.rev()
682 687 elif '_descendantrev' in self.__dict__:
683 688 # this file context was created from a revision with a known
684 689 # descendant, we can (lazily) correct for linkrev aliases
685 690 return self._adjustlinkrev(self._descendantrev)
686 691 else:
687 692 return self._filelog.linkrev(self._filerev)
688 693
689 694 @propertycache
690 695 def _filenode(self):
691 696 if '_fileid' in self.__dict__:
692 697 return self._filelog.lookup(self._fileid)
693 698 else:
694 699 return self._changectx.filenode(self._path)
695 700
696 701 @propertycache
697 702 def _filerev(self):
698 703 return self._filelog.rev(self._filenode)
699 704
700 705 @propertycache
701 706 def _repopath(self):
702 707 return self._path
703 708
704 709 def __nonzero__(self):
705 710 try:
706 711 self._filenode
707 712 return True
708 713 except error.LookupError:
709 714 # file is missing
710 715 return False
711 716
712 717 def __str__(self):
713 718 try:
714 719 return "%s@%s" % (self.path(), self._changectx)
715 720 except error.LookupError:
716 721 return "%s@???" % self.path()
717 722
718 723 def __repr__(self):
719 724 return "<%s %s>" % (type(self).__name__, str(self))
720 725
721 726 def __hash__(self):
722 727 try:
723 728 return hash((self._path, self._filenode))
724 729 except AttributeError:
725 730 return id(self)
726 731
727 732 def __eq__(self, other):
728 733 try:
729 734 return (type(self) == type(other) and self._path == other._path
730 735 and self._filenode == other._filenode)
731 736 except AttributeError:
732 737 return False
733 738
734 739 def __ne__(self, other):
735 740 return not (self == other)
736 741
737 742 def filerev(self):
738 743 return self._filerev
739 744 def filenode(self):
740 745 return self._filenode
741 746 def flags(self):
742 747 return self._changectx.flags(self._path)
743 748 def filelog(self):
744 749 return self._filelog
745 750 def rev(self):
746 751 return self._changeid
747 752 def linkrev(self):
748 753 return self._filelog.linkrev(self._filerev)
749 754 def node(self):
750 755 return self._changectx.node()
751 756 def hex(self):
752 757 return self._changectx.hex()
753 758 def user(self):
754 759 return self._changectx.user()
755 760 def date(self):
756 761 return self._changectx.date()
757 762 def files(self):
758 763 return self._changectx.files()
759 764 def description(self):
760 765 return self._changectx.description()
761 766 def branch(self):
762 767 return self._changectx.branch()
763 768 def extra(self):
764 769 return self._changectx.extra()
765 770 def phase(self):
766 771 return self._changectx.phase()
767 772 def phasestr(self):
768 773 return self._changectx.phasestr()
769 774 def manifest(self):
770 775 return self._changectx.manifest()
771 776 def changectx(self):
772 777 return self._changectx
773 778 def repo(self):
774 779 return self._repo
775 780
776 781 def path(self):
777 782 return self._path
778 783
779 784 def isbinary(self):
780 785 try:
781 786 return util.binary(self.data())
782 787 except IOError:
783 788 return False
784 789 def isexec(self):
785 790 return 'x' in self.flags()
786 791 def islink(self):
787 792 return 'l' in self.flags()
788 793
789 794 def isabsent(self):
790 795 """whether this filectx represents a file not in self._changectx
791 796
792 797 This is mainly for merge code to detect change/delete conflicts. This is
793 798 expected to be True for all subclasses of basectx."""
794 799 return False
795 800
796 801 _customcmp = False
797 802 def cmp(self, fctx):
798 803 """compare with other file context
799 804
800 805 returns True if different than fctx.
801 806 """
802 807 if fctx._customcmp:
803 808 return fctx.cmp(self)
804 809
805 810 if (fctx._filenode is None
806 811 and (self._repo._encodefilterpats
807 812 # if file data starts with '\1\n', empty metadata block is
808 813 # prepended, which adds 4 bytes to filelog.size().
809 814 or self.size() - 4 == fctx.size())
810 815 or self.size() == fctx.size()):
811 816 return self._filelog.cmp(self._filenode, fctx.data())
812 817
813 818 return True
814 819
815 820 def _adjustlinkrev(self, srcrev, inclusive=False):
816 821 """return the first ancestor of <srcrev> introducing <fnode>
817 822
818 823 If the linkrev of the file revision does not point to an ancestor of
819 824 srcrev, we'll walk down the ancestors until we find one introducing
820 825 this file revision.
821 826
822 827 :srcrev: the changeset revision we search ancestors from
823 828 :inclusive: if true, the src revision will also be checked
824 829 """
825 830 repo = self._repo
826 831 cl = repo.unfiltered().changelog
827 832 mfl = repo.manifestlog
828 833 # fetch the linkrev
829 834 lkr = self.linkrev()
830 835 # hack to reuse ancestor computation when searching for renames
831 836 memberanc = getattr(self, '_ancestrycontext', None)
832 837 iteranc = None
833 838 if srcrev is None:
834 839 # wctx case, used by workingfilectx during mergecopy
835 840 revs = [p.rev() for p in self._repo[None].parents()]
836 841 inclusive = True # we skipped the real (revless) source
837 842 else:
838 843 revs = [srcrev]
839 844 if memberanc is None:
840 845 memberanc = iteranc = cl.ancestors(revs, lkr,
841 846 inclusive=inclusive)
842 847 # check if this linkrev is an ancestor of srcrev
843 848 if lkr not in memberanc:
844 849 if iteranc is None:
845 850 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
846 851 fnode = self._filenode
847 852 path = self._path
848 853 for a in iteranc:
849 854 ac = cl.read(a) # get changeset data (we avoid object creation)
850 855 if path in ac[3]: # checking the 'files' field.
851 856 # The file has been touched, check if the content is
852 857 # similar to the one we search for.
853 858 if fnode == mfl[ac[0]].readfast().get(path):
854 859 return a
855 860 # In theory, we should never get out of that loop without a result.
856 861 # But if the manifest uses a buggy file revision (not a child of
857 862 # the one it replaces) we could. Such a buggy situation will likely
858 863 # result in a crash somewhere else at some point.
859 864 return lkr
860 865
861 866 def introrev(self):
862 867 """return the rev of the changeset which introduced this file revision
863 868
864 869 This method is different from linkrev because it takes into account the
865 870 changeset the filectx was created from. It ensures the returned
866 871 revision is one of its ancestors. This prevents bugs from
867 872 'linkrev-shadowing' when a file revision is used by multiple
868 873 changesets.
869 874 """
870 875 lkr = self.linkrev()
871 876 attrs = vars(self)
872 877 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
873 878 if noctx or self.rev() == lkr:
874 879 return self.linkrev()
875 880 return self._adjustlinkrev(self.rev(), inclusive=True)
876 881
877 882 def _parentfilectx(self, path, fileid, filelog):
878 883 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
879 884 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
880 885 if '_changeid' in vars(self) or '_changectx' in vars(self):
881 886 # If self is associated with a changeset (probably explicitly
882 887 # fed), ensure the created filectx is associated with a
883 888 # changeset that is an ancestor of self.changectx.
884 889 # This lets us later use _adjustlinkrev to get a correct link.
885 890 fctx._descendantrev = self.rev()
886 891 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
887 892 elif '_descendantrev' in vars(self):
888 893 # Otherwise propagate _descendantrev if we have one associated.
889 894 fctx._descendantrev = self._descendantrev
890 895 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 896 return fctx
892 897
893 898 def parents(self):
894 899 _path = self._path
895 900 fl = self._filelog
896 901 parents = self._filelog.parents(self._filenode)
897 902 pl = [(_path, node, fl) for node in parents if node != nullid]
898 903
899 904 r = fl.renamed(self._filenode)
900 905 if r:
901 906 # - In the simple rename case, both parents are nullid, pl is empty.
902 907 # - In case of merge, only one of the parents is nullid and should
903 908 # be replaced with the rename information. This parent is -always-
904 909 # the first one.
905 910 #
906 911 # As nullid parents have always been filtered out by the previous list
907 912 # comprehension, inserting at 0 will always result in replacing the
908 913 # first nullid parent with the rename information.
909 914 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
910 915
911 916 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
912 917
913 918 def p1(self):
914 919 return self.parents()[0]
915 920
916 921 def p2(self):
917 922 p = self.parents()
918 923 if len(p) == 2:
919 924 return p[1]
920 925 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
921 926
922 927 def annotate(self, follow=False, linenumber=False, diffopts=None):
923 928 '''returns a list of tuples of ((ctx, number), line) for each line
924 929 in the file, where ctx is the filectx of the node where
925 930 that line was last changed; if linenumber parameter is true, number is
926 931 the line number at the first appearance in the managed file, otherwise,
927 932 number has a fixed value of False.
928 933 '''
929 934
930 935 def lines(text):
931 936 if text.endswith("\n"):
932 937 return text.count("\n")
933 938 return text.count("\n") + int(bool(text))
934 939
935 940 if linenumber:
936 941 def decorate(text, rev):
937 942 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
938 943 else:
939 944 def decorate(text, rev):
940 945 return ([(rev, False)] * lines(text), text)
941 946
942 947 def pair(parent, child):
943 948 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
944 949 for (a1, a2, b1, b2), t in blocks:
945 950 # Changed blocks ('!') or blocks made only of blank lines ('~')
946 951 # belong to the child.
947 952 if t == '=':
948 953 child[0][b1:b2] = parent[0][a1:a2]
949 954 return child
950 955
951 956 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952 957
953 958 def parents(f):
954 959 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 960 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 961 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 962 # isn't an ancestor of the srcrev.
958 963 f._changeid
959 964 pl = f.parents()
960 965
961 966 # Don't return renamed parents if we aren't following.
962 967 if not follow:
963 968 pl = [p for p in pl if p.path() == f.path()]
964 969
965 970 # renamed filectx won't have a filelog yet, so set it
966 971 # from the cache to save time
967 972 for p in pl:
968 973 if not '_filelog' in p.__dict__:
969 974 p._filelog = getlog(p.path())
970 975
971 976 return pl
972 977
973 978 # use linkrev to find the first changeset where self appeared
974 979 base = self
975 980 introrev = self.introrev()
976 981 if self.rev() != introrev:
977 982 base = self.filectx(self.filenode(), changeid=introrev)
978 983 if getattr(base, '_ancestrycontext', None) is None:
979 984 cl = self._repo.changelog
980 985 if introrev is None:
981 986 # wctx is not inclusive, but works because _ancestrycontext
982 987 # is used to test filelog revisions
983 988 ac = cl.ancestors([p.rev() for p in base.parents()],
984 989 inclusive=True)
985 990 else:
986 991 ac = cl.ancestors([introrev], inclusive=True)
987 992 base._ancestrycontext = ac
988 993
989 994 # This algorithm would prefer to be recursive, but Python is a
990 995 # bit recursion-hostile. Instead we do an iterative
991 996 # depth-first search.
992 997
993 998 # 1st DFS pre-calculates pcache and needed
994 999 visit = [base]
995 1000 pcache = {}
996 1001 needed = {base: 1}
997 1002 while visit:
998 1003 f = visit.pop()
999 1004 if f in pcache:
1000 1005 continue
1001 1006 pl = parents(f)
1002 1007 pcache[f] = pl
1003 1008 for p in pl:
1004 1009 needed[p] = needed.get(p, 0) + 1
1005 1010 if p not in pcache:
1006 1011 visit.append(p)
1007 1012
1008 1013 # 2nd DFS does the actual annotate
1009 1014 visit[:] = [base]
1010 1015 hist = {}
1011 1016 while visit:
1012 1017 f = visit[-1]
1013 1018 if f in hist:
1014 1019 visit.pop()
1015 1020 continue
1016 1021
1017 1022 ready = True
1018 1023 pl = pcache[f]
1019 1024 for p in pl:
1020 1025 if p not in hist:
1021 1026 ready = False
1022 1027 visit.append(p)
1023 1028 if ready:
1024 1029 visit.pop()
1025 1030 curr = decorate(f.data(), f)
1026 1031 for p in pl:
1027 1032 curr = pair(hist[p], curr)
1028 1033 if needed[p] == 1:
1029 1034 del hist[p]
1030 1035 del needed[p]
1031 1036 else:
1032 1037 needed[p] -= 1
1033 1038
1034 1039 hist[f] = curr
1035 1040 del pcache[f]
1036 1041
1037 1042 return zip(hist[base][0], hist[base][1].splitlines(True))
1038 1043
1039 1044 def ancestors(self, followfirst=False):
1040 1045 visit = {}
1041 1046 c = self
1042 1047 if followfirst:
1043 1048 cut = 1
1044 1049 else:
1045 1050 cut = None
1046 1051
1047 1052 while True:
1048 1053 for parent in c.parents()[:cut]:
1049 1054 visit[(parent.linkrev(), parent.filenode())] = parent
1050 1055 if not visit:
1051 1056 break
1052 1057 c = visit.pop(max(visit))
1053 1058 yield c
1054 1059
1055 1060 class filectx(basefilectx):
1056 1061 """A filecontext object makes access to data related to a particular
1057 1062 filerevision convenient."""
1058 1063 def __init__(self, repo, path, changeid=None, fileid=None,
1059 1064 filelog=None, changectx=None):
1060 1065 """changeid can be a changeset revision, node, or tag.
1061 1066 fileid can be a file revision or node."""
1062 1067 self._repo = repo
1063 1068 self._path = path
1064 1069
1065 1070 assert (changeid is not None
1066 1071 or fileid is not None
1067 1072 or changectx is not None), \
1068 1073 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1069 1074 % (changeid, fileid, changectx))
1070 1075
1071 1076 if filelog is not None:
1072 1077 self._filelog = filelog
1073 1078
1074 1079 if changeid is not None:
1075 1080 self._changeid = changeid
1076 1081 if changectx is not None:
1077 1082 self._changectx = changectx
1078 1083 if fileid is not None:
1079 1084 self._fileid = fileid
1080 1085
1081 1086 @propertycache
1082 1087 def _changectx(self):
1083 1088 try:
1084 1089 return changectx(self._repo, self._changeid)
1085 1090 except error.FilteredRepoLookupError:
1086 1091 # Linkrev may point to any revision in the repository. When the
1087 1092 # repository is filtered, this may lead to `filectx` trying to build a
1088 1093 # `changectx` for a filtered revision. In such a case we fall back to
1089 1094 # creating `changectx` on the unfiltered version of the repository.
1090 1095 # This fallback should not be an issue because `changectx` from
1091 1096 # `filectx` are not used in complex operations that care about
1092 1097 # filtering.
1093 1098 #
1094 1099 # This fallback is a cheap and dirty fix that prevents several
1095 1100 # crashes. It does not ensure the behavior is correct. However the
1096 1101 # behavior was not correct before filtering either and "incorrect
1097 1102 # behavior" is seen as better as "crash"
1098 1103 #
1099 1104 # Linkrevs have several serious troubles with filtering that are
1100 1105 # complicated to solve. Proper handling of the issue here should be
1101 1106 # considered when solving the linkrev issues is on the table.
1102 1107 return changectx(self._repo.unfiltered(), self._changeid)
1103 1108
1104 1109 def filectx(self, fileid, changeid=None):
1105 1110 '''opens an arbitrary revision of the file without
1106 1111 opening a new filelog'''
1107 1112 return filectx(self._repo, self._path, fileid=fileid,
1108 1113 filelog=self._filelog, changeid=changeid)
1109 1114
1110 1115 def data(self):
1111 1116 try:
1112 1117 return self._filelog.read(self._filenode)
1113 1118 except error.CensoredNodeError:
1114 1119 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1115 1120 return ""
1116 1121 raise error.Abort(_("censored node: %s") % short(self._filenode),
1117 1122 hint=_("set censor.policy to ignore errors"))
1118 1123
1119 1124 def size(self):
1120 1125 return self._filelog.size(self._filerev)
1121 1126
1122 1127 def renamed(self):
1123 1128 """check if file was actually renamed in this changeset revision
1124 1129
1125 1130 If the rename is logged in the file revision, we report the copy for the
1126 1131 changeset only if the file revision's linkrev points back to the changeset
1127 1132 in question or both changeset parents contain different file revisions.
1128 1133 """
1129 1134
1130 1135 renamed = self._filelog.renamed(self._filenode)
1131 1136 if not renamed:
1132 1137 return renamed
1133 1138
1134 1139 if self.rev() == self.linkrev():
1135 1140 return renamed
1136 1141
1137 1142 name = self.path()
1138 1143 fnode = self._filenode
1139 1144 for p in self._changectx.parents():
1140 1145 try:
1141 1146 if fnode == p.filenode(name):
1142 1147 return None
1143 1148 except error.LookupError:
1144 1149 pass
1145 1150 return renamed
1146 1151
1147 1152 def children(self):
1148 1153 # hard for renames
1149 1154 c = self._filelog.children(self._filenode)
1150 1155 return [filectx(self._repo, self._path, fileid=x,
1151 1156 filelog=self._filelog) for x in c]
1152 1157
1153 1158 class committablectx(basectx):
1154 1159 """A committablectx object provides common functionality for a context that
1155 1160 wants the ability to commit, e.g. workingctx or memctx."""
1156 1161 def __init__(self, repo, text="", user=None, date=None, extra=None,
1157 1162 changes=None):
1158 1163 self._repo = repo
1159 1164 self._rev = None
1160 1165 self._node = None
1161 1166 self._text = text
1162 1167 if date:
1163 1168 self._date = util.parsedate(date)
1164 1169 if user:
1165 1170 self._user = user
1166 1171 if changes:
1167 1172 self._status = changes
1168 1173
1169 1174 self._extra = {}
1170 1175 if extra:
1171 1176 self._extra = extra.copy()
1172 1177 if 'branch' not in self._extra:
1173 1178 try:
1174 1179 branch = encoding.fromlocal(self._repo.dirstate.branch())
1175 1180 except UnicodeDecodeError:
1176 1181 raise error.Abort(_('branch name not in UTF-8!'))
1177 1182 self._extra['branch'] = branch
1178 1183 if self._extra['branch'] == '':
1179 1184 self._extra['branch'] = 'default'
1180 1185
1181 1186 def __str__(self):
1182 1187 return str(self._parents[0]) + "+"
1183 1188
1184 1189 def __nonzero__(self):
1185 1190 return True
1186 1191
1187 1192 def _buildflagfunc(self):
1188 1193 # Create a fallback function for getting file flags when the
1189 1194 # filesystem doesn't support them
1190 1195
1191 1196 copiesget = self._repo.dirstate.copies().get
1192 1197 parents = self.parents()
1193 1198 if len(parents) < 2:
1194 1199 # when we have one parent, it's easy: copy from parent
1195 1200 man = parents[0].manifest()
1196 1201 def func(f):
1197 1202 f = copiesget(f, f)
1198 1203 return man.flags(f)
1199 1204 else:
1200 1205 # merges are tricky: we try to reconstruct the unstored
1201 1206 # result from the merge (issue1802)
1202 1207 p1, p2 = parents
1203 1208 pa = p1.ancestor(p2)
1204 1209 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1205 1210
1206 1211 def func(f):
1207 1212 f = copiesget(f, f) # may be wrong for merges with copies
1208 1213 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1209 1214 if fl1 == fl2:
1210 1215 return fl1
1211 1216 if fl1 == fla:
1212 1217 return fl2
1213 1218 if fl2 == fla:
1214 1219 return fl1
1215 1220 return '' # punt for conflicts
1216 1221
1217 1222 return func
1218 1223
1219 1224 @propertycache
1220 1225 def _flagfunc(self):
1221 1226 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1222 1227
1223 1228 @propertycache
1224 1229 def _manifest(self):
1225 1230 """generate a manifest corresponding to the values in self._status
1226 1231
1227 1232 This reuses the file nodeid from the parent, but we append an extra letter
1228 1233 when modified. Modified files get an extra 'm' while added files get
1229 1234 an extra 'a'. This is used by the manifest merge to see that files
1230 1235 are different and by update logic to avoid deleting newly added files.
1231 1236 """
1232 1237 parents = self.parents()
1233 1238
1234 1239 man1 = parents[0].manifest()
1235 1240 man = man1.copy()
1236 1241 if len(parents) > 1:
1237 1242 man2 = self.p2().manifest()
1238 1243 def getman(f):
1239 1244 if f in man1:
1240 1245 return man1
1241 1246 return man2
1242 1247 else:
1243 1248 getman = lambda f: man1
1244 1249
1245 1250 copied = self._repo.dirstate.copies()
1246 1251 ff = self._flagfunc
1247 1252 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1248 1253 for f in l:
1249 1254 orig = copied.get(f, f)
1250 1255 man[f] = getman(orig).get(orig, nullid) + i
1251 1256 try:
1252 1257 man.setflag(f, ff(f))
1253 1258 except OSError:
1254 1259 pass
1255 1260
1256 1261 for f in self._status.deleted + self._status.removed:
1257 1262 if f in man:
1258 1263 del man[f]
1259 1264
1260 1265 return man
1261 1266
1262 1267 @propertycache
1263 1268 def _status(self):
1264 1269 return self._repo.status()
1265 1270
1266 1271 @propertycache
1267 1272 def _user(self):
1268 1273 return self._repo.ui.username()
1269 1274
1270 1275 @propertycache
1271 1276 def _date(self):
1272 1277 return util.makedate()
1273 1278
1274 1279 def subrev(self, subpath):
1275 1280 return None
1276 1281
1277 1282 def manifestnode(self):
1278 1283 return None
1279 1284 def user(self):
1280 1285 return self._user or self._repo.ui.username()
1281 1286 def date(self):
1282 1287 return self._date
1283 1288 def description(self):
1284 1289 return self._text
1285 1290 def files(self):
1286 1291 return sorted(self._status.modified + self._status.added +
1287 1292 self._status.removed)
1288 1293
1289 1294 def modified(self):
1290 1295 return self._status.modified
1291 1296 def added(self):
1292 1297 return self._status.added
1293 1298 def removed(self):
1294 1299 return self._status.removed
1295 1300 def deleted(self):
1296 1301 return self._status.deleted
1297 1302 def branch(self):
1298 1303 return encoding.tolocal(self._extra['branch'])
1299 1304 def closesbranch(self):
1300 1305 return 'close' in self._extra
1301 1306 def extra(self):
1302 1307 return self._extra
1303 1308
1304 1309 def tags(self):
1305 1310 return []
1306 1311
1307 1312 def bookmarks(self):
1308 1313 b = []
1309 1314 for p in self.parents():
1310 1315 b.extend(p.bookmarks())
1311 1316 return b
1312 1317
1313 1318 def phase(self):
1314 1319 phase = phases.draft # default phase to draft
1315 1320 for p in self.parents():
1316 1321 phase = max(phase, p.phase())
1317 1322 return phase
1318 1323
1319 1324 def hidden(self):
1320 1325 return False
1321 1326
1322 1327 def children(self):
1323 1328 return []
1324 1329
1325 1330 def flags(self, path):
1326 1331 if '_manifest' in self.__dict__:
1327 1332 try:
1328 1333 return self._manifest.flags(path)
1329 1334 except KeyError:
1330 1335 return ''
1331 1336
1332 1337 try:
1333 1338 return self._flagfunc(path)
1334 1339 except OSError:
1335 1340 return ''
1336 1341
1337 1342 def ancestor(self, c2):
1338 1343 """return the "best" ancestor context of self and c2"""
1339 1344 return self._parents[0].ancestor(c2) # punt on two parents for now
1340 1345
1341 1346 def walk(self, match):
1342 1347 '''Generates matching file names.'''
1343 1348 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1344 1349 True, False))
1345 1350
1346 1351 def matches(self, match):
1347 1352 return sorted(self._repo.dirstate.matches(match))
1348 1353
1349 1354 def ancestors(self):
1350 1355 for p in self._parents:
1351 1356 yield p
1352 1357 for a in self._repo.changelog.ancestors(
1353 1358 [p.rev() for p in self._parents]):
1354 1359 yield changectx(self._repo, a)
1355 1360
1356 1361 def markcommitted(self, node):
1357 1362 """Perform post-commit cleanup necessary after committing this ctx
1358 1363
1359 1364 Specifically, this updates backing stores this working context
1360 1365 wraps to reflect the fact that the changes reflected by this
1361 1366 workingctx have been committed. For example, it marks
1362 1367 modified and added files as normal in the dirstate.
1363 1368
1364 1369 """
1365 1370
1366 1371 self._repo.dirstate.beginparentchange()
1367 1372 for f in self.modified() + self.added():
1368 1373 self._repo.dirstate.normal(f)
1369 1374 for f in self.removed():
1370 1375 self._repo.dirstate.drop(f)
1371 1376 self._repo.dirstate.setparents(node)
1372 1377 self._repo.dirstate.endparentchange()
1373 1378
1374 1379 # write changes out explicitly, because nesting wlock at
1375 1380 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1376 1381 # from immediately doing so for subsequent changing files
1377 1382 self._repo.dirstate.write(self._repo.currenttransaction())
1378 1383
1379 1384 class workingctx(committablectx):
1380 1385 """A workingctx object makes access to data related to
1381 1386 the current working directory convenient.
1382 1387 date - any valid date string or (unixtime, offset), or None.
1383 1388 user - username string, or None.
1384 1389 extra - a dictionary of extra values, or None.
1385 1390 changes - a list of file lists as returned by localrepo.status()
1386 1391 or None to use the repository status.
1387 1392 """
1388 1393 def __init__(self, repo, text="", user=None, date=None, extra=None,
1389 1394 changes=None):
1390 1395 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1391 1396
1392 1397 def __iter__(self):
1393 1398 d = self._repo.dirstate
1394 1399 for f in d:
1395 1400 if d[f] != 'r':
1396 1401 yield f
1397 1402
1398 1403 def __contains__(self, key):
1399 1404 return self._repo.dirstate[key] not in "?r"
1400 1405
1401 1406 def hex(self):
1402 1407 return hex(wdirid)
1403 1408
1404 1409 @propertycache
1405 1410 def _parents(self):
1406 1411 p = self._repo.dirstate.parents()
1407 1412 if p[1] == nullid:
1408 1413 p = p[:-1]
1409 1414 return [changectx(self._repo, x) for x in p]
1410 1415
1411 1416 def filectx(self, path, filelog=None):
1412 1417 """get a file context from the working directory"""
1413 1418 return workingfilectx(self._repo, path, workingctx=self,
1414 1419 filelog=filelog)
1415 1420
1416 1421 def dirty(self, missing=False, merge=True, branch=True):
1417 1422 "check whether a working directory is modified"
1418 1423 # check subrepos first
1419 1424 for s in sorted(self.substate):
1420 1425 if self.sub(s).dirty():
1421 1426 return True
1422 1427 # check current working dir
1423 1428 return ((merge and self.p2()) or
1424 1429 (branch and self.branch() != self.p1().branch()) or
1425 1430 self.modified() or self.added() or self.removed() or
1426 1431 (missing and self.deleted()))
1427 1432
1428 1433 def add(self, list, prefix=""):
1429 1434 join = lambda f: os.path.join(prefix, f)
1430 1435 with self._repo.wlock():
1431 1436 ui, ds = self._repo.ui, self._repo.dirstate
1432 1437 rejected = []
1433 1438 lstat = self._repo.wvfs.lstat
1434 1439 for f in list:
1435 1440 scmutil.checkportable(ui, join(f))
1436 1441 try:
1437 1442 st = lstat(f)
1438 1443 except OSError:
1439 1444 ui.warn(_("%s does not exist!\n") % join(f))
1440 1445 rejected.append(f)
1441 1446 continue
1442 1447 if st.st_size > 10000000:
1443 1448 ui.warn(_("%s: up to %d MB of RAM may be required "
1444 1449 "to manage this file\n"
1445 1450 "(use 'hg revert %s' to cancel the "
1446 1451 "pending addition)\n")
1447 1452 % (f, 3 * st.st_size // 1000000, join(f)))
1448 1453 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1449 1454 ui.warn(_("%s not added: only files and symlinks "
1450 1455 "supported currently\n") % join(f))
1451 1456 rejected.append(f)
1452 1457 elif ds[f] in 'amn':
1453 1458 ui.warn(_("%s already tracked!\n") % join(f))
1454 1459 elif ds[f] == 'r':
1455 1460 ds.normallookup(f)
1456 1461 else:
1457 1462 ds.add(f)
1458 1463 return rejected
1459 1464
1460 1465 def forget(self, files, prefix=""):
1461 1466 join = lambda f: os.path.join(prefix, f)
1462 1467 with self._repo.wlock():
1463 1468 rejected = []
1464 1469 for f in files:
1465 1470 if f not in self._repo.dirstate:
1466 1471 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1467 1472 rejected.append(f)
1468 1473 elif self._repo.dirstate[f] != 'a':
1469 1474 self._repo.dirstate.remove(f)
1470 1475 else:
1471 1476 self._repo.dirstate.drop(f)
1472 1477 return rejected
1473 1478
1474 1479 def undelete(self, list):
1475 1480 pctxs = self.parents()
1476 1481 with self._repo.wlock():
1477 1482 for f in list:
1478 1483 if self._repo.dirstate[f] != 'r':
1479 1484 self._repo.ui.warn(_("%s not removed!\n") % f)
1480 1485 else:
1481 1486 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1482 1487 t = fctx.data()
1483 1488 self._repo.wwrite(f, t, fctx.flags())
1484 1489 self._repo.dirstate.normal(f)
1485 1490
1486 1491 def copy(self, source, dest):
1487 1492 try:
1488 1493 st = self._repo.wvfs.lstat(dest)
1489 1494 except OSError as err:
1490 1495 if err.errno != errno.ENOENT:
1491 1496 raise
1492 1497 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1493 1498 return
1494 1499 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1495 1500 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1496 1501 "symbolic link\n") % dest)
1497 1502 else:
1498 1503 with self._repo.wlock():
1499 1504 if self._repo.dirstate[dest] in '?':
1500 1505 self._repo.dirstate.add(dest)
1501 1506 elif self._repo.dirstate[dest] in 'r':
1502 1507 self._repo.dirstate.normallookup(dest)
1503 1508 self._repo.dirstate.copy(source, dest)
1504 1509
1505 1510 def match(self, pats=[], include=None, exclude=None, default='glob',
1506 1511 listsubrepos=False, badfn=None):
1507 1512 r = self._repo
1508 1513
1509 1514 # Only a case insensitive filesystem needs magic to translate user input
1510 1515 # to actual case in the filesystem.
1511 1516 if not util.fscasesensitive(r.root):
1512 1517 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1513 1518 exclude, default, r.auditor, self,
1514 1519 listsubrepos=listsubrepos,
1515 1520 badfn=badfn)
1516 1521 return matchmod.match(r.root, r.getcwd(), pats,
1517 1522 include, exclude, default,
1518 1523 auditor=r.auditor, ctx=self,
1519 1524 listsubrepos=listsubrepos, badfn=badfn)
1520 1525
1521 1526 def _filtersuspectsymlink(self, files):
1522 1527 if not files or self._repo.dirstate._checklink:
1523 1528 return files
1524 1529
1525 1530 # Symlink placeholders may get non-symlink-like contents
1526 1531 # via user error or dereferencing by NFS or Samba servers,
1527 1532 # so we filter out any placeholders that don't look like a
1528 1533 # symlink
1529 1534 sane = []
1530 1535 for f in files:
1531 1536 if self.flags(f) == 'l':
1532 1537 d = self[f].data()
1533 1538 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1534 1539 self._repo.ui.debug('ignoring suspect symlink placeholder'
1535 1540 ' "%s"\n' % f)
1536 1541 continue
1537 1542 sane.append(f)
1538 1543 return sane
1539 1544
1540 1545 def _checklookup(self, files):
1541 1546 # check for any possibly clean files
1542 1547 if not files:
1543 1548 return [], []
1544 1549
1545 1550 modified = []
1546 1551 fixup = []
1547 1552 pctx = self._parents[0]
1548 1553 # do a full compare of any files that might have changed
1549 1554 for f in sorted(files):
1550 1555 if (f not in pctx or self.flags(f) != pctx.flags(f)
1551 1556 or pctx[f].cmp(self[f])):
1552 1557 modified.append(f)
1553 1558 else:
1554 1559 fixup.append(f)
1555 1560
1556 1561 # update dirstate for files that are actually clean
1557 1562 if fixup:
1558 1563 try:
1559 1564 # updating the dirstate is optional
1560 1565 # so we don't wait on the lock
1561 1566 # wlock can invalidate the dirstate, so cache normal _after_
1562 1567 # taking the lock
1563 1568 with self._repo.wlock(False):
1564 1569 normal = self._repo.dirstate.normal
1565 1570 for f in fixup:
1566 1571 normal(f)
1567 1572 # write changes out explicitly, because nesting
1568 1573 # wlock at runtime may prevent 'wlock.release()'
1569 1574 # after this block from doing so for subsequent
1570 1575 # changing files
1571 1576 self._repo.dirstate.write(self._repo.currenttransaction())
1572 1577 except error.LockError:
1573 1578 pass
1574 1579 return modified, fixup
1575 1580
1576 1581 def _manifestmatches(self, match, s):
1577 1582 """Slow path for workingctx
1578 1583
1579 1584 The fast path is when we compare the working directory to its parent
1580 1585 which means this function is comparing with a non-parent; therefore we
1581 1586 need to build a manifest and return what matches.
1582 1587 """
1583 1588 mf = self._repo['.']._manifestmatches(match, s)
1584 1589 for f in s.modified + s.added:
1585 1590 mf[f] = _newnode
1586 1591 mf.setflag(f, self.flags(f))
1587 1592 for f in s.removed:
1588 1593 if f in mf:
1589 1594 del mf[f]
1590 1595 return mf
1591 1596
1592 1597 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1593 1598 unknown=False):
1594 1599 '''Gets the status from the dirstate -- internal use only.'''
1595 1600 listignored, listclean, listunknown = ignored, clean, unknown
1596 1601 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1597 1602 subrepos = []
1598 1603 if '.hgsub' in self:
1599 1604 subrepos = sorted(self.substate)
1600 1605 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1601 1606 listclean, listunknown)
1602 1607
1603 1608 # check for any possibly clean files
1604 1609 if cmp:
1605 1610 modified2, fixup = self._checklookup(cmp)
1606 1611 s.modified.extend(modified2)
1607 1612
1608 1613 # update dirstate for files that are actually clean
1609 1614 if fixup and listclean:
1610 1615 s.clean.extend(fixup)
1611 1616
1612 1617 if match.always():
1613 1618 # cache for performance
1614 1619 if s.unknown or s.ignored or s.clean:
1615 1620 # "_status" is cached with list*=False in the normal route
1616 1621 self._status = scmutil.status(s.modified, s.added, s.removed,
1617 1622 s.deleted, [], [], [])
1618 1623 else:
1619 1624 self._status = s
1620 1625
1621 1626 return s
1622 1627
1623 1628 def _buildstatus(self, other, s, match, listignored, listclean,
1624 1629 listunknown):
1625 1630 """build a status with respect to another context
1626 1631
1627 1632 This includes logic for maintaining the fast path of status when
1628 1633 comparing the working directory against its parent: a new manifest
1629 1634 is only built when self (the working directory) is compared against
1630 1635 something other than its parent (repo['.']).
1631 1636 """
1632 1637 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1633 1638 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1634 1639 # might have accidentally ended up with the entire contents of the file
1635 1640 # they are supposed to be linking to.
1636 1641 s.modified[:] = self._filtersuspectsymlink(s.modified)
1637 1642 if other != self._repo['.']:
1638 1643 s = super(workingctx, self)._buildstatus(other, s, match,
1639 1644 listignored, listclean,
1640 1645 listunknown)
1641 1646 return s
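# Example (illustrative; the revision number is made up): the fast path is
# taken when status is computed against the working directory's parent, while
# any other base revision falls through to the manifest comparison in the
# superclass:
#
#   repo.status()              # '.' vs. working dir: dirstate only
#   repo.status(1000, None)    # rev 1000 vs. working dir: builds manifests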
1642 1647
1643 1648 def _matchstatus(self, other, match):
1644 1649 """override the match method with a filter for directory patterns
1645 1650
1646 1651 We use inheritance to customize the match.bad method only for
1647 1652 workingctx, since the override is only relevant when the working
1648 1653 directory is compared against a non-parent changeset.
1649 1654
1650 1655 When comparing against the working directory's parent, we just use
1651 1656 the default match object sent to us.
1652 1657 """
1653 1658 superself = super(workingctx, self)
1654 1659 match = superself._matchstatus(other, match)
1655 1660 if other != self._repo['.']:
1656 1661 def bad(f, msg):
1657 1662 # 'f' may be a directory pattern from 'match.files()',
1658 1663 # so 'f not in other' is not enough
1659 1664 if f not in other and not other.hasdir(f):
1660 1665 self._repo.ui.warn('%s: %s\n' %
1661 1666 (self._repo.dirstate.pathto(f), msg))
1662 1667 match.bad = bad
1663 1668 return match
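# Example (illustrative, hypothetical path): with the override above, a
# pattern such as 'somedir' given against an old revision is only reported
# as bad when it is neither a file nor a directory in that revision; a bare
# directory name that exists there stays silent.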
1664 1669
1665 1670 class committablefilectx(basefilectx):
1666 1671 """A committablefilectx provides common functionality for a file context
1667 1672 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1668 1673 def __init__(self, repo, path, filelog=None, ctx=None):
1669 1674 self._repo = repo
1670 1675 self._path = path
1671 1676 self._changeid = None
1672 1677 self._filerev = self._filenode = None
1673 1678
1674 1679 if filelog is not None:
1675 1680 self._filelog = filelog
1676 1681 if ctx:
1677 1682 self._changectx = ctx
1678 1683
1679 1684 def __nonzero__(self):
1680 1685 return True
1681 1686
1682 1687 def linkrev(self):
1683 1688 # linked to self._changectx whether or not the file is modified
1684 1689 return self.rev()
1685 1690
1686 1691 def parents(self):
1687 1692 '''return parent filectxs, following copies if necessary'''
1688 1693 def filenode(ctx, path):
1689 1694 return ctx._manifest.get(path, nullid)
1690 1695
1691 1696 path = self._path
1692 1697 fl = self._filelog
1693 1698 pcl = self._changectx._parents
1694 1699 renamed = self.renamed()
1695 1700
1696 1701 if renamed:
1697 1702 pl = [renamed + (None,)]
1698 1703 else:
1699 1704 pl = [(path, filenode(pcl[0], path), fl)]
1700 1705
1701 1706 for pc in pcl[1:]:
1702 1707 pl.append((path, filenode(pc, path), fl))
1703 1708
1704 1709 return [self._parentfilectx(p, fileid=n, filelog=l)
1705 1710 for p, n, l in pl if n != nullid]
1706 1711
1707 1712 def children(self):
1708 1713 return []
1709 1714
1710 1715 class workingfilectx(committablefilectx):
1711 1716 """A workingfilectx object makes access to data related to a particular
1712 1717 file in the working directory convenient."""
1713 1718 def __init__(self, repo, path, filelog=None, workingctx=None):
1714 1719 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1715 1720
1716 1721 @propertycache
1717 1722 def _changectx(self):
1718 1723 return workingctx(self._repo)
1719 1724
1720 1725 def data(self):
1721 1726 return self._repo.wread(self._path)
1722 1727 def renamed(self):
1723 1728 rp = self._repo.dirstate.copied(self._path)
1724 1729 if not rp:
1725 1730 return None
1726 1731 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1727 1732
1728 1733 def size(self):
1729 1734 return self._repo.wvfs.lstat(self._path).st_size
1730 1735 def date(self):
1731 1736 t, tz = self._changectx.date()
1732 1737 try:
1733 1738 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1734 1739 except OSError as err:
1735 1740 if err.errno != errno.ENOENT:
1736 1741 raise
1737 1742 return (t, tz)
1738 1743
1739 1744 def cmp(self, fctx):
1740 1745 """compare with other file context
1741 1746
1742 1747 returns True if different from fctx.
1743 1748 """
1744 1749 # fctx should be a filectx (not a workingfilectx)
1745 1750 # invert comparison to reuse the same code path
1746 1751 return fctx.cmp(self)
1747 1752
1748 1753 def remove(self, ignoremissing=False):
1749 1754 """wraps unlink for a repo's working directory"""
1750 1755 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1751 1756
1752 1757 def write(self, data, flags):
1753 1758 """wraps repo.wwrite"""
1754 1759 self._repo.wwrite(self._path, data, flags)
1755 1760
1756 1761 class workingcommitctx(workingctx):
1757 1762 """A workingcommitctx object makes access to data related to
1758 1763 the revision being committed convenient.
1759 1764
1760 1765 This hides changes in the working directory if they aren't
1761 1766 being committed in this context.
1762 1767 """
1763 1768 def __init__(self, repo, changes,
1764 1769 text="", user=None, date=None, extra=None):
1765 1770 super(workingctx, self).__init__(repo, text, user, date, extra,
1766 1771 changes)
1767 1772
1768 1773 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1769 1774 unknown=False):
1770 1775 """Return matched files only in ``self._status``
1771 1776
1772 1777 Uncommitted files appear "clean" via this context, even if
1773 1778 they aren't actually so in the working directory.
1774 1779 """
1775 1780 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1776 1781 if clean:
1777 1782 clean = [f for f in self._manifest if f not in self._changedset]
1778 1783 else:
1779 1784 clean = []
1780 1785 return scmutil.status([f for f in self._status.modified if match(f)],
1781 1786 [f for f in self._status.added if match(f)],
1782 1787 [f for f in self._status.removed if match(f)],
1783 1788 [], [], [], clean)
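# Example (illustrative, hypothetical paths): while committing only 'a.txt',
# code that asks this workingcommitctx for status sees just 'a.txt' as
# modified; pending edits to an uncommitted 'b.txt' show up as clean (when a
# clean listing is requested) rather than as modified.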
1784 1789
1785 1790 @propertycache
1786 1791 def _changedset(self):
1787 1792 """Return the set of files changed in this context
1788 1793 """
1789 1794 changed = set(self._status.modified)
1790 1795 changed.update(self._status.added)
1791 1796 changed.update(self._status.removed)
1792 1797 return changed
1793 1798
1794 1799 def makecachingfilectxfn(func):
1795 1800 """Create a filectxfn that caches based on the path.
1796 1801
1797 1802 We can't use util.cachefunc because it uses all arguments as the cache
1798 1803 key and this creates a cycle since the arguments include the repo and
1799 1804 memctx.
1800 1805 """
1801 1806 cache = {}
1802 1807
1803 1808 def getfilectx(repo, memctx, path):
1804 1809 if path not in cache:
1805 1810 cache[path] = func(repo, memctx, path)
1806 1811 return cache[path]
1807 1812
1808 1813 return getfilectx
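# Example (illustrative sketch; 'expensive_read' and 'fetch_data_somehow' are
# hypothetical names): wrapping a plain filectxfn so repeated lookups of the
# same path during one commit reuse the first result:
#
#   def expensive_read(repo, memctx, path):
#       return memfilectx(repo, path, fetch_data_somehow(path),
#                         memctx=memctx)
#
#   filectxfn = makecachingfilectxfn(expensive_read)
#   ctx = memctx(repo, parents, text, files, filectxfn)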
1809 1814
1810 1815 class memctx(committablectx):
1811 1816 """Use memctx to perform in-memory commits via localrepo.commitctx().
1812 1817
1813 1818 Revision information is supplied at initialization time, while
1814 1819 related file data is made available through a callback
1815 1820 mechanism. 'repo' is the current localrepo, 'parents' is a
1816 1821 sequence of two parent revisions identifiers (pass None for every
1817 1822 missing parent), 'text' is the commit message and 'files' lists
1818 1823 names of files touched by the revision (normalized and relative to
1819 1824 repository root).
1820 1825
1821 1826 filectxfn(repo, memctx, path) is a callable receiving the
1822 1827 repository, the current memctx object and the normalized path of
1823 1828 requested file, relative to repository root. It is fired by the
1824 1829 commit function for every file in 'files', but the call order is
1825 1830 undefined. If the file is available in the revision being
1826 1831 committed (updated or added), filectxfn returns a memfilectx
1827 1832 object. If the file was removed, filectxfn raises an
1828 1833 IOError. Moved files are represented by marking the source file
1829 1834 removed and the new file added with copy information (see
1830 1835 memfilectx).
1831 1836
1832 1837 'user' is the committer name (defaulting to the current repository
1833 1838 username), 'date' is the commit date in any format supported by
1834 1839 util.parsedate() (defaulting to the current date), and 'extra' is a
1835 1840 dictionary of metadata, or is left empty.
1836 1841 """
1837 1842
1838 1843 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1839 1844 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1840 1845 # this field to determine what to do in filectxfn.
1841 1846 _returnnoneformissingfiles = True
1842 1847
1843 1848 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1844 1849 date=None, extra=None, editor=False):
1845 1850 super(memctx, self).__init__(repo, text, user, date, extra)
1846 1851 self._rev = None
1847 1852 self._node = None
1848 1853 parents = [(p or nullid) for p in parents]
1849 1854 p1, p2 = parents
1850 1855 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1851 1856 files = sorted(set(files))
1852 1857 self._files = files
1853 1858 self.substate = {}
1854 1859
1855 1860 # if store is not callable, wrap it in a function
1856 1861 if not callable(filectxfn):
1857 1862 def getfilectx(repo, memctx, path):
1858 1863 fctx = filectxfn[path]
1859 1864 # this is weird but apparently we only keep track of one parent
1860 1865 # (why not only store that instead of a tuple?)
1861 1866 copied = fctx.renamed()
1862 1867 if copied:
1863 1868 copied = copied[0]
1864 1869 return memfilectx(repo, path, fctx.data(),
1865 1870 islink=fctx.islink(), isexec=fctx.isexec(),
1866 1871 copied=copied, memctx=memctx)
1867 1872 self._filectxfn = getfilectx
1868 1873 else:
1869 1874 # memoizing increases performance for e.g. vcs convert scenarios.
1870 1875 self._filectxfn = makecachingfilectxfn(filectxfn)
1871 1876
1872 1877 if extra:
1873 1878 self._extra = extra.copy()
1874 1879 else:
1875 1880 self._extra = {}
1876 1881
1877 1882 if self._extra.get('branch', '') == '':
1878 1883 self._extra['branch'] = 'default'
1879 1884
1880 1885 if editor:
1881 1886 self._text = editor(self._repo, self, [])
1882 1887 self._repo.savecommitmessage(self._text)
1883 1888
1884 1889 def filectx(self, path, filelog=None):
1885 1890 """get a file context from the working directory
1886 1891
1887 1892 Returns None if file doesn't exist and should be removed."""
1888 1893 return self._filectxfn(self._repo, self, path)
1889 1894
1890 1895 def commit(self):
1891 1896 """commit context to the repo"""
1892 1897 return self._repo.commitctx(self)
1893 1898
1894 1899 @propertycache
1895 1900 def _manifest(self):
1896 1901 """generate a manifest based on the return values of filectxfn"""
1897 1902
1898 1903 # keep this simple for now; just worry about p1
1899 1904 pctx = self._parents[0]
1900 1905 man = pctx.manifest().copy()
1901 1906
1902 1907 for f in self._status.modified:
1903 1908 p1node = nullid
1904 1909 p2node = nullid
1905 1910 p = pctx[f].parents() # if file isn't in pctx, check p2?
1906 1911 if len(p) > 0:
1907 1912 p1node = p[0].filenode()
1908 1913 if len(p) > 1:
1909 1914 p2node = p[1].filenode()
1910 1915 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1911 1916
1912 1917 for f in self._status.added:
1913 1918 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1914 1919
1915 1920 for f in self._status.removed:
1916 1921 if f in man:
1917 1922 del man[f]
1918 1923
1919 1924 return man
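# Illustrative sketch (not from the original file) of what revlog.hash()
# computes above, assuming the usual Mercurial file-node scheme of sha1 over
# the sorted parent nodes followed by the file data:
#
#   import hashlib
#   def filenodehash(data, p1node, p2node):
#       s = hashlib.sha1(min(p1node, p2node))
#       s.update(max(p1node, p2node))
#       s.update(data)
#       return s.digest()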
1920 1925
1921 1926 @propertycache
1922 1927 def _status(self):
1923 1928 """Calculate exact status from ``files`` specified at construction
1924 1929 """
1925 1930 man1 = self.p1().manifest()
1926 1931 p2 = self._parents[1]
1927 1932 # "1 < len(self._parents)" can't be used for checking
1928 1933 # existence of the 2nd parent, because "memctx._parents" is
1929 1934 # explicitly initialized with a list whose length is always 2.
1930 1935 if p2.node() != nullid:
1931 1936 man2 = p2.manifest()
1932 1937 managing = lambda f: f in man1 or f in man2
1933 1938 else:
1934 1939 managing = lambda f: f in man1
1935 1940
1936 1941 modified, added, removed = [], [], []
1937 1942 for f in self._files:
1938 1943 if not managing(f):
1939 1944 added.append(f)
1940 1945 elif self[f]:
1941 1946 modified.append(f)
1942 1947 else:
1943 1948 removed.append(f)
1944 1949
1945 1950 return scmutil.status(modified, added, removed, [], [], [], [])
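# Example (illustrative, hypothetical paths): given files=['new.txt',
# 'changed.txt', 'gone.txt'] where only the last two exist in a parent
# manifest, and filectxfn returns None for 'gone.txt':
#
#   added    -> ['new.txt']       # managed by neither parent
#   modified -> ['changed.txt']   # managed, filectxfn returned a filectx
#   removed  -> ['gone.txt']      # managed, but self[f] was None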
1946 1951
1947 1952 class memfilectx(committablefilectx):
1948 1953 """memfilectx represents an in-memory file to commit.
1949 1954
1950 1955 See memctx and committablefilectx for more details.
1951 1956 """
1952 1957 def __init__(self, repo, path, data, islink=False,
1953 1958 isexec=False, copied=None, memctx=None):
1954 1959 """
1955 1960 path is the normalized file path relative to repository root.
1956 1961 data is the file content as a string.
1957 1962 islink is True if the file is a symbolic link.
1958 1963 isexec is True if the file is executable.
1959 1964 copied is the source file path if current file was copied in the
1960 1965 revision being committed, or None."""
1961 1966 super(memfilectx, self).__init__(repo, path, None, memctx)
1962 1967 self._data = data
1963 1968 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1964 1969 self._copied = None
1965 1970 if copied:
1966 1971 self._copied = (copied, nullid)
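# Example (illustrative, hypothetical paths): representing a copy inside an
# in-memory commit; the new file carries the copy source, and for a move the
# caller also lists the old name as removed in the memctx 'files':
#
#   fctx = memfilectx(repo, 'renamed.txt', data,
#                     copied='original.txt', memctx=mctx)
#   fctx.renamed()    # -> ('original.txt', nullid)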
1967 1972
1968 1973 def data(self):
1969 1974 return self._data
1970 1975 def size(self):
1971 1976 return len(self.data())
1972 1977 def flags(self):
1973 1978 return self._flags
1974 1979 def renamed(self):
1975 1980 return self._copied
1976 1981
1977 1982 def remove(self, ignoremissing=False):
1978 1983 """wraps unlink for a repo's working directory"""
1979 1984 # need to figure out what to do here
1980 1985 del self._changectx[self._path]
1981 1986
1982 1987 def write(self, data, flags):
1983 1988 """wraps repo.wwrite"""
1984 1989 self._data = data