context: make sure __str__ works, also when there is no _changectx...
Mads Kiilerich
r30270:e25ce44f default
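The hunk below wraps basefilectx.__str__ in a try/except so that rendering a filectx as a string no longer raises when its changeset context cannot be resolved. A minimal sketch of the intended behavior, assuming a hypothetical repo object and file path (not part of this diff):

    # Sketch only: 'repo' and 'some/file' are hypothetical placeholders.
    fctx = repo['tip']['some/file']
    str(fctx)   # normally "some/file@<short node>"
    # With this patch, if resolving the backing changectx raises
    # error.LookupError, str(fctx) returns "some/file@???" instead of
    # propagating the exception (e.g. when called via __repr__).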
@@ -1,1984 +1,1987 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 wdirid,
23 23 )
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 fileset,
28 28 match as matchmod,
29 29 mdiff,
30 30 obsolete as obsmod,
31 31 patch,
32 32 phases,
33 33 repoview,
34 34 revlog,
35 35 scmutil,
36 36 subrepo,
37 37 util,
38 38 )
39 39
40 40 propertycache = util.propertycache
41 41
42 42 # Phony node value to stand-in for new files in some uses of
43 43 # manifests. Manifests support 21-byte hashes for nodes which are
44 44 # dirty in the working copy.
45 45 _newnode = '!' * 21
46 46
47 47 nonascii = re.compile(r'[^\x21-\x7f]').search
48 48
49 49 class basectx(object):
50 50 """A basectx object represents the common logic for its children:
51 51 changectx: read-only context that is already present in the repo,
52 52 workingctx: a context that represents the working directory and can
53 53 be committed,
54 54 memctx: a context that represents changes in-memory and can also
55 55 be committed."""
56 56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 57 if isinstance(changeid, basectx):
58 58 return changeid
59 59
60 60 o = super(basectx, cls).__new__(cls)
61 61
62 62 o._repo = repo
63 63 o._rev = nullrev
64 64 o._node = nullid
65 65
66 66 return o
67 67
68 68 def __str__(self):
69 69 return short(self.node())
70 70
71 71 def __int__(self):
72 72 return self.rev()
73 73
74 74 def __repr__(self):
75 75 return "<%s %s>" % (type(self).__name__, str(self))
76 76
77 77 def __eq__(self, other):
78 78 try:
79 79 return type(self) == type(other) and self._rev == other._rev
80 80 except AttributeError:
81 81 return False
82 82
83 83 def __ne__(self, other):
84 84 return not (self == other)
85 85
86 86 def __contains__(self, key):
87 87 return key in self._manifest
88 88
89 89 def __getitem__(self, key):
90 90 return self.filectx(key)
91 91
92 92 def __iter__(self):
93 93 return iter(self._manifest)
94 94
95 95 def _manifestmatches(self, match, s):
96 96 """generate a new manifest filtered by the match argument
97 97
98 98 This method is for internal use only and mainly exists to provide an
99 99 object oriented way for other contexts to customize the manifest
100 100 generation.
101 101 """
102 102 return self.manifest().matches(match)
103 103
104 104 def _matchstatus(self, other, match):
105 105 """return match.always if match is none
106 106
107 107 This internal method provides a way for child objects to override the
108 108 match operator.
109 109 """
110 110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111 111
112 112 def _buildstatus(self, other, s, match, listignored, listclean,
113 113 listunknown):
114 114 """build a status with respect to another context"""
115 115 # Load earliest manifest first for caching reasons. More specifically,
116 116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 119 # delta to what's in the cache. So that's one full reconstruction + one
120 120 # delta application.
121 121 if self.rev() is not None and self.rev() < other.rev():
122 122 self.manifest()
123 123 mf1 = other._manifestmatches(match, s)
124 124 mf2 = self._manifestmatches(match, s)
125 125
126 126 modified, added = [], []
127 127 removed = []
128 128 clean = []
129 129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 130 deletedset = set(deleted)
131 131 d = mf1.diff(mf2, clean=listclean)
132 132 for fn, value in d.iteritems():
133 133 if fn in deletedset:
134 134 continue
135 135 if value is None:
136 136 clean.append(fn)
137 137 continue
138 138 (node1, flag1), (node2, flag2) = value
139 139 if node1 is None:
140 140 added.append(fn)
141 141 elif node2 is None:
142 142 removed.append(fn)
143 143 elif flag1 != flag2:
144 144 modified.append(fn)
145 145 elif node2 != _newnode:
146 146 # When comparing files between two commits, we save time by
147 147 # not comparing the file contents when the nodeids differ.
148 148 # Note that this means we incorrectly report a reverted change
149 149 # to a file as a modification.
150 150 modified.append(fn)
151 151 elif self[fn].cmp(other[fn]):
152 152 modified.append(fn)
153 153 else:
154 154 clean.append(fn)
155 155
156 156 if removed:
157 157 # need to filter files if they are already reported as removed
158 158 unknown = [fn for fn in unknown if fn not in mf1]
159 159 ignored = [fn for fn in ignored if fn not in mf1]
160 160 # if they're deleted, don't report them as removed
161 161 removed = [fn for fn in removed if fn not in deletedset]
162 162
163 163 return scmutil.status(modified, added, removed, deleted, unknown,
164 164 ignored, clean)
165 165
166 166 @propertycache
167 167 def substate(self):
168 168 return subrepo.state(self, self._repo.ui)
169 169
170 170 def subrev(self, subpath):
171 171 return self.substate[subpath][1]
172 172
173 173 def rev(self):
174 174 return self._rev
175 175 def node(self):
176 176 return self._node
177 177 def hex(self):
178 178 return hex(self.node())
179 179 def manifest(self):
180 180 return self._manifest
181 181 def repo(self):
182 182 return self._repo
183 183 def phasestr(self):
184 184 return phases.phasenames[self.phase()]
185 185 def mutable(self):
186 186 return self.phase() > phases.public
187 187
188 188 def getfileset(self, expr):
189 189 return fileset.getfileset(self, expr)
190 190
191 191 def obsolete(self):
192 192 """True if the changeset is obsolete"""
193 193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194 194
195 195 def extinct(self):
196 196 """True if the changeset is extinct"""
197 197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198 198
199 199 def unstable(self):
200 200 """True if the changeset is not obsolete but it's ancestor are"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202 202
203 203 def bumped(self):
204 204 """True if the changeset try to be a successor of a public changeset
205 205
206 206 Only non-public and non-obsolete changesets may be bumped.
207 207 """
208 208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209 209
210 210 def divergent(self):
211 211 """Is a successors of a changeset with multiple possible successors set
212 212
213 213 Only non-public and non-obsolete changesets may be divergent.
214 214 """
215 215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216 216
217 217 def troubled(self):
218 218 """True if the changeset is either unstable, bumped or divergent"""
219 219 return self.unstable() or self.bumped() or self.divergent()
220 220
221 221 def troubles(self):
222 222 """return the list of troubles affecting this changesets.
223 223
224 224 Troubles are returned as strings. possible values are:
225 225 - unstable,
226 226 - bumped,
227 227 - divergent.
228 228 """
229 229 troubles = []
230 230 if self.unstable():
231 231 troubles.append('unstable')
232 232 if self.bumped():
233 233 troubles.append('bumped')
234 234 if self.divergent():
235 235 troubles.append('divergent')
236 236 return troubles
237 237
238 238 def parents(self):
239 239 """return contexts for each parent changeset"""
240 240 return self._parents
241 241
242 242 def p1(self):
243 243 return self._parents[0]
244 244
245 245 def p2(self):
246 246 parents = self._parents
247 247 if len(parents) == 2:
248 248 return parents[1]
249 249 return changectx(self._repo, nullrev)
250 250
251 251 def _fileinfo(self, path):
252 252 if '_manifest' in self.__dict__:
253 253 try:
254 254 return self._manifest[path], self._manifest.flags(path)
255 255 except KeyError:
256 256 raise error.ManifestLookupError(self._node, path,
257 257 _('not found in manifest'))
258 258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 259 if path in self._manifestdelta:
260 260 return (self._manifestdelta[path],
261 261 self._manifestdelta.flags(path))
262 262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
263 263 if not node:
264 264 raise error.ManifestLookupError(self._node, path,
265 265 _('not found in manifest'))
266 266
267 267 return node, flag
268 268
269 269 def filenode(self, path):
270 270 return self._fileinfo(path)[0]
271 271
272 272 def flags(self, path):
273 273 try:
274 274 return self._fileinfo(path)[1]
275 275 except error.LookupError:
276 276 return ''
277 277
278 278 def sub(self, path, allowcreate=True):
279 279 '''return a subrepo for the stored revision of path, never wdir()'''
280 280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
281 281
282 282 def nullsub(self, path, pctx):
283 283 return subrepo.nullsubrepo(self, path, pctx)
284 284
285 285 def workingsub(self, path):
286 286 '''return a subrepo for the stored revision, or wdir if this is a wdir
287 287 context.
288 288 '''
289 289 return subrepo.subrepo(self, path, allowwdir=True)
290 290
291 291 def match(self, pats=[], include=None, exclude=None, default='glob',
292 292 listsubrepos=False, badfn=None):
293 293 r = self._repo
294 294 return matchmod.match(r.root, r.getcwd(), pats,
295 295 include, exclude, default,
296 296 auditor=r.nofsauditor, ctx=self,
297 297 listsubrepos=listsubrepos, badfn=badfn)
298 298
299 299 def diff(self, ctx2=None, match=None, **opts):
300 300 """Returns a diff generator for the given contexts and matcher"""
301 301 if ctx2 is None:
302 302 ctx2 = self.p1()
303 303 if ctx2 is not None:
304 304 ctx2 = self._repo[ctx2]
305 305 diffopts = patch.diffopts(self._repo.ui, opts)
306 306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
307 307
308 308 def dirs(self):
309 309 return self._manifest.dirs()
310 310
311 311 def hasdir(self, dir):
312 312 return self._manifest.hasdir(dir)
313 313
314 314 def dirty(self, missing=False, merge=True, branch=True):
315 315 return False
316 316
317 317 def status(self, other=None, match=None, listignored=False,
318 318 listclean=False, listunknown=False, listsubrepos=False):
319 319 """return status of files between two nodes or node and working
320 320 directory.
321 321
322 322 If other is None, compare this node with working directory.
323 323
324 324 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 325 """
326 326
327 327 ctx1 = self
328 328 ctx2 = self._repo[other]
329 329
330 330 # This next code block is, admittedly, fragile logic that tests for
331 331 # reversing the contexts and wouldn't need to exist if it weren't for
332 332 # the fast (and common) code path of comparing the working directory
333 333 # with its first parent.
334 334 #
335 335 # What we're aiming for here is the ability to call:
336 336 #
337 337 # workingctx.status(parentctx)
338 338 #
339 339 # If we always built the manifest for each context and compared those,
340 340 # then we'd be done. But the special case of the above call means we
341 341 # just copy the manifest of the parent.
342 342 reversed = False
343 343 if (not isinstance(ctx1, changectx)
344 344 and isinstance(ctx2, changectx)):
345 345 reversed = True
346 346 ctx1, ctx2 = ctx2, ctx1
347 347
348 348 match = ctx2._matchstatus(ctx1, match)
349 349 r = scmutil.status([], [], [], [], [], [], [])
350 350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 351 listunknown)
352 352
353 353 if reversed:
354 354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 355 # these make no sense to reverse.
356 356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 357 r.clean)
358 358
359 359 if listsubrepos:
360 360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 361 try:
362 362 rev2 = ctx2.subrev(subpath)
363 363 except KeyError:
364 364 # A subrepo that existed in node1 was deleted between
365 365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 366 # won't contain that subpath. The best we can do is ignore it.
367 367 rev2 = None
368 368 submatch = matchmod.subdirmatcher(subpath, match)
369 369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 370 clean=listclean, unknown=listunknown,
371 371 listsubrepos=True)
372 372 for rfiles, sfiles in zip(r, s):
373 373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374 374
375 375 for l in r:
376 376 l.sort()
377 377
378 378 return r
379 379
380 380
381 381 def makememctx(repo, parents, text, user, date, branch, files, store,
382 382 editor=None, extra=None):
383 383 def getfilectx(repo, memctx, path):
384 384 data, mode, copied = store.getfile(path)
385 385 if data is None:
386 386 return None
387 387 islink, isexec = mode
388 388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
389 389 copied=copied, memctx=memctx)
390 390 if extra is None:
391 391 extra = {}
392 392 if branch:
393 393 extra['branch'] = encoding.fromlocal(branch)
394 394 ctx = memctx(repo, parents, text, files, getfilectx, user,
395 395 date, extra, editor)
396 396 return ctx
397 397
398 398 class changectx(basectx):
399 399 """A changecontext object makes access to data related to a particular
400 400 changeset convenient. It represents a read-only context already present in
401 401 the repo."""
402 402 def __init__(self, repo, changeid=''):
403 403 """changeid is a revision number, node, or tag"""
404 404
405 405 # since basectx.__new__ already took care of copying the object, we
406 406 # don't need to do anything in __init__, so we just exit here
407 407 if isinstance(changeid, basectx):
408 408 return
409 409
410 410 if changeid == '':
411 411 changeid = '.'
412 412 self._repo = repo
413 413
414 414 try:
415 415 if isinstance(changeid, int):
416 416 self._node = repo.changelog.node(changeid)
417 417 self._rev = changeid
418 418 return
419 419 if isinstance(changeid, long):
420 420 changeid = str(changeid)
421 421 if changeid == 'null':
422 422 self._node = nullid
423 423 self._rev = nullrev
424 424 return
425 425 if changeid == 'tip':
426 426 self._node = repo.changelog.tip()
427 427 self._rev = repo.changelog.rev(self._node)
428 428 return
429 429 if changeid == '.' or changeid == repo.dirstate.p1():
430 430 # this is a hack to delay/avoid loading obsmarkers
431 431 # when we know that '.' won't be hidden
432 432 self._node = repo.dirstate.p1()
433 433 self._rev = repo.unfiltered().changelog.rev(self._node)
434 434 return
435 435 if len(changeid) == 20:
436 436 try:
437 437 self._node = changeid
438 438 self._rev = repo.changelog.rev(changeid)
439 439 return
440 440 except error.FilteredRepoLookupError:
441 441 raise
442 442 except LookupError:
443 443 pass
444 444
445 445 try:
446 446 r = int(changeid)
447 447 if str(r) != changeid:
448 448 raise ValueError
449 449 l = len(repo.changelog)
450 450 if r < 0:
451 451 r += l
452 452 if r < 0 or r >= l:
453 453 raise ValueError
454 454 self._rev = r
455 455 self._node = repo.changelog.node(r)
456 456 return
457 457 except error.FilteredIndexError:
458 458 raise
459 459 except (ValueError, OverflowError, IndexError):
460 460 pass
461 461
462 462 if len(changeid) == 40:
463 463 try:
464 464 self._node = bin(changeid)
465 465 self._rev = repo.changelog.rev(self._node)
466 466 return
467 467 except error.FilteredLookupError:
468 468 raise
469 469 except (TypeError, LookupError):
470 470 pass
471 471
472 472 # lookup bookmarks through the name interface
473 473 try:
474 474 self._node = repo.names.singlenode(repo, changeid)
475 475 self._rev = repo.changelog.rev(self._node)
476 476 return
477 477 except KeyError:
478 478 pass
479 479 except error.FilteredRepoLookupError:
480 480 raise
481 481 except error.RepoLookupError:
482 482 pass
483 483
484 484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 485 if self._node is not None:
486 486 self._rev = repo.changelog.rev(self._node)
487 487 return
488 488
489 489 # lookup failed
490 490 # check if it might have come from damaged dirstate
491 491 #
492 492 # XXX we could avoid the unfiltered if we had a recognizable
493 493 # exception for filtered changeset access
494 494 if changeid in repo.unfiltered().dirstate.parents():
495 495 msg = _("working directory has unknown parent '%s'!")
496 496 raise error.Abort(msg % short(changeid))
497 497 try:
498 498 if len(changeid) == 20 and nonascii(changeid):
499 499 changeid = hex(changeid)
500 500 except TypeError:
501 501 pass
502 502 except (error.FilteredIndexError, error.FilteredLookupError,
503 503 error.FilteredRepoLookupError):
504 504 if repo.filtername.startswith('visible'):
505 505 msg = _("hidden revision '%s'") % changeid
506 506 hint = _('use --hidden to access hidden revisions')
507 507 raise error.FilteredRepoLookupError(msg, hint=hint)
508 508 msg = _("filtered revision '%s' (not in '%s' subset)")
509 509 msg %= (changeid, repo.filtername)
510 510 raise error.FilteredRepoLookupError(msg)
511 511 except IndexError:
512 512 pass
513 513 raise error.RepoLookupError(
514 514 _("unknown revision '%s'") % changeid)
515 515
516 516 def __hash__(self):
517 517 try:
518 518 return hash(self._rev)
519 519 except AttributeError:
520 520 return id(self)
521 521
522 522 def __nonzero__(self):
523 523 return self._rev != nullrev
524 524
525 525 @propertycache
526 526 def _changeset(self):
527 527 return self._repo.changelog.changelogrevision(self.rev())
528 528
529 529 @propertycache
530 530 def _manifest(self):
531 531 return self._repo.manifestlog[self._changeset.manifest].read()
532 532
533 533 @propertycache
534 534 def _manifestdelta(self):
535 535 mfnode = self._changeset.manifest
536 536 return self._repo.manifestlog[mfnode].readdelta()
537 537
538 538 @propertycache
539 539 def _parents(self):
540 540 repo = self._repo
541 541 p1, p2 = repo.changelog.parentrevs(self._rev)
542 542 if p2 == nullrev:
543 543 return [changectx(repo, p1)]
544 544 return [changectx(repo, p1), changectx(repo, p2)]
545 545
546 546 def changeset(self):
547 547 c = self._changeset
548 548 return (
549 549 c.manifest,
550 550 c.user,
551 551 c.date,
552 552 c.files,
553 553 c.description,
554 554 c.extra,
555 555 )
556 556 def manifestnode(self):
557 557 return self._changeset.manifest
558 558
559 559 def user(self):
560 560 return self._changeset.user
561 561 def date(self):
562 562 return self._changeset.date
563 563 def files(self):
564 564 return self._changeset.files
565 565 def description(self):
566 566 return self._changeset.description
567 567 def branch(self):
568 568 return encoding.tolocal(self._changeset.extra.get("branch"))
569 569 def closesbranch(self):
570 570 return 'close' in self._changeset.extra
571 571 def extra(self):
572 572 return self._changeset.extra
573 573 def tags(self):
574 574 return self._repo.nodetags(self._node)
575 575 def bookmarks(self):
576 576 return self._repo.nodebookmarks(self._node)
577 577 def phase(self):
578 578 return self._repo._phasecache.phase(self._repo, self._rev)
579 579 def hidden(self):
580 580 return self._rev in repoview.filterrevs(self._repo, 'visible')
581 581
582 582 def children(self):
583 583 """return contexts for each child changeset"""
584 584 c = self._repo.changelog.children(self._node)
585 585 return [changectx(self._repo, x) for x in c]
586 586
587 587 def ancestors(self):
588 588 for a in self._repo.changelog.ancestors([self._rev]):
589 589 yield changectx(self._repo, a)
590 590
591 591 def descendants(self):
592 592 for d in self._repo.changelog.descendants([self._rev]):
593 593 yield changectx(self._repo, d)
594 594
595 595 def filectx(self, path, fileid=None, filelog=None):
596 596 """get a file context from this changeset"""
597 597 if fileid is None:
598 598 fileid = self.filenode(path)
599 599 return filectx(self._repo, path, fileid=fileid,
600 600 changectx=self, filelog=filelog)
601 601
602 602 def ancestor(self, c2, warn=False):
603 603 """return the "best" ancestor context of self and c2
604 604
605 605 If there are multiple candidates, it will show a message and check
606 606 merge.preferancestor configuration before falling back to the
607 607 revlog ancestor."""
608 608 # deal with workingctxs
609 609 n2 = c2._node
610 610 if n2 is None:
611 611 n2 = c2._parents[0]._node
612 612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
613 613 if not cahs:
614 614 anc = nullid
615 615 elif len(cahs) == 1:
616 616 anc = cahs[0]
617 617 else:
618 618 # experimental config: merge.preferancestor
619 619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
620 620 try:
621 621 ctx = changectx(self._repo, r)
622 622 except error.RepoLookupError:
623 623 continue
624 624 anc = ctx.node()
625 625 if anc in cahs:
626 626 break
627 627 else:
628 628 anc = self._repo.changelog.ancestor(self._node, n2)
629 629 if warn:
630 630 self._repo.ui.status(
631 631 (_("note: using %s as ancestor of %s and %s\n") %
632 632 (short(anc), short(self._node), short(n2))) +
633 633 ''.join(_(" alternatively, use --config "
634 634 "merge.preferancestor=%s\n") %
635 635 short(n) for n in sorted(cahs) if n != anc))
636 636 return changectx(self._repo, anc)
637 637
638 638 def descendant(self, other):
639 639 """True if other is descendant of this changeset"""
640 640 return self._repo.changelog.descendant(self._rev, other._rev)
641 641
642 642 def walk(self, match):
643 643 '''Generates matching file names.'''
644 644
645 645 # Wrap match.bad method to have message with nodeid
646 646 def bad(fn, msg):
647 647 # The manifest doesn't know about subrepos, so don't complain about
648 648 # paths into valid subrepos.
649 649 if any(fn == s or fn.startswith(s + '/')
650 650 for s in self.substate):
651 651 return
652 652 match.bad(fn, _('no such file in rev %s') % self)
653 653
654 654 m = matchmod.badmatch(match, bad)
655 655 return self._manifest.walk(m)
656 656
657 657 def matches(self, match):
658 658 return self.walk(match)
659 659
660 660 class basefilectx(object):
661 661 """A filecontext object represents the common logic for its children:
662 662 filectx: read-only access to a filerevision that is already present
663 663 in the repo,
664 664 workingfilectx: a filecontext that represents files from the working
665 665 directory,
666 666 memfilectx: a filecontext that represents files in-memory."""
667 667 def __new__(cls, repo, path, *args, **kwargs):
668 668 return super(basefilectx, cls).__new__(cls)
669 669
670 670 @propertycache
671 671 def _filelog(self):
672 672 return self._repo.file(self._path)
673 673
674 674 @propertycache
675 675 def _changeid(self):
676 676 if '_changeid' in self.__dict__:
677 677 return self._changeid
678 678 elif '_changectx' in self.__dict__:
679 679 return self._changectx.rev()
680 680 elif '_descendantrev' in self.__dict__:
681 681 # this file context was created from a revision with a known
682 682 # descendant, we can (lazily) correct for linkrev aliases
683 683 return self._adjustlinkrev(self._path, self._filelog,
684 684 self._filenode, self._descendantrev)
685 685 else:
686 686 return self._filelog.linkrev(self._filerev)
687 687
688 688 @propertycache
689 689 def _filenode(self):
690 690 if '_fileid' in self.__dict__:
691 691 return self._filelog.lookup(self._fileid)
692 692 else:
693 693 return self._changectx.filenode(self._path)
694 694
695 695 @propertycache
696 696 def _filerev(self):
697 697 return self._filelog.rev(self._filenode)
698 698
699 699 @propertycache
700 700 def _repopath(self):
701 701 return self._path
702 702
703 703 def __nonzero__(self):
704 704 try:
705 705 self._filenode
706 706 return True
707 707 except error.LookupError:
708 708 # file is missing
709 709 return False
710 710
711 711 def __str__(self):
712 try:
712 713 return "%s@%s" % (self.path(), self._changectx)
714 except error.LookupError:
715 return "%s@???" % self.path()
713 716
714 717 def __repr__(self):
715 718 return "<%s %s>" % (type(self).__name__, str(self))
716 719
717 720 def __hash__(self):
718 721 try:
719 722 return hash((self._path, self._filenode))
720 723 except AttributeError:
721 724 return id(self)
722 725
723 726 def __eq__(self, other):
724 727 try:
725 728 return (type(self) == type(other) and self._path == other._path
726 729 and self._filenode == other._filenode)
727 730 except AttributeError:
728 731 return False
729 732
730 733 def __ne__(self, other):
731 734 return not (self == other)
732 735
733 736 def filerev(self):
734 737 return self._filerev
735 738 def filenode(self):
736 739 return self._filenode
737 740 def flags(self):
738 741 return self._changectx.flags(self._path)
739 742 def filelog(self):
740 743 return self._filelog
741 744 def rev(self):
742 745 return self._changeid
743 746 def linkrev(self):
744 747 return self._filelog.linkrev(self._filerev)
745 748 def node(self):
746 749 return self._changectx.node()
747 750 def hex(self):
748 751 return self._changectx.hex()
749 752 def user(self):
750 753 return self._changectx.user()
751 754 def date(self):
752 755 return self._changectx.date()
753 756 def files(self):
754 757 return self._changectx.files()
755 758 def description(self):
756 759 return self._changectx.description()
757 760 def branch(self):
758 761 return self._changectx.branch()
759 762 def extra(self):
760 763 return self._changectx.extra()
761 764 def phase(self):
762 765 return self._changectx.phase()
763 766 def phasestr(self):
764 767 return self._changectx.phasestr()
765 768 def manifest(self):
766 769 return self._changectx.manifest()
767 770 def changectx(self):
768 771 return self._changectx
769 772 def repo(self):
770 773 return self._repo
771 774
772 775 def path(self):
773 776 return self._path
774 777
775 778 def isbinary(self):
776 779 try:
777 780 return util.binary(self.data())
778 781 except IOError:
779 782 return False
780 783 def isexec(self):
781 784 return 'x' in self.flags()
782 785 def islink(self):
783 786 return 'l' in self.flags()
784 787
785 788 def isabsent(self):
786 789 """whether this filectx represents a file not in self._changectx
787 790
788 791 This is mainly for merge code to detect change/delete conflicts. This is
789 792 expected to be True for all subclasses of basectx."""
790 793 return False
791 794
792 795 _customcmp = False
793 796 def cmp(self, fctx):
794 797 """compare with other file context
795 798
796 799 returns True if different than fctx.
797 800 """
798 801 if fctx._customcmp:
799 802 return fctx.cmp(self)
800 803
801 804 if (fctx._filenode is None
802 805 and (self._repo._encodefilterpats
803 806 # if file data starts with '\1\n', empty metadata block is
804 807 # prepended, which adds 4 bytes to filelog.size().
805 808 or self.size() - 4 == fctx.size())
806 809 or self.size() == fctx.size()):
807 810 return self._filelog.cmp(self._filenode, fctx.data())
808 811
809 812 return True
810 813
811 814 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
812 815 """return the first ancestor of <srcrev> introducing <fnode>
813 816
814 817 If the linkrev of the file revision does not point to an ancestor of
815 818 srcrev, we'll walk down the ancestors until we find one introducing
816 819 this file revision.
817 820
818 821 :repo: a localrepository object (used to access changelog and manifest)
819 822 :path: the file path
820 823 :fnode: the nodeid of the file revision
821 824 :filelog: the filelog of this path
822 825 :srcrev: the changeset revision we search ancestors from
823 826 :inclusive: if true, the src revision will also be checked
824 827 """
825 828 repo = self._repo
826 829 cl = repo.unfiltered().changelog
827 830 mfl = repo.manifestlog
828 831 # fetch the linkrev
829 832 fr = filelog.rev(fnode)
830 833 lkr = filelog.linkrev(fr)
831 834 # hack to reuse ancestor computation when searching for renames
832 835 memberanc = getattr(self, '_ancestrycontext', None)
833 836 iteranc = None
834 837 if srcrev is None:
835 838 # wctx case, used by workingfilectx during mergecopy
836 839 revs = [p.rev() for p in self._repo[None].parents()]
837 840 inclusive = True # we skipped the real (revless) source
838 841 else:
839 842 revs = [srcrev]
840 843 if memberanc is None:
841 844 memberanc = iteranc = cl.ancestors(revs, lkr,
842 845 inclusive=inclusive)
843 846 # check if this linkrev is an ancestor of srcrev
844 847 if lkr not in memberanc:
845 848 if iteranc is None:
846 849 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
847 850 for a in iteranc:
848 851 ac = cl.read(a) # get changeset data (we avoid object creation)
849 852 if path in ac[3]: # checking the 'files' field.
850 853 # The file has been touched, check if the content is
851 854 # similar to the one we search for.
852 855 if fnode == mfl[ac[0]].readfast().get(path):
853 856 return a
854 857 # In theory, we should never get out of that loop without a result.
855 858 # But if the manifest uses a buggy file revision (not a child of the
856 859 # one it replaces) we could. Such a buggy situation will likely
857 860 # result in a crash somewhere else at some point.
858 861 return lkr
859 862
860 863 def introrev(self):
861 864 """return the rev of the changeset which introduced this file revision
862 865
863 866 This method is different from linkrev because it takes into account the
864 867 changeset the filectx was created from. It ensures the returned
865 868 revision is one of its ancestors. This prevents bugs from
866 869 'linkrev-shadowing' when a file revision is used by multiple
867 870 changesets.
868 871 """
869 872 lkr = self.linkrev()
870 873 attrs = vars(self)
871 874 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
872 875 if noctx or self.rev() == lkr:
873 876 return self.linkrev()
874 877 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
875 878 self.rev(), inclusive=True)
876 879
877 880 def _parentfilectx(self, path, fileid, filelog):
878 881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
879 882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
880 883 if '_changeid' in vars(self) or '_changectx' in vars(self):
881 884 # If self is associated with a changeset (probably explicitly
882 885 # fed), ensure the created filectx is associated with a
883 886 # changeset that is an ancestor of self.changectx.
884 887 # This lets us later use _adjustlinkrev to get a correct link.
885 888 fctx._descendantrev = self.rev()
886 889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
887 890 elif '_descendantrev' in vars(self):
888 891 # Otherwise propagate _descendantrev if we have one associated.
889 892 fctx._descendantrev = self._descendantrev
890 893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 894 return fctx
892 895
893 896 def parents(self):
894 897 _path = self._path
895 898 fl = self._filelog
896 899 parents = self._filelog.parents(self._filenode)
897 900 pl = [(_path, node, fl) for node in parents if node != nullid]
898 901
899 902 r = fl.renamed(self._filenode)
900 903 if r:
901 904 # - In the simple rename case, both parents are nullid, pl is empty.
902 905 # - In case of merge, only one of the parents is nullid and should
903 906 # be replaced with the rename information. This parent is -always-
904 907 # the first one.
905 908 #
906 909 # As nullid parents have always been filtered out in the previous
907 910 # list comprehension, inserting at 0 will always result in replacing
908 911 # the first nullid parent with the rename information.
909 912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
910 913
911 914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
912 915
913 916 def p1(self):
914 917 return self.parents()[0]
915 918
916 919 def p2(self):
917 920 p = self.parents()
918 921 if len(p) == 2:
919 922 return p[1]
920 923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
921 924
922 925 def annotate(self, follow=False, linenumber=False, diffopts=None):
923 926 '''returns a list of tuples of ((ctx, number), line) for each line
924 927 in the file, where ctx is the filectx of the node where
925 928 that line was last changed; if linenumber parameter is true, number is
926 929 the line number at the first appearance in the managed file, otherwise,
927 930 number has a fixed value of False.
928 931 '''
929 932
930 933 def lines(text):
931 934 if text.endswith("\n"):
932 935 return text.count("\n")
933 936 return text.count("\n") + int(bool(text))
934 937
935 938 if linenumber:
936 939 def decorate(text, rev):
937 940 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
938 941 else:
939 942 def decorate(text, rev):
940 943 return ([(rev, False)] * lines(text), text)
941 944
942 945 def pair(parent, child):
943 946 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
944 947 for (a1, a2, b1, b2), t in blocks:
945 948 # Changed blocks ('!') or blocks made only of blank lines ('~')
946 949 # belong to the child.
947 950 if t == '=':
948 951 child[0][b1:b2] = parent[0][a1:a2]
949 952 return child
950 953
951 954 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952 955
953 956 def parents(f):
954 957 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 958 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 959 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 960 # isn't an ancestor of the srcrev.
958 961 f._changeid
959 962 pl = f.parents()
960 963
961 964 # Don't return renamed parents if we aren't following.
962 965 if not follow:
963 966 pl = [p for p in pl if p.path() == f.path()]
964 967
965 968 # renamed filectx won't have a filelog yet, so set it
966 969 # from the cache to save time
967 970 for p in pl:
968 971 if not '_filelog' in p.__dict__:
969 972 p._filelog = getlog(p.path())
970 973
971 974 return pl
972 975
973 976 # use linkrev to find the first changeset where self appeared
974 977 base = self
975 978 introrev = self.introrev()
976 979 if self.rev() != introrev:
977 980 base = self.filectx(self.filenode(), changeid=introrev)
978 981 if getattr(base, '_ancestrycontext', None) is None:
979 982 cl = self._repo.changelog
980 983 if introrev is None:
981 984 # wctx is not inclusive, but works because _ancestrycontext
982 985 # is used to test filelog revisions
983 986 ac = cl.ancestors([p.rev() for p in base.parents()],
984 987 inclusive=True)
985 988 else:
986 989 ac = cl.ancestors([introrev], inclusive=True)
987 990 base._ancestrycontext = ac
988 991
989 992 # This algorithm would prefer to be recursive, but Python is a
990 993 # bit recursion-hostile. Instead we do an iterative
991 994 # depth-first search.
992 995
993 996 # 1st DFS pre-calculates pcache and needed
994 997 visit = [base]
995 998 pcache = {}
996 999 needed = {base: 1}
997 1000 while visit:
998 1001 f = visit.pop()
999 1002 if f in pcache:
1000 1003 continue
1001 1004 pl = parents(f)
1002 1005 pcache[f] = pl
1003 1006 for p in pl:
1004 1007 needed[p] = needed.get(p, 0) + 1
1005 1008 if p not in pcache:
1006 1009 visit.append(p)
1007 1010
1008 1011 # 2nd DFS does the actual annotate
1009 1012 visit[:] = [base]
1010 1013 hist = {}
1011 1014 while visit:
1012 1015 f = visit[-1]
1013 1016 if f in hist:
1014 1017 visit.pop()
1015 1018 continue
1016 1019
1017 1020 ready = True
1018 1021 pl = pcache[f]
1019 1022 for p in pl:
1020 1023 if p not in hist:
1021 1024 ready = False
1022 1025 visit.append(p)
1023 1026 if ready:
1024 1027 visit.pop()
1025 1028 curr = decorate(f.data(), f)
1026 1029 for p in pl:
1027 1030 curr = pair(hist[p], curr)
1028 1031 if needed[p] == 1:
1029 1032 del hist[p]
1030 1033 del needed[p]
1031 1034 else:
1032 1035 needed[p] -= 1
1033 1036
1034 1037 hist[f] = curr
1035 1038 del pcache[f]
1036 1039
1037 1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1038 1041
1039 1042 def ancestors(self, followfirst=False):
1040 1043 visit = {}
1041 1044 c = self
1042 1045 if followfirst:
1043 1046 cut = 1
1044 1047 else:
1045 1048 cut = None
1046 1049
1047 1050 while True:
1048 1051 for parent in c.parents()[:cut]:
1049 1052 visit[(parent.linkrev(), parent.filenode())] = parent
1050 1053 if not visit:
1051 1054 break
1052 1055 c = visit.pop(max(visit))
1053 1056 yield c
1054 1057
1055 1058 class filectx(basefilectx):
1056 1059 """A filecontext object makes access to data related to a particular
1057 1060 filerevision convenient."""
1058 1061 def __init__(self, repo, path, changeid=None, fileid=None,
1059 1062 filelog=None, changectx=None):
1060 1063 """changeid can be a changeset revision, node, or tag.
1061 1064 fileid can be a file revision or node."""
1062 1065 self._repo = repo
1063 1066 self._path = path
1064 1067
1065 1068 assert (changeid is not None
1066 1069 or fileid is not None
1067 1070 or changectx is not None), \
1068 1071 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1069 1072 % (changeid, fileid, changectx))
1070 1073
1071 1074 if filelog is not None:
1072 1075 self._filelog = filelog
1073 1076
1074 1077 if changeid is not None:
1075 1078 self._changeid = changeid
1076 1079 if changectx is not None:
1077 1080 self._changectx = changectx
1078 1081 if fileid is not None:
1079 1082 self._fileid = fileid
1080 1083
1081 1084 @propertycache
1082 1085 def _changectx(self):
1083 1086 try:
1084 1087 return changectx(self._repo, self._changeid)
1085 1088 except error.FilteredRepoLookupError:
1086 1089 # Linkrev may point to any revision in the repository. When the
1087 1090 # repository is filtered this may lead to `filectx` trying to build
1088 1091 # `changectx` for filtered revision. In such case we fallback to
1089 1092 # creating `changectx` on the unfiltered version of the repository.
1090 1093 # This fallback should not be an issue because `changectx` from
1091 1094 # `filectx` are not used in complex operations that care about
1092 1095 # filtering.
1093 1096 #
1094 1097 # This fallback is a cheap and dirty fix that prevents several
1095 1098 # crashes. It does not ensure the behavior is correct. However the
1096 1099 # behavior was not correct before filtering either and "incorrect
1097 1100 # behavior" is seen as better than a "crash".
1098 1101 #
1099 1102 # Linkrevs have several serious troubles with filtering that are
1100 1103 # complicated to solve. Proper handling of the issue here should be
1101 1104 # considered once solving the linkrev issues is on the table.
1102 1105 return changectx(self._repo.unfiltered(), self._changeid)
1103 1106
1104 1107 def filectx(self, fileid, changeid=None):
1105 1108 '''opens an arbitrary revision of the file without
1106 1109 opening a new filelog'''
1107 1110 return filectx(self._repo, self._path, fileid=fileid,
1108 1111 filelog=self._filelog, changeid=changeid)
1109 1112
1110 1113 def data(self):
1111 1114 try:
1112 1115 return self._filelog.read(self._filenode)
1113 1116 except error.CensoredNodeError:
1114 1117 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1115 1118 return ""
1116 1119 raise error.Abort(_("censored node: %s") % short(self._filenode),
1117 1120 hint=_("set censor.policy to ignore errors"))
1118 1121
1119 1122 def size(self):
1120 1123 return self._filelog.size(self._filerev)
1121 1124
1122 1125 def renamed(self):
1123 1126 """check if file was actually renamed in this changeset revision
1124 1127
1125 1128 If a rename is logged in the file revision, we report the copy for the
1126 1129 changeset only if the file revision's linkrev points back to the changeset
1127 1130 in question or both changeset parents contain different file revisions.
1128 1131 """
1129 1132
1130 1133 renamed = self._filelog.renamed(self._filenode)
1131 1134 if not renamed:
1132 1135 return renamed
1133 1136
1134 1137 if self.rev() == self.linkrev():
1135 1138 return renamed
1136 1139
1137 1140 name = self.path()
1138 1141 fnode = self._filenode
1139 1142 for p in self._changectx.parents():
1140 1143 try:
1141 1144 if fnode == p.filenode(name):
1142 1145 return None
1143 1146 except error.LookupError:
1144 1147 pass
1145 1148 return renamed
1146 1149
1147 1150 def children(self):
1148 1151 # hard for renames
1149 1152 c = self._filelog.children(self._filenode)
1150 1153 return [filectx(self._repo, self._path, fileid=x,
1151 1154 filelog=self._filelog) for x in c]
1152 1155
1153 1156 class committablectx(basectx):
1154 1157 """A committablectx object provides common functionality for a context that
1155 1158 wants the ability to commit, e.g. workingctx or memctx."""
1156 1159 def __init__(self, repo, text="", user=None, date=None, extra=None,
1157 1160 changes=None):
1158 1161 self._repo = repo
1159 1162 self._rev = None
1160 1163 self._node = None
1161 1164 self._text = text
1162 1165 if date:
1163 1166 self._date = util.parsedate(date)
1164 1167 if user:
1165 1168 self._user = user
1166 1169 if changes:
1167 1170 self._status = changes
1168 1171
1169 1172 self._extra = {}
1170 1173 if extra:
1171 1174 self._extra = extra.copy()
1172 1175 if 'branch' not in self._extra:
1173 1176 try:
1174 1177 branch = encoding.fromlocal(self._repo.dirstate.branch())
1175 1178 except UnicodeDecodeError:
1176 1179 raise error.Abort(_('branch name not in UTF-8!'))
1177 1180 self._extra['branch'] = branch
1178 1181 if self._extra['branch'] == '':
1179 1182 self._extra['branch'] = 'default'
1180 1183
1181 1184 def __str__(self):
1182 1185 return str(self._parents[0]) + "+"
1183 1186
1184 1187 def __nonzero__(self):
1185 1188 return True
1186 1189
1187 1190 def _buildflagfunc(self):
1188 1191 # Create a fallback function for getting file flags when the
1189 1192 # filesystem doesn't support them
1190 1193
1191 1194 copiesget = self._repo.dirstate.copies().get
1192 1195 parents = self.parents()
1193 1196 if len(parents) < 2:
1194 1197 # when we have one parent, it's easy: copy from parent
1195 1198 man = parents[0].manifest()
1196 1199 def func(f):
1197 1200 f = copiesget(f, f)
1198 1201 return man.flags(f)
1199 1202 else:
1200 1203 # merges are tricky: we try to reconstruct the unstored
1201 1204 # result from the merge (issue1802)
1202 1205 p1, p2 = parents
1203 1206 pa = p1.ancestor(p2)
1204 1207 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1205 1208
1206 1209 def func(f):
1207 1210 f = copiesget(f, f) # may be wrong for merges with copies
1208 1211 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1209 1212 if fl1 == fl2:
1210 1213 return fl1
1211 1214 if fl1 == fla:
1212 1215 return fl2
1213 1216 if fl2 == fla:
1214 1217 return fl1
1215 1218 return '' # punt for conflicts
1216 1219
1217 1220 return func
1218 1221
1219 1222 @propertycache
1220 1223 def _flagfunc(self):
1221 1224 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1222 1225
1223 1226 @propertycache
1224 1227 def _manifest(self):
1225 1228 """generate a manifest corresponding to the values in self._status
1226 1229
1227 1230 This reuses the file nodeid from the parent, but we append an extra letter
1228 1231 when modified. Modified files get an extra 'm' while added files get
1229 1232 an extra 'a'. This is used by the manifest merge to see that files
1230 1233 are different and by update logic to avoid deleting newly added files.
1231 1234 """
1232 1235 parents = self.parents()
1233 1236
1234 1237 man1 = parents[0].manifest()
1235 1238 man = man1.copy()
1236 1239 if len(parents) > 1:
1237 1240 man2 = self.p2().manifest()
1238 1241 def getman(f):
1239 1242 if f in man1:
1240 1243 return man1
1241 1244 return man2
1242 1245 else:
1243 1246 getman = lambda f: man1
1244 1247
1245 1248 copied = self._repo.dirstate.copies()
1246 1249 ff = self._flagfunc
1247 1250 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1248 1251 for f in l:
1249 1252 orig = copied.get(f, f)
1250 1253 man[f] = getman(orig).get(orig, nullid) + i
1251 1254 try:
1252 1255 man.setflag(f, ff(f))
1253 1256 except OSError:
1254 1257 pass
1255 1258
1256 1259 for f in self._status.deleted + self._status.removed:
1257 1260 if f in man:
1258 1261 del man[f]
1259 1262
1260 1263 return man
1261 1264
1262 1265 @propertycache
1263 1266 def _status(self):
1264 1267 return self._repo.status()
1265 1268
1266 1269 @propertycache
1267 1270 def _user(self):
1268 1271 return self._repo.ui.username()
1269 1272
1270 1273 @propertycache
1271 1274 def _date(self):
1272 1275 return util.makedate()
1273 1276
1274 1277 def subrev(self, subpath):
1275 1278 return None
1276 1279
1277 1280 def manifestnode(self):
1278 1281 return None
1279 1282 def user(self):
1280 1283 return self._user or self._repo.ui.username()
1281 1284 def date(self):
1282 1285 return self._date
1283 1286 def description(self):
1284 1287 return self._text
1285 1288 def files(self):
1286 1289 return sorted(self._status.modified + self._status.added +
1287 1290 self._status.removed)
1288 1291
1289 1292 def modified(self):
1290 1293 return self._status.modified
1291 1294 def added(self):
1292 1295 return self._status.added
1293 1296 def removed(self):
1294 1297 return self._status.removed
1295 1298 def deleted(self):
1296 1299 return self._status.deleted
1297 1300 def branch(self):
1298 1301 return encoding.tolocal(self._extra['branch'])
1299 1302 def closesbranch(self):
1300 1303 return 'close' in self._extra
1301 1304 def extra(self):
1302 1305 return self._extra
1303 1306
1304 1307 def tags(self):
1305 1308 return []
1306 1309
1307 1310 def bookmarks(self):
1308 1311 b = []
1309 1312 for p in self.parents():
1310 1313 b.extend(p.bookmarks())
1311 1314 return b
1312 1315
1313 1316 def phase(self):
1314 1317 phase = phases.draft # default phase to draft
1315 1318 for p in self.parents():
1316 1319 phase = max(phase, p.phase())
1317 1320 return phase
1318 1321
1319 1322 def hidden(self):
1320 1323 return False
1321 1324
1322 1325 def children(self):
1323 1326 return []
1324 1327
1325 1328 def flags(self, path):
1326 1329 if '_manifest' in self.__dict__:
1327 1330 try:
1328 1331 return self._manifest.flags(path)
1329 1332 except KeyError:
1330 1333 return ''
1331 1334
1332 1335 try:
1333 1336 return self._flagfunc(path)
1334 1337 except OSError:
1335 1338 return ''
1336 1339
1337 1340 def ancestor(self, c2):
1338 1341 """return the "best" ancestor context of self and c2"""
1339 1342 return self._parents[0].ancestor(c2) # punt on two parents for now
1340 1343
1341 1344 def walk(self, match):
1342 1345 '''Generates matching file names.'''
1343 1346 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1344 1347 True, False))
1345 1348
1346 1349 def matches(self, match):
1347 1350 return sorted(self._repo.dirstate.matches(match))
1348 1351
1349 1352 def ancestors(self):
1350 1353 for p in self._parents:
1351 1354 yield p
1352 1355 for a in self._repo.changelog.ancestors(
1353 1356 [p.rev() for p in self._parents]):
1354 1357 yield changectx(self._repo, a)
1355 1358
1356 1359 def markcommitted(self, node):
1357 1360 """Perform post-commit cleanup necessary after committing this ctx
1358 1361
1359 1362 Specifically, this updates backing stores this working context
1360 1363 wraps to reflect the fact that the changes reflected by this
1361 1364 workingctx have been committed. For example, it marks
1362 1365 modified and added files as normal in the dirstate.
1363 1366
1364 1367 """
1365 1368
1366 1369 self._repo.dirstate.beginparentchange()
1367 1370 for f in self.modified() + self.added():
1368 1371 self._repo.dirstate.normal(f)
1369 1372 for f in self.removed():
1370 1373 self._repo.dirstate.drop(f)
1371 1374 self._repo.dirstate.setparents(node)
1372 1375 self._repo.dirstate.endparentchange()
1373 1376
1374 1377 # write changes out explicitly, because nesting wlock at
1375 1378 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1376 1379 # from immediately doing so for subsequent changing files
1377 1380 self._repo.dirstate.write(self._repo.currenttransaction())
1378 1381
1379 1382 class workingctx(committablectx):
1380 1383 """A workingctx object makes access to data related to
1381 1384 the current working directory convenient.
1382 1385 date - any valid date string or (unixtime, offset), or None.
1383 1386 user - username string, or None.
1384 1387 extra - a dictionary of extra values, or None.
1385 1388 changes - a list of file lists as returned by localrepo.status()
1386 1389 or None to use the repository status.
1387 1390 """
1388 1391 def __init__(self, repo, text="", user=None, date=None, extra=None,
1389 1392 changes=None):
1390 1393 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1391 1394
1392 1395 def __iter__(self):
1393 1396 d = self._repo.dirstate
1394 1397 for f in d:
1395 1398 if d[f] != 'r':
1396 1399 yield f
1397 1400
1398 1401 def __contains__(self, key):
1399 1402 return self._repo.dirstate[key] not in "?r"
1400 1403
1401 1404 def hex(self):
1402 1405 return hex(wdirid)
1403 1406
1404 1407 @propertycache
1405 1408 def _parents(self):
1406 1409 p = self._repo.dirstate.parents()
1407 1410 if p[1] == nullid:
1408 1411 p = p[:-1]
1409 1412 return [changectx(self._repo, x) for x in p]
1410 1413
1411 1414 def filectx(self, path, filelog=None):
1412 1415 """get a file context from the working directory"""
1413 1416 return workingfilectx(self._repo, path, workingctx=self,
1414 1417 filelog=filelog)
1415 1418
1416 1419 def dirty(self, missing=False, merge=True, branch=True):
1417 1420 "check whether a working directory is modified"
1418 1421 # check subrepos first
1419 1422 for s in sorted(self.substate):
1420 1423 if self.sub(s).dirty():
1421 1424 return True
1422 1425 # check current working dir
1423 1426 return ((merge and self.p2()) or
1424 1427 (branch and self.branch() != self.p1().branch()) or
1425 1428 self.modified() or self.added() or self.removed() or
1426 1429 (missing and self.deleted()))
1427 1430
1428 1431 def add(self, list, prefix=""):
1429 1432 join = lambda f: os.path.join(prefix, f)
1430 1433 with self._repo.wlock():
1431 1434 ui, ds = self._repo.ui, self._repo.dirstate
1432 1435 rejected = []
1433 1436 lstat = self._repo.wvfs.lstat
1434 1437 for f in list:
1435 1438 scmutil.checkportable(ui, join(f))
1436 1439 try:
1437 1440 st = lstat(f)
1438 1441 except OSError:
1439 1442 ui.warn(_("%s does not exist!\n") % join(f))
1440 1443 rejected.append(f)
1441 1444 continue
1442 1445 if st.st_size > 10000000:
1443 1446 ui.warn(_("%s: up to %d MB of RAM may be required "
1444 1447 "to manage this file\n"
1445 1448 "(use 'hg revert %s' to cancel the "
1446 1449 "pending addition)\n")
1447 1450 % (f, 3 * st.st_size // 1000000, join(f)))
1448 1451 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1449 1452 ui.warn(_("%s not added: only files and symlinks "
1450 1453 "supported currently\n") % join(f))
1451 1454 rejected.append(f)
1452 1455 elif ds[f] in 'amn':
1453 1456 ui.warn(_("%s already tracked!\n") % join(f))
1454 1457 elif ds[f] == 'r':
1455 1458 ds.normallookup(f)
1456 1459 else:
1457 1460 ds.add(f)
1458 1461 return rejected
1459 1462
1460 1463 def forget(self, files, prefix=""):
1461 1464 join = lambda f: os.path.join(prefix, f)
1462 1465 with self._repo.wlock():
1463 1466 rejected = []
1464 1467 for f in files:
1465 1468 if f not in self._repo.dirstate:
1466 1469 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1467 1470 rejected.append(f)
1468 1471 elif self._repo.dirstate[f] != 'a':
1469 1472 self._repo.dirstate.remove(f)
1470 1473 else:
1471 1474 self._repo.dirstate.drop(f)
1472 1475 return rejected
1473 1476
1474 1477 def undelete(self, list):
1475 1478 pctxs = self.parents()
1476 1479 with self._repo.wlock():
1477 1480 for f in list:
1478 1481 if self._repo.dirstate[f] != 'r':
1479 1482 self._repo.ui.warn(_("%s not removed!\n") % f)
1480 1483 else:
1481 1484 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1482 1485 t = fctx.data()
1483 1486 self._repo.wwrite(f, t, fctx.flags())
1484 1487 self._repo.dirstate.normal(f)
1485 1488
1486 1489 def copy(self, source, dest):
1487 1490 try:
1488 1491 st = self._repo.wvfs.lstat(dest)
1489 1492 except OSError as err:
1490 1493 if err.errno != errno.ENOENT:
1491 1494 raise
1492 1495 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1493 1496 return
1494 1497 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1495 1498 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1496 1499 "symbolic link\n") % dest)
1497 1500 else:
1498 1501 with self._repo.wlock():
1499 1502 if self._repo.dirstate[dest] in '?':
1500 1503 self._repo.dirstate.add(dest)
1501 1504 elif self._repo.dirstate[dest] in 'r':
1502 1505 self._repo.dirstate.normallookup(dest)
1503 1506 self._repo.dirstate.copy(source, dest)
1504 1507
1505 1508 def match(self, pats=[], include=None, exclude=None, default='glob',
1506 1509 listsubrepos=False, badfn=None):
1507 1510 r = self._repo
1508 1511
1509 1512 # Only a case insensitive filesystem needs magic to translate user input
1510 1513 # to actual case in the filesystem.
1511 1514 if not util.fscasesensitive(r.root):
1512 1515 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1513 1516 exclude, default, r.auditor, self,
1514 1517 listsubrepos=listsubrepos,
1515 1518 badfn=badfn)
1516 1519 return matchmod.match(r.root, r.getcwd(), pats,
1517 1520 include, exclude, default,
1518 1521 auditor=r.auditor, ctx=self,
1519 1522 listsubrepos=listsubrepos, badfn=badfn)
1520 1523
1521 1524 def _filtersuspectsymlink(self, files):
1522 1525 if not files or self._repo.dirstate._checklink:
1523 1526 return files
1524 1527
1525 1528 # Symlink placeholders may get non-symlink-like contents
1526 1529 # via user error or dereferencing by NFS or Samba servers,
1527 1530 # so we filter out any placeholders that don't look like a
1528 1531 # symlink
1529 1532 sane = []
1530 1533 for f in files:
1531 1534 if self.flags(f) == 'l':
1532 1535 d = self[f].data()
1533 1536 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1534 1537 self._repo.ui.debug('ignoring suspect symlink placeholder'
1535 1538 ' "%s"\n' % f)
1536 1539 continue
1537 1540 sane.append(f)
1538 1541 return sane
1539 1542
1540 1543 def _checklookup(self, files):
1541 1544 # check for any possibly clean files
1542 1545 if not files:
1543 1546 return [], []
1544 1547
1545 1548 modified = []
1546 1549 fixup = []
1547 1550 pctx = self._parents[0]
1548 1551 # do a full compare of any files that might have changed
1549 1552 for f in sorted(files):
1550 1553 if (f not in pctx or self.flags(f) != pctx.flags(f)
1551 1554 or pctx[f].cmp(self[f])):
1552 1555 modified.append(f)
1553 1556 else:
1554 1557 fixup.append(f)
1555 1558
1556 1559 # update dirstate for files that are actually clean
1557 1560 if fixup:
1558 1561 try:
1559 1562 # updating the dirstate is optional
1560 1563 # so we don't wait on the lock
1561 1564 # wlock can invalidate the dirstate, so cache normal _after_
1562 1565 # taking the lock
1563 1566 with self._repo.wlock(False):
1564 1567 normal = self._repo.dirstate.normal
1565 1568 for f in fixup:
1566 1569 normal(f)
1567 1570 # write changes out explicitly, because nesting
1568 1571 # wlock at runtime may prevent 'wlock.release()'
1569 1572 # after this block from doing so for subsequent
1570 1573 # changing files
1571 1574 self._repo.dirstate.write(self._repo.currenttransaction())
1572 1575 except error.LockError:
1573 1576 pass
1574 1577 return modified, fixup
1575 1578
1576 1579 def _manifestmatches(self, match, s):
1577 1580 """Slow path for workingctx
1578 1581
1579 1582 The fast path is when we compare the working directory to its parent
1580 1583 which means this function is comparing with a non-parent; therefore we
1581 1584 need to build a manifest and return what matches.
1582 1585 """
1583 1586 mf = self._repo['.']._manifestmatches(match, s)
1584 1587 for f in s.modified + s.added:
1585 1588 mf[f] = _newnode
1586 1589 mf.setflag(f, self.flags(f))
1587 1590 for f in s.removed:
1588 1591 if f in mf:
1589 1592 del mf[f]
1590 1593 return mf
1591 1594
1592 1595 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1593 1596 unknown=False):
1594 1597 '''Gets the status from the dirstate -- internal use only.'''
1595 1598 listignored, listclean, listunknown = ignored, clean, unknown
1596 1599 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1597 1600 subrepos = []
1598 1601 if '.hgsub' in self:
1599 1602 subrepos = sorted(self.substate)
1600 1603 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1601 1604 listclean, listunknown)
1602 1605
1603 1606 # check for any possibly clean files
1604 1607 if cmp:
1605 1608 modified2, fixup = self._checklookup(cmp)
1606 1609 s.modified.extend(modified2)
1607 1610
1608 1611 # update dirstate for files that are actually clean
1609 1612 if fixup and listclean:
1610 1613 s.clean.extend(fixup)
1611 1614
1612 1615 if match.always():
1613 1616 # cache for performance
1614 1617 if s.unknown or s.ignored or s.clean:
1615 1618 # "_status" is cached with list*=False in the normal route
1616 1619 self._status = scmutil.status(s.modified, s.added, s.removed,
1617 1620 s.deleted, [], [], [])
1618 1621 else:
1619 1622 self._status = s
1620 1623
1621 1624 return s
1622 1625
1623 1626 def _buildstatus(self, other, s, match, listignored, listclean,
1624 1627 listunknown):
1625 1628 """build a status with respect to another context
1626 1629
1627 1630 This includes logic for maintaining the fast path of status when
1628 1631 comparing the working directory against its parent, which is to skip
1629 1632 building a new manifest if self (working directory) is not comparing
1630 1633 against its parent (repo['.']).
1631 1634 """
1632 1635 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1633 1636 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1634 1637 # might have accidentally ended up with the entire contents of the file
1635 1638 # they are supposed to be linking to.
1636 1639 s.modified[:] = self._filtersuspectsymlink(s.modified)
1637 1640 if other != self._repo['.']:
1638 1641 s = super(workingctx, self)._buildstatus(other, s, match,
1639 1642 listignored, listclean,
1640 1643 listunknown)
1641 1644 return s
1642 1645
1643 1646 def _matchstatus(self, other, match):
1644 1647 """override the match method with a filter for directory patterns
1645 1648
1646 1649 We use inheritance to customize the match.bad method only in cases of
1647 1650 workingctx since it belongs only to the working directory when
1648 1651 comparing against the parent changeset.
1649 1652
1650 1653 If we aren't comparing against the working directory's parent, then we
1651 1654 just use the default match object sent to us.
1652 1655 """
1653 1656 superself = super(workingctx, self)
1654 1657 match = superself._matchstatus(other, match)
1655 1658 if other != self._repo['.']:
1656 1659 def bad(f, msg):
1657 1660 # 'f' may be a directory pattern from 'match.files()',
1658 1661 # so 'f not in ctx1' is not enough
1659 1662 if f not in other and not other.hasdir(f):
1660 1663 self._repo.ui.warn('%s: %s\n' %
1661 1664 (self._repo.dirstate.pathto(f), msg))
1662 1665 match.bad = bad
1663 1666 return match
1664 1667
1665 1668 class committablefilectx(basefilectx):
1666 1669 """A committablefilectx provides common functionality for a file context
1667 1670 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1668 1671 def __init__(self, repo, path, filelog=None, ctx=None):
1669 1672 self._repo = repo
1670 1673 self._path = path
1671 1674 self._changeid = None
1672 1675 self._filerev = self._filenode = None
1673 1676
1674 1677 if filelog is not None:
1675 1678 self._filelog = filelog
1676 1679 if ctx:
1677 1680 self._changectx = ctx
1678 1681
1679 1682 def __nonzero__(self):
1680 1683 return True
1681 1684
1682 1685 def linkrev(self):
1683 1686 # linked to self._changectx no matter if file is modified or not
1684 1687 return self.rev()
1685 1688
1686 1689 def parents(self):
1687 1690 '''return parent filectxs, following copies if necessary'''
1688 1691 def filenode(ctx, path):
1689 1692 return ctx._manifest.get(path, nullid)
1690 1693
1691 1694 path = self._path
1692 1695 fl = self._filelog
1693 1696 pcl = self._changectx._parents
1694 1697 renamed = self.renamed()
1695 1698
1696 1699 if renamed:
1697 1700 pl = [renamed + (None,)]
1698 1701 else:
1699 1702 pl = [(path, filenode(pcl[0], path), fl)]
1700 1703
1701 1704 for pc in pcl[1:]:
1702 1705 pl.append((path, filenode(pc, path), fl))
1703 1706
1704 1707 return [self._parentfilectx(p, fileid=n, filelog=l)
1705 1708 for p, n, l in pl if n != nullid]
1706 1709
1707 1710 def children(self):
1708 1711 return []
1709 1712
1710 1713 class workingfilectx(committablefilectx):
1711 1714 """A workingfilectx object makes access to data related to a particular
1712 1715 file in the working directory convenient."""
1713 1716 def __init__(self, repo, path, filelog=None, workingctx=None):
1714 1717 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1715 1718
1716 1719 @propertycache
1717 1720 def _changectx(self):
1718 1721 return workingctx(self._repo)
1719 1722
1720 1723 def data(self):
1721 1724 return self._repo.wread(self._path)
1722 1725 def renamed(self):
1723 1726 rp = self._repo.dirstate.copied(self._path)
1724 1727 if not rp:
1725 1728 return None
1726 1729 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1727 1730
1728 1731 def size(self):
1729 1732 return self._repo.wvfs.lstat(self._path).st_size
1730 1733 def date(self):
1731 1734 t, tz = self._changectx.date()
1732 1735 try:
1733 1736 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1734 1737 except OSError as err:
1735 1738 if err.errno != errno.ENOENT:
1736 1739 raise
1737 1740 return (t, tz)
1738 1741
1739 1742 def cmp(self, fctx):
1740 1743 """compare with other file context
1741 1744
1742 1745 returns True if different than fctx.
1743 1746 """
1744 1747 # fctx should be a filectx (not a workingfilectx)
1745 1748 # invert comparison to reuse the same code path
1746 1749 return fctx.cmp(self)
1747 1750
1748 1751 def remove(self, ignoremissing=False):
1749 1752 """wraps unlink for a repo's working directory"""
1750 1753 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1751 1754
1752 1755 def write(self, data, flags):
1753 1756 """wraps repo.wwrite"""
1754 1757 self._repo.wwrite(self._path, data, flags)
1755 1758
1756 1759 class workingcommitctx(workingctx):
1757 1760 """A workingcommitctx object makes access to data related to
1758 1761 the revision being committed convenient.
1759 1762
1760 1763 This hides changes in the working directory, if they aren't
1761 1764 committed in this context.
1762 1765 """
1763 1766 def __init__(self, repo, changes,
1764 1767 text="", user=None, date=None, extra=None):
1765 1768 super(workingctx, self).__init__(repo, text, user, date, extra,
1766 1769 changes)
1767 1770
1768 1771 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1769 1772 unknown=False):
1770 1773 """Return matched files only in ``self._status``
1771 1774
1772 1775 Uncommitted files appear "clean" via this context, even if
1773 1776 they aren't actually so in the working directory.
1774 1777 """
1775 1778 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1776 1779 if clean:
1777 1780 clean = [f for f in self._manifest if f not in self._changedset]
1778 1781 else:
1779 1782 clean = []
1780 1783 return scmutil.status([f for f in self._status.modified if match(f)],
1781 1784 [f for f in self._status.added if match(f)],
1782 1785 [f for f in self._status.removed if match(f)],
1783 1786 [], [], [], clean)
1784 1787
1785 1788 @propertycache
1786 1789 def _changedset(self):
1787 1790 """Return the set of files changed in this context
1788 1791 """
1789 1792 changed = set(self._status.modified)
1790 1793 changed.update(self._status.added)
1791 1794 changed.update(self._status.removed)
1792 1795 return changed
1793 1796
1794 1797 def makecachingfilectxfn(func):
1795 1798 """Create a filectxfn that caches based on the path.
1796 1799
1797 1800 We can't use util.cachefunc because it uses all arguments as the cache
1798 1801 key and this creates a cycle since the arguments include the repo and
1799 1802 memctx.
1800 1803 """
1801 1804 cache = {}
1802 1805
1803 1806 def getfilectx(repo, memctx, path):
1804 1807 if path not in cache:
1805 1808 cache[path] = func(repo, memctx, path)
1806 1809 return cache[path]
1807 1810
1808 1811 return getfilectx
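
# Editor's sketch (not part of context.py): how the path-keyed cache above is
# typically combined with the memctx class below. Keying on the path alone
# (rather than util.cachefunc's all-arguments key, which would capture repo and
# memctx) avoids the reference cycle mentioned in the docstring while still
# memoizing repeated lookups of the same file. The callback name and the file
# contents here are hypothetical.
def _cachingfilectxfnexample(repo, parents, text, files):
    def expensivefilectxfn(repo, memctx, path):
        # stand-in for a costly lookup, e.g. reading from a foreign VCS
        # during a convert run
        return memfilectx(repo, path, 'data for %s\n' % path, memctx=memctx)
    # memctx.__init__ applies makecachingfilectxfn itself when given a
    # callable, so wrapping by hand is shown only for illustration
    filectxfn = makecachingfilectxfn(expensivefilectxfn)
    return memctx(repo, parents, text, files, filectxfn)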
1809 1812
1810 1813 class memctx(committablectx):
1811 1814 """Use memctx to perform in-memory commits via localrepo.commitctx().
1812 1815
1813 1816 Revision information is supplied at initialization time, while the
1814 1817 related file data is made available through a callback
1815 1818 mechanism. 'repo' is the current localrepo, 'parents' is a
1816 1819 sequence of two parent revision identifiers (pass None for every
1817 1820 missing parent), 'text' is the commit message and 'files' lists the
1818 1821 names of files touched by the revision (normalized and relative to
1819 1822 the repository root).
1820 1823
1821 1824 filectxfn(repo, memctx, path) is a callable receiving the
1822 1825 repository, the current memctx object and the normalized path of
1823 1826 requested file, relative to repository root. It is fired by the
1824 1827 commit function for every file in 'files', but the call order is
1825 1828 undefined. If the file is available in the revision being
1826 1829 committed (updated or added), filectxfn returns a memfilectx
1827 1830 object. If the file was removed, filectxfn raises an
1828 1831 IOError. Moved files are represented by marking the source file
1829 1832 removed and the new file added with copy information (see
1830 1833 memfilectx).
1831 1834
1832 1835 'user' is the committer name and defaults to the current
1833 1836 repository username; 'date' is the commit date in any format
1834 1837 supported by util.parsedate() and defaults to the current date;
1835 1838 'extra' is a dictionary of metadata and is left empty by default.
1836 1839 """
1837 1840
1838 1841 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1839 1842 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1840 1843 # this field to determine what to do in filectxfn.
1841 1844 _returnnoneformissingfiles = True
1842 1845
1843 1846 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1844 1847 date=None, extra=None, editor=False):
1845 1848 super(memctx, self).__init__(repo, text, user, date, extra)
1846 1849 self._rev = None
1847 1850 self._node = None
1848 1851 parents = [(p or nullid) for p in parents]
1849 1852 p1, p2 = parents
1850 1853 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1851 1854 files = sorted(set(files))
1852 1855 self._files = files
1853 1856 self.substate = {}
1854 1857
1855 1858 # if store is not callable, wrap it in a function
1856 1859 if not callable(filectxfn):
1857 1860 def getfilectx(repo, memctx, path):
1858 1861 fctx = filectxfn[path]
1859 1862 # this is weird but apparently we only keep track of one parent
1860 1863 # (why not only store that instead of a tuple?)
1861 1864 copied = fctx.renamed()
1862 1865 if copied:
1863 1866 copied = copied[0]
1864 1867 return memfilectx(repo, path, fctx.data(),
1865 1868 islink=fctx.islink(), isexec=fctx.isexec(),
1866 1869 copied=copied, memctx=memctx)
1867 1870 self._filectxfn = getfilectx
1868 1871 else:
1869 1872 # memoizing increases performance for e.g. vcs convert scenarios.
1870 1873 self._filectxfn = makecachingfilectxfn(filectxfn)
1871 1874
1872 1875 if extra:
1873 1876 self._extra = extra.copy()
1874 1877 else:
1875 1878 self._extra = {}
1876 1879
1877 1880 if self._extra.get('branch', '') == '':
1878 1881 self._extra['branch'] = 'default'
1879 1882
1880 1883 if editor:
1881 1884 self._text = editor(self._repo, self, [])
1882 1885 self._repo.savecommitmessage(self._text)
1883 1886
1884 1887 def filectx(self, path, filelog=None):
1885 1888 """get a file context from the working directory
1886 1889
1887 1890 Returns None if file doesn't exist and should be removed."""
1888 1891 return self._filectxfn(self._repo, self, path)
1889 1892
1890 1893 def commit(self):
1891 1894 """commit context to the repo"""
1892 1895 return self._repo.commitctx(self)
1893 1896
1894 1897 @propertycache
1895 1898 def _manifest(self):
1896 1899 """generate a manifest based on the return values of filectxfn"""
1897 1900
1898 1901 # keep this simple for now; just worry about p1
1899 1902 pctx = self._parents[0]
1900 1903 man = pctx.manifest().copy()
1901 1904
1902 1905 for f in self._status.modified:
1903 1906 p1node = nullid
1904 1907 p2node = nullid
1905 1908 p = pctx[f].parents() # if file isn't in pctx, check p2?
1906 1909 if len(p) > 0:
1907 1910 p1node = p[0].filenode()
1908 1911 if len(p) > 1:
1909 1912 p2node = p[1].filenode()
1910 1913 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1911 1914
1912 1915 for f in self._status.added:
1913 1916 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1914 1917
1915 1918 for f in self._status.removed:
1916 1919 if f in man:
1917 1920 del man[f]
1918 1921
1919 1922 return man
1920 1923
1921 1924 @propertycache
1922 1925 def _status(self):
1923 1926 """Calculate exact status from ``files`` specified at construction
1924 1927 """
1925 1928 man1 = self.p1().manifest()
1926 1929 p2 = self._parents[1]
1927 1930 # "1 < len(self._parents)" can't be used for checking
1928 1931 # existence of the 2nd parent, because "memctx._parents" is
1929 1932 # explicitly initialized by the list, of which length is 2.
1930 1933 if p2.node() != nullid:
1931 1934 man2 = p2.manifest()
1932 1935 managing = lambda f: f in man1 or f in man2
1933 1936 else:
1934 1937 managing = lambda f: f in man1
1935 1938
1936 1939 modified, added, removed = [], [], []
1937 1940 for f in self._files:
1938 1941 if not managing(f):
1939 1942 added.append(f)
1940 1943 elif self[f]:
1941 1944 modified.append(f)
1942 1945 else:
1943 1946 removed.append(f)
1944 1947
1945 1948 return scmutil.status(modified, added, removed, [], [], [], [])
1946 1949
1947 1950 class memfilectx(committablefilectx):
1948 1951 """memfilectx represents an in-memory file to commit.
1949 1952
1950 1953 See memctx and committablefilectx for more details.
1951 1954 """
1952 1955 def __init__(self, repo, path, data, islink=False,
1953 1956 isexec=False, copied=None, memctx=None):
1954 1957 """
1955 1958 path is the normalized file path relative to repository root.
1956 1959 data is the file content as a string.
1957 1960 islink is True if the file is a symbolic link.
1958 1961 isexec is True if the file is executable.
1959 1962 copied is the source file path if current file was copied in the
1960 1963 revision being committed, or None."""
1961 1964 super(memfilectx, self).__init__(repo, path, None, memctx)
1962 1965 self._data = data
1963 1966 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1964 1967 self._copied = None
1965 1968 if copied:
1966 1969 self._copied = (copied, nullid)
1967 1970
1968 1971 def data(self):
1969 1972 return self._data
1970 1973 def size(self):
1971 1974 return len(self.data())
1972 1975 def flags(self):
1973 1976 return self._flags
1974 1977 def renamed(self):
1975 1978 return self._copied
1976 1979
1977 1980 def remove(self, ignoremissing=False):
1978 1981 """wraps unlink for a repo's working directory"""
1979 1982 # need to figure out what to do here
1980 1983 del self._changectx[self._path]
1981 1984
1982 1985 def write(self, data, flags):
1983 1986 """wraps repo.wwrite"""
1984 1987 self._data = data
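
# Editor's usage sketch (not part of context.py): putting memctx and memfilectx
# together as described in the memctx docstring above. It assumes an existing
# localrepo `repo`; the file names, contents and commit message are placeholders.
# From outside this module the classes are reached as mercurial.context.memctx
# and mercurial.context.memfilectx.
def _memctxcommitexample(repo):
    def filectxfn(repo, memctx, path):
        # called once per entry in `files`; return a memfilectx for files
        # present in the new revision, or None for removed ones (raising
        # IOError is the Mercurial <= 3.1 convention, see
        # _returnnoneformissingfiles above)
        if path == 'removed.txt':
            return None
        return memfilectx(repo, path, 'new contents\n',
                          islink=False, isexec=False, memctx=memctx)

    mctx = memctx(repo, (repo['.'].node(), None),
                  'in-memory commit sketch',
                  ['added.txt', 'removed.txt'],
                  filectxfn,
                  user='editor <editor@example.com>')
    return repo.commitctx(mctx)  # equivalent to mctx.commit()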