context: raise ProgrammingError on repo['my-tag']...
Martin von Zweigbergk - r38845:91618801 default
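The change is the new 'else' branch at the end of the type checks in changectx.__init__ (new lines 435-438 in the hunk below): a changeid that is not an integer, not one of the special names 'null', 'tip', or '.', and not a 20- or 40-byte node now raises error.ProgrammingError instead of falling through to the generic "unknown revision" lookup error. A minimal sketch of the intended calling convention, assuming 'repo' is an already-opened repository object (a hypothetical variable here) and using scmutil.revsymbol(), which this module already uses elsewhere for symbol resolution:

    from mercurial import scmutil

    ctx = repo[5]                            # revision number: still accepted
    ctx = repo['tip']                        # special names are handled explicitly
    ctx = scmutil.revsymbol(repo, 'my-tag')  # resolve tags/bookmarks/symbols here
    # repo['my-tag'] itself would now hit the new ProgrammingError branch.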
@@ -1,2540 +1,2544 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirfilenodeids,
25 25 wdirid,
26 26 )
27 27 from . import (
28 28 dagop,
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 revlog,
40 40 scmutil,
41 41 sparse,
42 42 subrepo,
43 43 subrepoutil,
44 44 util,
45 45 )
46 46 from .utils import (
47 47 dateutil,
48 48 stringutil,
49 49 )
50 50
51 51 propertycache = util.propertycache
52 52
53 53 class basectx(object):
54 54 """A basectx object represents the common logic for its children:
55 55 changectx: read-only context that is already present in the repo,
56 56 workingctx: a context that represents the working directory and can
57 57 be committed,
58 58 memctx: a context that represents changes in-memory and can also
59 59 be committed."""
60 60
61 61 def __init__(self, repo):
62 62 self._repo = repo
63 63
64 64 def __bytes__(self):
65 65 return short(self.node())
66 66
67 67 __str__ = encoding.strmethod(__bytes__)
68 68
69 69 def __repr__(self):
70 70 return r"<%s %s>" % (type(self).__name__, str(self))
71 71
72 72 def __eq__(self, other):
73 73 try:
74 74 return type(self) == type(other) and self._rev == other._rev
75 75 except AttributeError:
76 76 return False
77 77
78 78 def __ne__(self, other):
79 79 return not (self == other)
80 80
81 81 def __contains__(self, key):
82 82 return key in self._manifest
83 83
84 84 def __getitem__(self, key):
85 85 return self.filectx(key)
86 86
87 87 def __iter__(self):
88 88 return iter(self._manifest)
89 89
90 90 def _buildstatusmanifest(self, status):
91 91 """Builds a manifest that includes the given status results, if this is
92 92 a working copy context. For non-working copy contexts, it just returns
93 93 the normal manifest."""
94 94 return self.manifest()
95 95
96 96 def _matchstatus(self, other, match):
97 97 """This internal method provides a way for child objects to override the
98 98 match operator.
99 99 """
100 100 return match
101 101
102 102 def _buildstatus(self, other, s, match, listignored, listclean,
103 103 listunknown):
104 104 """build a status with respect to another context"""
105 105 # Load earliest manifest first for caching reasons. More specifically,
106 106 # if you have revisions 1000 and 1001, 1001 is probably stored as a
107 107 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
108 108 # 1000 and cache it so that when you read 1001, we just need to apply a
109 109 # delta to what's in the cache. So that's one full reconstruction + one
110 110 # delta application.
111 111 mf2 = None
112 112 if self.rev() is not None and self.rev() < other.rev():
113 113 mf2 = self._buildstatusmanifest(s)
114 114 mf1 = other._buildstatusmanifest(s)
115 115 if mf2 is None:
116 116 mf2 = self._buildstatusmanifest(s)
117 117
118 118 modified, added = [], []
119 119 removed = []
120 120 clean = []
121 121 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
122 122 deletedset = set(deleted)
123 123 d = mf1.diff(mf2, match=match, clean=listclean)
124 124 for fn, value in d.iteritems():
125 125 if fn in deletedset:
126 126 continue
127 127 if value is None:
128 128 clean.append(fn)
129 129 continue
130 130 (node1, flag1), (node2, flag2) = value
131 131 if node1 is None:
132 132 added.append(fn)
133 133 elif node2 is None:
134 134 removed.append(fn)
135 135 elif flag1 != flag2:
136 136 modified.append(fn)
137 137 elif node2 not in wdirfilenodeids:
138 138 # When comparing files between two commits, we save time by
139 139 # not comparing the file contents when the nodeids differ.
140 140 # Note that this means we incorrectly report a reverted change
141 141 # to a file as a modification.
142 142 modified.append(fn)
143 143 elif self[fn].cmp(other[fn]):
144 144 modified.append(fn)
145 145 else:
146 146 clean.append(fn)
147 147
148 148 if removed:
149 149 # need to filter files if they are already reported as removed
150 150 unknown = [fn for fn in unknown if fn not in mf1 and
151 151 (not match or match(fn))]
152 152 ignored = [fn for fn in ignored if fn not in mf1 and
153 153 (not match or match(fn))]
154 154 # if they're deleted, don't report them as removed
155 155 removed = [fn for fn in removed if fn not in deletedset]
156 156
157 157 return scmutil.status(modified, added, removed, deleted, unknown,
158 158 ignored, clean)
159 159
160 160 @propertycache
161 161 def substate(self):
162 162 return subrepoutil.state(self, self._repo.ui)
163 163
164 164 def subrev(self, subpath):
165 165 return self.substate[subpath][1]
166 166
167 167 def rev(self):
168 168 return self._rev
169 169 def node(self):
170 170 return self._node
171 171 def hex(self):
172 172 return hex(self.node())
173 173 def manifest(self):
174 174 return self._manifest
175 175 def manifestctx(self):
176 176 return self._manifestctx
177 177 def repo(self):
178 178 return self._repo
179 179 def phasestr(self):
180 180 return phases.phasenames[self.phase()]
181 181 def mutable(self):
182 182 return self.phase() > phases.public
183 183
184 184 def getfileset(self, expr):
185 185 return fileset.getfileset(self, expr)
186 186
187 187 def obsolete(self):
188 188 """True if the changeset is obsolete"""
189 189 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
190 190
191 191 def extinct(self):
192 192 """True if the changeset is extinct"""
193 193 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
194 194
195 195 def orphan(self):
196 196 """True if the changeset is not obsolete but it's ancestor are"""
197 197 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
198 198
199 199 def phasedivergent(self):
200 200 """True if the changeset try to be a successor of a public changeset
201 201
202 202 Only non-public and non-obsolete changesets may be bumped.
203 203 """
204 204 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
205 205
206 206 def contentdivergent(self):
207 207 """Is a successors of a changeset with multiple possible successors set
208 208
209 209 Only non-public and non-obsolete changesets may be divergent.
210 210 """
211 211 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
212 212
213 213 def isunstable(self):
214 214 """True if the changeset is either unstable, bumped or divergent"""
215 215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
216 216
217 217 def instabilities(self):
218 218 """return the list of instabilities affecting this changeset.
219 219
220 220 Instabilities are returned as strings. possible values are:
221 221 - orphan,
222 222 - phase-divergent,
223 223 - content-divergent.
224 224 """
225 225 instabilities = []
226 226 if self.orphan():
227 227 instabilities.append('orphan')
228 228 if self.phasedivergent():
229 229 instabilities.append('phase-divergent')
230 230 if self.contentdivergent():
231 231 instabilities.append('content-divergent')
232 232 return instabilities
233 233
234 234 def parents(self):
235 235 """return contexts for each parent changeset"""
236 236 return self._parents
237 237
238 238 def p1(self):
239 239 return self._parents[0]
240 240
241 241 def p2(self):
242 242 parents = self._parents
243 243 if len(parents) == 2:
244 244 return parents[1]
245 245 return changectx(self._repo, nullrev)
246 246
247 247 def _fileinfo(self, path):
248 248 if r'_manifest' in self.__dict__:
249 249 try:
250 250 return self._manifest[path], self._manifest.flags(path)
251 251 except KeyError:
252 252 raise error.ManifestLookupError(self._node, path,
253 253 _('not found in manifest'))
254 254 if r'_manifestdelta' in self.__dict__ or path in self.files():
255 255 if path in self._manifestdelta:
256 256 return (self._manifestdelta[path],
257 257 self._manifestdelta.flags(path))
258 258 mfl = self._repo.manifestlog
259 259 try:
260 260 node, flag = mfl[self._changeset.manifest].find(path)
261 261 except KeyError:
262 262 raise error.ManifestLookupError(self._node, path,
263 263 _('not found in manifest'))
264 264
265 265 return node, flag
266 266
267 267 def filenode(self, path):
268 268 return self._fileinfo(path)[0]
269 269
270 270 def flags(self, path):
271 271 try:
272 272 return self._fileinfo(path)[1]
273 273 except error.LookupError:
274 274 return ''
275 275
276 276 def sub(self, path, allowcreate=True):
277 277 '''return a subrepo for the stored revision of path, never wdir()'''
278 278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
279 279
280 280 def nullsub(self, path, pctx):
281 281 return subrepo.nullsubrepo(self, path, pctx)
282 282
283 283 def workingsub(self, path):
284 284 '''return a subrepo for the stored revision, or wdir if this is a wdir
285 285 context.
286 286 '''
287 287 return subrepo.subrepo(self, path, allowwdir=True)
288 288
289 289 def match(self, pats=None, include=None, exclude=None, default='glob',
290 290 listsubrepos=False, badfn=None):
291 291 r = self._repo
292 292 return matchmod.match(r.root, r.getcwd(), pats,
293 293 include, exclude, default,
294 294 auditor=r.nofsauditor, ctx=self,
295 295 listsubrepos=listsubrepos, badfn=badfn)
296 296
297 297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
298 298 losedatafn=None, prefix='', relroot='', copy=None,
299 299 hunksfilterfn=None):
300 300 """Returns a diff generator for the given contexts and matcher"""
301 301 if ctx2 is None:
302 302 ctx2 = self.p1()
303 303 if ctx2 is not None:
304 304 ctx2 = self._repo[ctx2]
305 305 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
306 306 opts=opts, losedatafn=losedatafn, prefix=prefix,
307 307 relroot=relroot, copy=copy,
308 308 hunksfilterfn=hunksfilterfn)
309 309
310 310 def dirs(self):
311 311 return self._manifest.dirs()
312 312
313 313 def hasdir(self, dir):
314 314 return self._manifest.hasdir(dir)
315 315
316 316 def status(self, other=None, match=None, listignored=False,
317 317 listclean=False, listunknown=False, listsubrepos=False):
318 318 """return status of files between two nodes or node and working
319 319 directory.
320 320
321 321 If other is None, compare this node with working directory.
322 322
323 323 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 324 """
325 325
326 326 ctx1 = self
327 327 ctx2 = self._repo[other]
328 328
329 329 # This next code block is, admittedly, fragile logic that tests for
330 330 # reversing the contexts and wouldn't need to exist if it weren't for
331 331 # the fast (and common) code path of comparing the working directory
332 332 # with its first parent.
333 333 #
334 334 # What we're aiming for here is the ability to call:
335 335 #
336 336 # workingctx.status(parentctx)
337 337 #
338 338 # If we always built the manifest for each context and compared those,
339 339 # then we'd be done. But the special case of the above call means we
340 340 # just copy the manifest of the parent.
341 341 reversed = False
342 342 if (not isinstance(ctx1, changectx)
343 343 and isinstance(ctx2, changectx)):
344 344 reversed = True
345 345 ctx1, ctx2 = ctx2, ctx1
346 346
347 347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
348 348 match = ctx2._matchstatus(ctx1, match)
349 349 r = scmutil.status([], [], [], [], [], [], [])
350 350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 351 listunknown)
352 352
353 353 if reversed:
354 354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 355 # these make no sense to reverse.
356 356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 357 r.clean)
358 358
359 359 if listsubrepos:
360 360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 361 try:
362 362 rev2 = ctx2.subrev(subpath)
363 363 except KeyError:
364 364 # A subrepo that existed in node1 was deleted between
365 365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 366 # won't contain that subpath. The best we can do is ignore it.
367 367 rev2 = None
368 368 submatch = matchmod.subdirmatcher(subpath, match)
369 369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 370 clean=listclean, unknown=listunknown,
371 371 listsubrepos=True)
372 372 for rfiles, sfiles in zip(r, s):
373 373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374 374
375 375 for l in r:
376 376 l.sort()
377 377
378 378 return r
379 379
380 380 class changectx(basectx):
381 381 """A changecontext object makes access to data related to a particular
382 382 changeset convenient. It represents a read-only context already present in
383 383 the repo."""
384 384 def __init__(self, repo, changeid='.'):
385 385 """changeid is a revision number, node, or tag"""
386 386 super(changectx, self).__init__(repo)
387 387
388 388 try:
389 389 if isinstance(changeid, int):
390 390 self._node = repo.changelog.node(changeid)
391 391 self._rev = changeid
392 392 return
393 393 elif changeid == 'null':
394 394 self._node = nullid
395 395 self._rev = nullrev
396 396 return
397 397 elif changeid == 'tip':
398 398 self._node = repo.changelog.tip()
399 399 self._rev = repo.changelog.rev(self._node)
400 400 return
401 401 elif (changeid == '.'
402 402 or repo.local() and changeid == repo.dirstate.p1()):
403 403 # this is a hack to delay/avoid loading obsmarkers
404 404 # when we know that '.' won't be hidden
405 405 self._node = repo.dirstate.p1()
406 406 self._rev = repo.unfiltered().changelog.rev(self._node)
407 407 return
408 408 elif len(changeid) == 20:
409 409 try:
410 410 self._node = changeid
411 411 self._rev = repo.changelog.rev(changeid)
412 412 return
413 413 except error.FilteredLookupError:
414 414 raise
415 415 except LookupError:
416 416 # check if it might have come from damaged dirstate
417 417 #
418 418 # XXX we could avoid the unfiltered if we had a recognizable
419 419 # exception for filtered changeset access
420 420 if (repo.local()
421 421 and changeid in repo.unfiltered().dirstate.parents()):
422 422 msg = _("working directory has unknown parent '%s'!")
423 423 raise error.Abort(msg % short(changeid))
424 424 changeid = hex(changeid) # for the error message
425 425
426 426 elif len(changeid) == 40:
427 427 try:
428 428 self._node = bin(changeid)
429 429 self._rev = repo.changelog.rev(self._node)
430 430 return
431 431 except error.FilteredLookupError:
432 432 raise
433 433 except (TypeError, LookupError):
434 434 pass
435 else:
436 raise error.ProgrammingError(
437 "unsupported changeid '%s' of type %s" %
438 (changeid, type(changeid)))
435 439
436 440 # lookup failed
437 441 except (error.FilteredIndexError, error.FilteredLookupError):
438 442 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
439 443 % pycompat.bytestr(changeid))
440 444 except error.FilteredRepoLookupError:
441 445 raise
442 446 except IndexError:
443 447 pass
444 448 raise error.RepoLookupError(
445 449 _("unknown revision '%s'") % changeid)
446 450
447 451 def __hash__(self):
448 452 try:
449 453 return hash(self._rev)
450 454 except AttributeError:
451 455 return id(self)
452 456
453 457 def __nonzero__(self):
454 458 return self._rev != nullrev
455 459
456 460 __bool__ = __nonzero__
457 461
458 462 @propertycache
459 463 def _changeset(self):
460 464 return self._repo.changelog.changelogrevision(self.rev())
461 465
462 466 @propertycache
463 467 def _manifest(self):
464 468 return self._manifestctx.read()
465 469
466 470 @property
467 471 def _manifestctx(self):
468 472 return self._repo.manifestlog[self._changeset.manifest]
469 473
470 474 @propertycache
471 475 def _manifestdelta(self):
472 476 return self._manifestctx.readdelta()
473 477
474 478 @propertycache
475 479 def _parents(self):
476 480 repo = self._repo
477 481 p1, p2 = repo.changelog.parentrevs(self._rev)
478 482 if p2 == nullrev:
479 483 return [changectx(repo, p1)]
480 484 return [changectx(repo, p1), changectx(repo, p2)]
481 485
482 486 def changeset(self):
483 487 c = self._changeset
484 488 return (
485 489 c.manifest,
486 490 c.user,
487 491 c.date,
488 492 c.files,
489 493 c.description,
490 494 c.extra,
491 495 )
492 496 def manifestnode(self):
493 497 return self._changeset.manifest
494 498
495 499 def user(self):
496 500 return self._changeset.user
497 501 def date(self):
498 502 return self._changeset.date
499 503 def files(self):
500 504 return self._changeset.files
501 505 def description(self):
502 506 return self._changeset.description
503 507 def branch(self):
504 508 return encoding.tolocal(self._changeset.extra.get("branch"))
505 509 def closesbranch(self):
506 510 return 'close' in self._changeset.extra
507 511 def extra(self):
508 512 """Return a dict of extra information."""
509 513 return self._changeset.extra
510 514 def tags(self):
511 515 """Return a list of byte tag names"""
512 516 return self._repo.nodetags(self._node)
513 517 def bookmarks(self):
514 518 """Return a list of byte bookmark names."""
515 519 return self._repo.nodebookmarks(self._node)
516 520 def phase(self):
517 521 return self._repo._phasecache.phase(self._repo, self._rev)
518 522 def hidden(self):
519 523 return self._rev in repoview.filterrevs(self._repo, 'visible')
520 524
521 525 def isinmemory(self):
522 526 return False
523 527
524 528 def children(self):
525 529 """return list of changectx contexts for each child changeset.
526 530
527 531 This returns only the immediate child changesets. Use descendants() to
528 532 recursively walk children.
529 533 """
530 534 c = self._repo.changelog.children(self._node)
531 535 return [changectx(self._repo, x) for x in c]
532 536
533 537 def ancestors(self):
534 538 for a in self._repo.changelog.ancestors([self._rev]):
535 539 yield changectx(self._repo, a)
536 540
537 541 def descendants(self):
538 542 """Recursively yield all children of the changeset.
539 543
540 544 For just the immediate children, use children()
541 545 """
542 546 for d in self._repo.changelog.descendants([self._rev]):
543 547 yield changectx(self._repo, d)
544 548
545 549 def filectx(self, path, fileid=None, filelog=None):
546 550 """get a file context from this changeset"""
547 551 if fileid is None:
548 552 fileid = self.filenode(path)
549 553 return filectx(self._repo, path, fileid=fileid,
550 554 changectx=self, filelog=filelog)
551 555
552 556 def ancestor(self, c2, warn=False):
553 557 """return the "best" ancestor context of self and c2
554 558
555 559 If there are multiple candidates, it will show a message and check
556 560 merge.preferancestor configuration before falling back to the
557 561 revlog ancestor."""
558 562 # deal with workingctxs
559 563 n2 = c2._node
560 564 if n2 is None:
561 565 n2 = c2._parents[0]._node
562 566 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
563 567 if not cahs:
564 568 anc = nullid
565 569 elif len(cahs) == 1:
566 570 anc = cahs[0]
567 571 else:
568 572 # experimental config: merge.preferancestor
569 573 for r in self._repo.ui.configlist('merge', 'preferancestor'):
570 574 try:
571 575 ctx = scmutil.revsymbol(self._repo, r)
572 576 except error.RepoLookupError:
573 577 continue
574 578 anc = ctx.node()
575 579 if anc in cahs:
576 580 break
577 581 else:
578 582 anc = self._repo.changelog.ancestor(self._node, n2)
579 583 if warn:
580 584 self._repo.ui.status(
581 585 (_("note: using %s as ancestor of %s and %s\n") %
582 586 (short(anc), short(self._node), short(n2))) +
583 587 ''.join(_(" alternatively, use --config "
584 588 "merge.preferancestor=%s\n") %
585 589 short(n) for n in sorted(cahs) if n != anc))
586 590 return changectx(self._repo, anc)
587 591
588 592 def descendant(self, other):
589 593 """True if other is descendant of this changeset"""
590 594 return self._repo.changelog.descendant(self._rev, other._rev)
591 595
592 596 def walk(self, match):
593 597 '''Generates matching file names.'''
594 598
595 599 # Wrap match.bad method to have message with nodeid
596 600 def bad(fn, msg):
597 601 # The manifest doesn't know about subrepos, so don't complain about
598 602 # paths into valid subrepos.
599 603 if any(fn == s or fn.startswith(s + '/')
600 604 for s in self.substate):
601 605 return
602 606 match.bad(fn, _('no such file in rev %s') % self)
603 607
604 608 m = matchmod.badmatch(match, bad)
605 609 return self._manifest.walk(m)
606 610
607 611 def matches(self, match):
608 612 return self.walk(match)
609 613
610 614 class basefilectx(object):
611 615 """A filecontext object represents the common logic for its children:
612 616 filectx: read-only access to a filerevision that is already present
613 617 in the repo,
614 618 workingfilectx: a filecontext that represents files from the working
615 619 directory,
616 620 memfilectx: a filecontext that represents files in-memory,
617 621 overlayfilectx: duplicate another filecontext with some fields overridden.
618 622 """
619 623 @propertycache
620 624 def _filelog(self):
621 625 return self._repo.file(self._path)
622 626
623 627 @propertycache
624 628 def _changeid(self):
625 629 if r'_changeid' in self.__dict__:
626 630 return self._changeid
627 631 elif r'_changectx' in self.__dict__:
628 632 return self._changectx.rev()
629 633 elif r'_descendantrev' in self.__dict__:
630 634 # this file context was created from a revision with a known
631 635 # descendant, we can (lazily) correct for linkrev aliases
632 636 return self._adjustlinkrev(self._descendantrev)
633 637 else:
634 638 return self._filelog.linkrev(self._filerev)
635 639
636 640 @propertycache
637 641 def _filenode(self):
638 642 if r'_fileid' in self.__dict__:
639 643 return self._filelog.lookup(self._fileid)
640 644 else:
641 645 return self._changectx.filenode(self._path)
642 646
643 647 @propertycache
644 648 def _filerev(self):
645 649 return self._filelog.rev(self._filenode)
646 650
647 651 @propertycache
648 652 def _repopath(self):
649 653 return self._path
650 654
651 655 def __nonzero__(self):
652 656 try:
653 657 self._filenode
654 658 return True
655 659 except error.LookupError:
656 660 # file is missing
657 661 return False
658 662
659 663 __bool__ = __nonzero__
660 664
661 665 def __bytes__(self):
662 666 try:
663 667 return "%s@%s" % (self.path(), self._changectx)
664 668 except error.LookupError:
665 669 return "%s@???" % self.path()
666 670
667 671 __str__ = encoding.strmethod(__bytes__)
668 672
669 673 def __repr__(self):
670 674 return r"<%s %s>" % (type(self).__name__, str(self))
671 675
672 676 def __hash__(self):
673 677 try:
674 678 return hash((self._path, self._filenode))
675 679 except AttributeError:
676 680 return id(self)
677 681
678 682 def __eq__(self, other):
679 683 try:
680 684 return (type(self) == type(other) and self._path == other._path
681 685 and self._filenode == other._filenode)
682 686 except AttributeError:
683 687 return False
684 688
685 689 def __ne__(self, other):
686 690 return not (self == other)
687 691
688 692 def filerev(self):
689 693 return self._filerev
690 694 def filenode(self):
691 695 return self._filenode
692 696 @propertycache
693 697 def _flags(self):
694 698 return self._changectx.flags(self._path)
695 699 def flags(self):
696 700 return self._flags
697 701 def filelog(self):
698 702 return self._filelog
699 703 def rev(self):
700 704 return self._changeid
701 705 def linkrev(self):
702 706 return self._filelog.linkrev(self._filerev)
703 707 def node(self):
704 708 return self._changectx.node()
705 709 def hex(self):
706 710 return self._changectx.hex()
707 711 def user(self):
708 712 return self._changectx.user()
709 713 def date(self):
710 714 return self._changectx.date()
711 715 def files(self):
712 716 return self._changectx.files()
713 717 def description(self):
714 718 return self._changectx.description()
715 719 def branch(self):
716 720 return self._changectx.branch()
717 721 def extra(self):
718 722 return self._changectx.extra()
719 723 def phase(self):
720 724 return self._changectx.phase()
721 725 def phasestr(self):
722 726 return self._changectx.phasestr()
723 727 def obsolete(self):
724 728 return self._changectx.obsolete()
725 729 def instabilities(self):
726 730 return self._changectx.instabilities()
727 731 def manifest(self):
728 732 return self._changectx.manifest()
729 733 def changectx(self):
730 734 return self._changectx
731 735 def renamed(self):
732 736 return self._copied
733 737 def repo(self):
734 738 return self._repo
735 739 def size(self):
736 740 return len(self.data())
737 741
738 742 def path(self):
739 743 return self._path
740 744
741 745 def isbinary(self):
742 746 try:
743 747 return stringutil.binary(self.data())
744 748 except IOError:
745 749 return False
746 750 def isexec(self):
747 751 return 'x' in self.flags()
748 752 def islink(self):
749 753 return 'l' in self.flags()
750 754
751 755 def isabsent(self):
752 756 """whether this filectx represents a file not in self._changectx
753 757
754 758 This is mainly for merge code to detect change/delete conflicts. This is
755 759 expected to be True for all subclasses of basectx."""
756 760 return False
757 761
758 762 _customcmp = False
759 763 def cmp(self, fctx):
760 764 """compare with other file context
761 765
762 766 returns True if different than fctx.
763 767 """
764 768 if fctx._customcmp:
765 769 return fctx.cmp(self)
766 770
767 771 if (fctx._filenode is None
768 772 and (self._repo._encodefilterpats
769 773 # if file data starts with '\1\n', empty metadata block is
770 774 # prepended, which adds 4 bytes to filelog.size().
771 775 or self.size() - 4 == fctx.size())
772 776 or self.size() == fctx.size()):
773 777 return self._filelog.cmp(self._filenode, fctx.data())
774 778
775 779 return True
776 780
777 781 def _adjustlinkrev(self, srcrev, inclusive=False):
778 782 """return the first ancestor of <srcrev> introducing <fnode>
779 783
780 784 If the linkrev of the file revision does not point to an ancestor of
781 785 srcrev, we'll walk down the ancestors until we find one introducing
782 786 this file revision.
783 787
784 788 :srcrev: the changeset revision we search ancestors from
785 789 :inclusive: if true, the src revision will also be checked
786 790 """
787 791 repo = self._repo
788 792 cl = repo.unfiltered().changelog
789 793 mfl = repo.manifestlog
790 794 # fetch the linkrev
791 795 lkr = self.linkrev()
792 796 # hack to reuse ancestor computation when searching for renames
793 797 memberanc = getattr(self, '_ancestrycontext', None)
794 798 iteranc = None
795 799 if srcrev is None:
796 800 # wctx case, used by workingfilectx during mergecopy
797 801 revs = [p.rev() for p in self._repo[None].parents()]
798 802 inclusive = True # we skipped the real (revless) source
799 803 else:
800 804 revs = [srcrev]
801 805 if memberanc is None:
802 806 memberanc = iteranc = cl.ancestors(revs, lkr,
803 807 inclusive=inclusive)
804 808 # check if this linkrev is an ancestor of srcrev
805 809 if lkr not in memberanc:
806 810 if iteranc is None:
807 811 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
808 812 fnode = self._filenode
809 813 path = self._path
810 814 for a in iteranc:
811 815 ac = cl.read(a) # get changeset data (we avoid object creation)
812 816 if path in ac[3]: # checking the 'files' field.
813 817 # The file has been touched, check if the content is
814 818 # similar to the one we search for.
815 819 if fnode == mfl[ac[0]].readfast().get(path):
816 820 return a
817 821 # In theory, we should never get out of that loop without a result.
818 822 # But if the manifest uses a buggy file revision (not a child of the
819 823 # one it replaces) we could. Such a buggy situation will likely
820 824 # result in a crash somewhere else at some point.
821 825 return lkr
822 826
823 827 def introrev(self):
824 828 """return the rev of the changeset which introduced this file revision
825 829
826 830 This method is different from linkrev because it takes into account the
827 831 changeset the filectx was created from. It ensures the returned
828 832 revision is one of its ancestors. This prevents bugs from
829 833 'linkrev-shadowing' when a file revision is used by multiple
830 834 changesets.
831 835 """
832 836 lkr = self.linkrev()
833 837 attrs = vars(self)
834 838 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
835 839 if noctx or self.rev() == lkr:
836 840 return self.linkrev()
837 841 return self._adjustlinkrev(self.rev(), inclusive=True)
838 842
839 843 def introfilectx(self):
840 844 """Return filectx having identical contents, but pointing to the
841 845 changeset revision where this filectx was introduced"""
842 846 introrev = self.introrev()
843 847 if self.rev() == introrev:
844 848 return self
845 849 return self.filectx(self.filenode(), changeid=introrev)
846 850
847 851 def _parentfilectx(self, path, fileid, filelog):
848 852 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
849 853 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
850 854 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
851 855 # If self is associated with a changeset (probably explicitly
852 856 # fed), ensure the created filectx is associated with a
853 857 # changeset that is an ancestor of self.changectx.
854 858 # This lets us later use _adjustlinkrev to get a correct link.
855 859 fctx._descendantrev = self.rev()
856 860 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
857 861 elif r'_descendantrev' in vars(self):
858 862 # Otherwise propagate _descendantrev if we have one associated.
859 863 fctx._descendantrev = self._descendantrev
860 864 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
861 865 return fctx
862 866
863 867 def parents(self):
864 868 _path = self._path
865 869 fl = self._filelog
866 870 parents = self._filelog.parents(self._filenode)
867 871 pl = [(_path, node, fl) for node in parents if node != nullid]
868 872
869 873 r = fl.renamed(self._filenode)
870 874 if r:
871 875 # - In the simple rename case, both parents are nullid, pl is empty.
872 876 # - In case of merge, only one of the parents is nullid and should
873 877 # be replaced with the rename information. This parent is -always-
874 878 # the first one.
875 879 #
876 880 # As nullid has always been filtered out in the previous list
877 881 # comprehension, inserting at 0 will always result in replacing the
878 882 # first nullid parent with the rename information.
879 883 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
880 884
881 885 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
882 886
883 887 def p1(self):
884 888 return self.parents()[0]
885 889
886 890 def p2(self):
887 891 p = self.parents()
888 892 if len(p) == 2:
889 893 return p[1]
890 894 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
891 895
892 896 def annotate(self, follow=False, skiprevs=None, diffopts=None):
893 897 """Returns a list of annotateline objects for each line in the file
894 898
895 899 - line.fctx is the filectx of the node where that line was last changed
896 900 - line.lineno is the line number at the first appearance in the managed
897 901 file
898 902 - line.text is the data on that line (including newline character)
899 903 """
900 904 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
901 905
902 906 def parents(f):
903 907 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
904 908 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
905 909 # from the topmost introrev (= srcrev) down to p.linkrev() if it
906 910 # isn't an ancestor of the srcrev.
907 911 f._changeid
908 912 pl = f.parents()
909 913
910 914 # Don't return renamed parents if we aren't following.
911 915 if not follow:
912 916 pl = [p for p in pl if p.path() == f.path()]
913 917
914 918 # renamed filectx won't have a filelog yet, so set it
915 919 # from the cache to save time
916 920 for p in pl:
917 921 if not r'_filelog' in p.__dict__:
918 922 p._filelog = getlog(p.path())
919 923
920 924 return pl
921 925
922 926 # use linkrev to find the first changeset where self appeared
923 927 base = self.introfilectx()
924 928 if getattr(base, '_ancestrycontext', None) is None:
925 929 cl = self._repo.changelog
926 930 if base.rev() is None:
927 931 # wctx is not inclusive, but works because _ancestrycontext
928 932 # is used to test filelog revisions
929 933 ac = cl.ancestors([p.rev() for p in base.parents()],
930 934 inclusive=True)
931 935 else:
932 936 ac = cl.ancestors([base.rev()], inclusive=True)
933 937 base._ancestrycontext = ac
934 938
935 939 return dagop.annotate(base, parents, skiprevs=skiprevs,
936 940 diffopts=diffopts)
937 941
938 942 def ancestors(self, followfirst=False):
939 943 visit = {}
940 944 c = self
941 945 if followfirst:
942 946 cut = 1
943 947 else:
944 948 cut = None
945 949
946 950 while True:
947 951 for parent in c.parents()[:cut]:
948 952 visit[(parent.linkrev(), parent.filenode())] = parent
949 953 if not visit:
950 954 break
951 955 c = visit.pop(max(visit))
952 956 yield c
953 957
954 958 def decodeddata(self):
955 959 """Returns `data()` after running repository decoding filters.
956 960
957 961 This is often equivalent to how the data would be expressed on disk.
958 962 """
959 963 return self._repo.wwritedata(self.path(), self.data())
960 964
961 965 class filectx(basefilectx):
962 966 """A filecontext object makes access to data related to a particular
963 967 filerevision convenient."""
964 968 def __init__(self, repo, path, changeid=None, fileid=None,
965 969 filelog=None, changectx=None):
966 970 """changeid can be a changeset revision, node, or tag.
967 971 fileid can be a file revision or node."""
968 972 self._repo = repo
969 973 self._path = path
970 974
971 975 assert (changeid is not None
972 976 or fileid is not None
973 977 or changectx is not None), \
974 978 ("bad args: changeid=%r, fileid=%r, changectx=%r"
975 979 % (changeid, fileid, changectx))
976 980
977 981 if filelog is not None:
978 982 self._filelog = filelog
979 983
980 984 if changeid is not None:
981 985 self._changeid = changeid
982 986 if changectx is not None:
983 987 self._changectx = changectx
984 988 if fileid is not None:
985 989 self._fileid = fileid
986 990
987 991 @propertycache
988 992 def _changectx(self):
989 993 try:
990 994 return changectx(self._repo, self._changeid)
991 995 except error.FilteredRepoLookupError:
992 996 # Linkrev may point to any revision in the repository. When the
993 997 # repository is filtered this may lead to `filectx` trying to build
994 998 # `changectx` for a filtered revision. In such a case we fall back to
995 999 # creating `changectx` on the unfiltered version of the repository.
996 1000 # This fallback should not be an issue because `changectx` from
997 1001 # `filectx` are not used in complex operations that care about
998 1002 # filtering.
999 1003 #
1000 1004 # This fallback is a cheap and dirty fix that prevents several
1001 1005 # crashes. It does not ensure the behavior is correct. However the
1002 1006 # behavior was not correct before filtering either and "incorrect
1003 1007 # behavior" is seen as better than "crash"
1004 1008 #
1005 1009 # Linkrevs have several serious problems with filtering that are
1006 1010 # complicated to solve. Proper handling of the issue here should be
1007 1011 # considered when a solution to the linkrev issues is on the table.
1008 1012 return changectx(self._repo.unfiltered(), self._changeid)
1009 1013
1010 1014 def filectx(self, fileid, changeid=None):
1011 1015 '''opens an arbitrary revision of the file without
1012 1016 opening a new filelog'''
1013 1017 return filectx(self._repo, self._path, fileid=fileid,
1014 1018 filelog=self._filelog, changeid=changeid)
1015 1019
1016 1020 def rawdata(self):
1017 1021 return self._filelog.revision(self._filenode, raw=True)
1018 1022
1019 1023 def rawflags(self):
1020 1024 """low-level revlog flags"""
1021 1025 return self._filelog.flags(self._filerev)
1022 1026
1023 1027 def data(self):
1024 1028 try:
1025 1029 return self._filelog.read(self._filenode)
1026 1030 except error.CensoredNodeError:
1027 1031 if self._repo.ui.config("censor", "policy") == "ignore":
1028 1032 return ""
1029 1033 raise error.Abort(_("censored node: %s") % short(self._filenode),
1030 1034 hint=_("set censor.policy to ignore errors"))
1031 1035
1032 1036 def size(self):
1033 1037 return self._filelog.size(self._filerev)
1034 1038
1035 1039 @propertycache
1036 1040 def _copied(self):
1037 1041 """check if file was actually renamed in this changeset revision
1038 1042
1039 1043 If a rename is logged in the file revision, we report a copy for the changeset only
1040 1044 if the file revision's linkrev points back to the changeset in question
1041 1045 or both changeset parents contain different file revisions.
1042 1046 """
1043 1047
1044 1048 renamed = self._filelog.renamed(self._filenode)
1045 1049 if not renamed:
1046 1050 return renamed
1047 1051
1048 1052 if self.rev() == self.linkrev():
1049 1053 return renamed
1050 1054
1051 1055 name = self.path()
1052 1056 fnode = self._filenode
1053 1057 for p in self._changectx.parents():
1054 1058 try:
1055 1059 if fnode == p.filenode(name):
1056 1060 return None
1057 1061 except error.LookupError:
1058 1062 pass
1059 1063 return renamed
1060 1064
1061 1065 def children(self):
1062 1066 # hard for renames
1063 1067 c = self._filelog.children(self._filenode)
1064 1068 return [filectx(self._repo, self._path, fileid=x,
1065 1069 filelog=self._filelog) for x in c]
1066 1070
1067 1071 class committablectx(basectx):
1068 1072 """A committablectx object provides common functionality for a context that
1069 1073 wants the ability to commit, e.g. workingctx or memctx."""
1070 1074 def __init__(self, repo, text="", user=None, date=None, extra=None,
1071 1075 changes=None):
1072 1076 super(committablectx, self).__init__(repo)
1073 1077 self._rev = None
1074 1078 self._node = None
1075 1079 self._text = text
1076 1080 if date:
1077 1081 self._date = dateutil.parsedate(date)
1078 1082 if user:
1079 1083 self._user = user
1080 1084 if changes:
1081 1085 self._status = changes
1082 1086
1083 1087 self._extra = {}
1084 1088 if extra:
1085 1089 self._extra = extra.copy()
1086 1090 if 'branch' not in self._extra:
1087 1091 try:
1088 1092 branch = encoding.fromlocal(self._repo.dirstate.branch())
1089 1093 except UnicodeDecodeError:
1090 1094 raise error.Abort(_('branch name not in UTF-8!'))
1091 1095 self._extra['branch'] = branch
1092 1096 if self._extra['branch'] == '':
1093 1097 self._extra['branch'] = 'default'
1094 1098
1095 1099 def __bytes__(self):
1096 1100 return bytes(self._parents[0]) + "+"
1097 1101
1098 1102 __str__ = encoding.strmethod(__bytes__)
1099 1103
1100 1104 def __nonzero__(self):
1101 1105 return True
1102 1106
1103 1107 __bool__ = __nonzero__
1104 1108
1105 1109 def _buildflagfunc(self):
1106 1110 # Create a fallback function for getting file flags when the
1107 1111 # filesystem doesn't support them
1108 1112
1109 1113 copiesget = self._repo.dirstate.copies().get
1110 1114 parents = self.parents()
1111 1115 if len(parents) < 2:
1112 1116 # when we have one parent, it's easy: copy from parent
1113 1117 man = parents[0].manifest()
1114 1118 def func(f):
1115 1119 f = copiesget(f, f)
1116 1120 return man.flags(f)
1117 1121 else:
1118 1122 # merges are tricky: we try to reconstruct the unstored
1119 1123 # result from the merge (issue1802)
1120 1124 p1, p2 = parents
1121 1125 pa = p1.ancestor(p2)
1122 1126 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1123 1127
1124 1128 def func(f):
1125 1129 f = copiesget(f, f) # may be wrong for merges with copies
1126 1130 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1127 1131 if fl1 == fl2:
1128 1132 return fl1
1129 1133 if fl1 == fla:
1130 1134 return fl2
1131 1135 if fl2 == fla:
1132 1136 return fl1
1133 1137 return '' # punt for conflicts
1134 1138
1135 1139 return func
1136 1140
1137 1141 @propertycache
1138 1142 def _flagfunc(self):
1139 1143 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1140 1144
1141 1145 @propertycache
1142 1146 def _status(self):
1143 1147 return self._repo.status()
1144 1148
1145 1149 @propertycache
1146 1150 def _user(self):
1147 1151 return self._repo.ui.username()
1148 1152
1149 1153 @propertycache
1150 1154 def _date(self):
1151 1155 ui = self._repo.ui
1152 1156 date = ui.configdate('devel', 'default-date')
1153 1157 if date is None:
1154 1158 date = dateutil.makedate()
1155 1159 return date
1156 1160
1157 1161 def subrev(self, subpath):
1158 1162 return None
1159 1163
1160 1164 def manifestnode(self):
1161 1165 return None
1162 1166 def user(self):
1163 1167 return self._user or self._repo.ui.username()
1164 1168 def date(self):
1165 1169 return self._date
1166 1170 def description(self):
1167 1171 return self._text
1168 1172 def files(self):
1169 1173 return sorted(self._status.modified + self._status.added +
1170 1174 self._status.removed)
1171 1175
1172 1176 def modified(self):
1173 1177 return self._status.modified
1174 1178 def added(self):
1175 1179 return self._status.added
1176 1180 def removed(self):
1177 1181 return self._status.removed
1178 1182 def deleted(self):
1179 1183 return self._status.deleted
1180 1184 def branch(self):
1181 1185 return encoding.tolocal(self._extra['branch'])
1182 1186 def closesbranch(self):
1183 1187 return 'close' in self._extra
1184 1188 def extra(self):
1185 1189 return self._extra
1186 1190
1187 1191 def isinmemory(self):
1188 1192 return False
1189 1193
1190 1194 def tags(self):
1191 1195 return []
1192 1196
1193 1197 def bookmarks(self):
1194 1198 b = []
1195 1199 for p in self.parents():
1196 1200 b.extend(p.bookmarks())
1197 1201 return b
1198 1202
1199 1203 def phase(self):
1200 1204 phase = phases.draft # default phase to draft
1201 1205 for p in self.parents():
1202 1206 phase = max(phase, p.phase())
1203 1207 return phase
1204 1208
1205 1209 def hidden(self):
1206 1210 return False
1207 1211
1208 1212 def children(self):
1209 1213 return []
1210 1214
1211 1215 def flags(self, path):
1212 1216 if r'_manifest' in self.__dict__:
1213 1217 try:
1214 1218 return self._manifest.flags(path)
1215 1219 except KeyError:
1216 1220 return ''
1217 1221
1218 1222 try:
1219 1223 return self._flagfunc(path)
1220 1224 except OSError:
1221 1225 return ''
1222 1226
1223 1227 def ancestor(self, c2):
1224 1228 """return the "best" ancestor context of self and c2"""
1225 1229 return self._parents[0].ancestor(c2) # punt on two parents for now
1226 1230
1227 1231 def walk(self, match):
1228 1232 '''Generates matching file names.'''
1229 1233 return sorted(self._repo.dirstate.walk(match,
1230 1234 subrepos=sorted(self.substate),
1231 1235 unknown=True, ignored=False))
1232 1236
1233 1237 def matches(self, match):
1234 1238 ds = self._repo.dirstate
1235 1239 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1236 1240
1237 1241 def ancestors(self):
1238 1242 for p in self._parents:
1239 1243 yield p
1240 1244 for a in self._repo.changelog.ancestors(
1241 1245 [p.rev() for p in self._parents]):
1242 1246 yield changectx(self._repo, a)
1243 1247
1244 1248 def markcommitted(self, node):
1245 1249 """Perform post-commit cleanup necessary after committing this ctx
1246 1250
1247 1251 Specifically, this updates backing stores this working context
1248 1252 wraps to reflect the fact that the changes reflected by this
1249 1253 workingctx have been committed. For example, it marks
1250 1254 modified and added files as normal in the dirstate.
1251 1255
1252 1256 """
1253 1257
1254 1258 with self._repo.dirstate.parentchange():
1255 1259 for f in self.modified() + self.added():
1256 1260 self._repo.dirstate.normal(f)
1257 1261 for f in self.removed():
1258 1262 self._repo.dirstate.drop(f)
1259 1263 self._repo.dirstate.setparents(node)
1260 1264
1261 1265 # write changes out explicitly, because nesting wlock at
1262 1266 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1263 1267 # from immediately doing so for subsequent changing files
1264 1268 self._repo.dirstate.write(self._repo.currenttransaction())
1265 1269
1266 1270 def dirty(self, missing=False, merge=True, branch=True):
1267 1271 return False
1268 1272
1269 1273 class workingctx(committablectx):
1270 1274 """A workingctx object makes access to data related to
1271 1275 the current working directory convenient.
1272 1276 date - any valid date string or (unixtime, offset), or None.
1273 1277 user - username string, or None.
1274 1278 extra - a dictionary of extra values, or None.
1275 1279 changes - a list of file lists as returned by localrepo.status()
1276 1280 or None to use the repository status.
1277 1281 """
1278 1282 def __init__(self, repo, text="", user=None, date=None, extra=None,
1279 1283 changes=None):
1280 1284 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1281 1285
1282 1286 def __iter__(self):
1283 1287 d = self._repo.dirstate
1284 1288 for f in d:
1285 1289 if d[f] != 'r':
1286 1290 yield f
1287 1291
1288 1292 def __contains__(self, key):
1289 1293 return self._repo.dirstate[key] not in "?r"
1290 1294
1291 1295 def hex(self):
1292 1296 return hex(wdirid)
1293 1297
1294 1298 @propertycache
1295 1299 def _parents(self):
1296 1300 p = self._repo.dirstate.parents()
1297 1301 if p[1] == nullid:
1298 1302 p = p[:-1]
1299 1303 return [changectx(self._repo, x) for x in p]
1300 1304
1301 1305 def _fileinfo(self, path):
1302 1306 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1303 1307 self._manifest
1304 1308 return super(workingctx, self)._fileinfo(path)
1305 1309
1306 1310 def filectx(self, path, filelog=None):
1307 1311 """get a file context from the working directory"""
1308 1312 return workingfilectx(self._repo, path, workingctx=self,
1309 1313 filelog=filelog)
1310 1314
1311 1315 def dirty(self, missing=False, merge=True, branch=True):
1312 1316 "check whether a working directory is modified"
1313 1317 # check subrepos first
1314 1318 for s in sorted(self.substate):
1315 1319 if self.sub(s).dirty(missing=missing):
1316 1320 return True
1317 1321 # check current working dir
1318 1322 return ((merge and self.p2()) or
1319 1323 (branch and self.branch() != self.p1().branch()) or
1320 1324 self.modified() or self.added() or self.removed() or
1321 1325 (missing and self.deleted()))
1322 1326
1323 1327 def add(self, list, prefix=""):
1324 1328 with self._repo.wlock():
1325 1329 ui, ds = self._repo.ui, self._repo.dirstate
1326 1330 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1327 1331 rejected = []
1328 1332 lstat = self._repo.wvfs.lstat
1329 1333 for f in list:
1330 1334 # ds.pathto() returns an absolute file when this is invoked from
1331 1335 # the keyword extension. That gets flagged as non-portable on
1332 1336 # Windows, since it contains the drive letter and colon.
1333 1337 scmutil.checkportable(ui, os.path.join(prefix, f))
1334 1338 try:
1335 1339 st = lstat(f)
1336 1340 except OSError:
1337 1341 ui.warn(_("%s does not exist!\n") % uipath(f))
1338 1342 rejected.append(f)
1339 1343 continue
1340 1344 if st.st_size > 10000000:
1341 1345 ui.warn(_("%s: up to %d MB of RAM may be required "
1342 1346 "to manage this file\n"
1343 1347 "(use 'hg revert %s' to cancel the "
1344 1348 "pending addition)\n")
1345 1349 % (f, 3 * st.st_size // 1000000, uipath(f)))
1346 1350 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1347 1351 ui.warn(_("%s not added: only files and symlinks "
1348 1352 "supported currently\n") % uipath(f))
1349 1353 rejected.append(f)
1350 1354 elif ds[f] in 'amn':
1351 1355 ui.warn(_("%s already tracked!\n") % uipath(f))
1352 1356 elif ds[f] == 'r':
1353 1357 ds.normallookup(f)
1354 1358 else:
1355 1359 ds.add(f)
1356 1360 return rejected
1357 1361
1358 1362 def forget(self, files, prefix=""):
1359 1363 with self._repo.wlock():
1360 1364 ds = self._repo.dirstate
1361 1365 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1362 1366 rejected = []
1363 1367 for f in files:
1364 1368 if f not in self._repo.dirstate:
1365 1369 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1366 1370 rejected.append(f)
1367 1371 elif self._repo.dirstate[f] != 'a':
1368 1372 self._repo.dirstate.remove(f)
1369 1373 else:
1370 1374 self._repo.dirstate.drop(f)
1371 1375 return rejected
1372 1376
1373 1377 def undelete(self, list):
1374 1378 pctxs = self.parents()
1375 1379 with self._repo.wlock():
1376 1380 ds = self._repo.dirstate
1377 1381 for f in list:
1378 1382 if self._repo.dirstate[f] != 'r':
1379 1383 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1380 1384 else:
1381 1385 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1382 1386 t = fctx.data()
1383 1387 self._repo.wwrite(f, t, fctx.flags())
1384 1388 self._repo.dirstate.normal(f)
1385 1389
1386 1390 def copy(self, source, dest):
1387 1391 try:
1388 1392 st = self._repo.wvfs.lstat(dest)
1389 1393 except OSError as err:
1390 1394 if err.errno != errno.ENOENT:
1391 1395 raise
1392 1396 self._repo.ui.warn(_("%s does not exist!\n")
1393 1397 % self._repo.dirstate.pathto(dest))
1394 1398 return
1395 1399 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1396 1400 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1397 1401 "symbolic link\n")
1398 1402 % self._repo.dirstate.pathto(dest))
1399 1403 else:
1400 1404 with self._repo.wlock():
1401 1405 if self._repo.dirstate[dest] in '?':
1402 1406 self._repo.dirstate.add(dest)
1403 1407 elif self._repo.dirstate[dest] in 'r':
1404 1408 self._repo.dirstate.normallookup(dest)
1405 1409 self._repo.dirstate.copy(source, dest)
1406 1410
1407 1411 def match(self, pats=None, include=None, exclude=None, default='glob',
1408 1412 listsubrepos=False, badfn=None):
1409 1413 r = self._repo
1410 1414
1411 1415 # Only a case insensitive filesystem needs magic to translate user input
1412 1416 # to actual case in the filesystem.
1413 1417 icasefs = not util.fscasesensitive(r.root)
1414 1418 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1415 1419 default, auditor=r.auditor, ctx=self,
1416 1420 listsubrepos=listsubrepos, badfn=badfn,
1417 1421 icasefs=icasefs)
1418 1422
1419 1423 def _filtersuspectsymlink(self, files):
1420 1424 if not files or self._repo.dirstate._checklink:
1421 1425 return files
1422 1426
1423 1427 # Symlink placeholders may get non-symlink-like contents
1424 1428 # via user error or dereferencing by NFS or Samba servers,
1425 1429 # so we filter out any placeholders that don't look like a
1426 1430 # symlink
1427 1431 sane = []
1428 1432 for f in files:
1429 1433 if self.flags(f) == 'l':
1430 1434 d = self[f].data()
1431 1435 if (d == '' or len(d) >= 1024 or '\n' in d
1432 1436 or stringutil.binary(d)):
1433 1437 self._repo.ui.debug('ignoring suspect symlink placeholder'
1434 1438 ' "%s"\n' % f)
1435 1439 continue
1436 1440 sane.append(f)
1437 1441 return sane
1438 1442
1439 1443 def _checklookup(self, files):
1440 1444 # check for any possibly clean files
1441 1445 if not files:
1442 1446 return [], [], []
1443 1447
1444 1448 modified = []
1445 1449 deleted = []
1446 1450 fixup = []
1447 1451 pctx = self._parents[0]
1448 1452 # do a full compare of any files that might have changed
1449 1453 for f in sorted(files):
1450 1454 try:
1451 1455 # This will return True for a file that got replaced by a
1452 1456 # directory in the interim, but fixing that is pretty hard.
1453 1457 if (f not in pctx or self.flags(f) != pctx.flags(f)
1454 1458 or pctx[f].cmp(self[f])):
1455 1459 modified.append(f)
1456 1460 else:
1457 1461 fixup.append(f)
1458 1462 except (IOError, OSError):
1459 1463 # A file became inaccessible in between? Mark it as deleted,
1460 1464 # matching dirstate behavior (issue5584).
1461 1465 # The dirstate has more complex behavior around whether a
1462 1466 # missing file matches a directory, etc, but we don't need to
1463 1467 # bother with that: if f has made it to this point, we're sure
1464 1468 # it's in the dirstate.
1465 1469 deleted.append(f)
1466 1470
1467 1471 return modified, deleted, fixup
1468 1472
1469 1473 def _poststatusfixup(self, status, fixup):
1470 1474 """update dirstate for files that are actually clean"""
1471 1475 poststatus = self._repo.postdsstatus()
1472 1476 if fixup or poststatus:
1473 1477 try:
1474 1478 oldid = self._repo.dirstate.identity()
1475 1479
1476 1480 # updating the dirstate is optional
1477 1481 # so we don't wait on the lock
1478 1482 # wlock can invalidate the dirstate, so cache normal _after_
1479 1483 # taking the lock
1480 1484 with self._repo.wlock(False):
1481 1485 if self._repo.dirstate.identity() == oldid:
1482 1486 if fixup:
1483 1487 normal = self._repo.dirstate.normal
1484 1488 for f in fixup:
1485 1489 normal(f)
1486 1490 # write changes out explicitly, because nesting
1487 1491 # wlock at runtime may prevent 'wlock.release()'
1488 1492 # after this block from doing so for subsequent
1489 1493 # changing files
1490 1494 tr = self._repo.currenttransaction()
1491 1495 self._repo.dirstate.write(tr)
1492 1496
1493 1497 if poststatus:
1494 1498 for ps in poststatus:
1495 1499 ps(self, status)
1496 1500 else:
1497 1501 # in this case, writing changes out breaks
1498 1502 # consistency, because .hg/dirstate was
1499 1503 # already changed simultaneously after last
1500 1504 # caching (see also issue5584 for detail)
1501 1505 self._repo.ui.debug('skip updating dirstate: '
1502 1506 'identity mismatch\n')
1503 1507 except error.LockError:
1504 1508 pass
1505 1509 finally:
1506 1510 # Even if the wlock couldn't be grabbed, clear out the list.
1507 1511 self._repo.clearpostdsstatus()
1508 1512
1509 1513 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1510 1514 '''Gets the status from the dirstate -- internal use only.'''
1511 1515 subrepos = []
1512 1516 if '.hgsub' in self:
1513 1517 subrepos = sorted(self.substate)
1514 1518 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1515 1519 clean=clean, unknown=unknown)
1516 1520
1517 1521 # check for any possibly clean files
1518 1522 fixup = []
1519 1523 if cmp:
1520 1524 modified2, deleted2, fixup = self._checklookup(cmp)
1521 1525 s.modified.extend(modified2)
1522 1526 s.deleted.extend(deleted2)
1523 1527
1524 1528 if fixup and clean:
1525 1529 s.clean.extend(fixup)
1526 1530
1527 1531 self._poststatusfixup(s, fixup)
1528 1532
1529 1533 if match.always():
1530 1534 # cache for performance
1531 1535 if s.unknown or s.ignored or s.clean:
1532 1536 # "_status" is cached with list*=False in the normal route
1533 1537 self._status = scmutil.status(s.modified, s.added, s.removed,
1534 1538 s.deleted, [], [], [])
1535 1539 else:
1536 1540 self._status = s
1537 1541
1538 1542 return s
1539 1543
1540 1544 @propertycache
1541 1545 def _manifest(self):
1542 1546 """generate a manifest corresponding to the values in self._status
1543 1547
1544 1548 This reuses the file nodeid from the parent, but we use special node
1545 1549 identifiers for added and modified files. This is used by manifest
1546 1550 merge to see that files are different and by update logic to avoid
1547 1551 deleting newly added files.
1548 1552 """
1549 1553 return self._buildstatusmanifest(self._status)
1550 1554
1551 1555 def _buildstatusmanifest(self, status):
1552 1556 """Builds a manifest that includes the given status results."""
1553 1557 parents = self.parents()
1554 1558
1555 1559 man = parents[0].manifest().copy()
1556 1560
1557 1561 ff = self._flagfunc
1558 1562 for i, l in ((addednodeid, status.added),
1559 1563 (modifiednodeid, status.modified)):
1560 1564 for f in l:
1561 1565 man[f] = i
1562 1566 try:
1563 1567 man.setflag(f, ff(f))
1564 1568 except OSError:
1565 1569 pass
1566 1570
1567 1571 for f in status.deleted + status.removed:
1568 1572 if f in man:
1569 1573 del man[f]
1570 1574
1571 1575 return man
1572 1576
1573 1577 def _buildstatus(self, other, s, match, listignored, listclean,
1574 1578 listunknown):
1575 1579 """build a status with respect to another context
1576 1580
1577 1581 This includes logic for maintaining the fast path of status when
1578 1582 comparing the working directory against its parent, which is to skip
1579 1583 building a new manifest if self (working directory) is not comparing
1580 1584 against its parent (repo['.']).
1581 1585 """
1582 1586 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1583 1587 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1584 1588 # might have accidentally ended up with the entire contents of the file
1585 1589 # they are supposed to be linking to.
1586 1590 s.modified[:] = self._filtersuspectsymlink(s.modified)
1587 1591 if other != self._repo['.']:
1588 1592 s = super(workingctx, self)._buildstatus(other, s, match,
1589 1593 listignored, listclean,
1590 1594 listunknown)
1591 1595 return s
1592 1596
1593 1597 def _matchstatus(self, other, match):
1594 1598 """override the match method with a filter for directory patterns
1595 1599
1596 1600 We use inheritance to customize the match.bad method only in cases of
1597 1601 workingctx since it belongs only to the working directory when
1598 1602 comparing against the parent changeset.
1599 1603
1600 1604 If we aren't comparing against the working directory's parent, then we
1601 1605 just use the default match object sent to us.
1602 1606 """
1603 1607 if other != self._repo['.']:
1604 1608 def bad(f, msg):
1605 1609 # 'f' may be a directory pattern from 'match.files()',
1606 1610 # so 'f not in ctx1' is not enough
1607 1611 if f not in other and not other.hasdir(f):
1608 1612 self._repo.ui.warn('%s: %s\n' %
1609 1613 (self._repo.dirstate.pathto(f), msg))
1610 1614 match.bad = bad
1611 1615 return match
1612 1616
1613 1617 def markcommitted(self, node):
1614 1618 super(workingctx, self).markcommitted(node)
1615 1619
1616 1620 sparse.aftercommit(self._repo, node)
1617 1621
1618 1622 class committablefilectx(basefilectx):
1619 1623 """A committablefilectx provides common functionality for a file context
1620 1624 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1621 1625 def __init__(self, repo, path, filelog=None, ctx=None):
1622 1626 self._repo = repo
1623 1627 self._path = path
1624 1628 self._changeid = None
1625 1629 self._filerev = self._filenode = None
1626 1630
1627 1631 if filelog is not None:
1628 1632 self._filelog = filelog
1629 1633 if ctx:
1630 1634 self._changectx = ctx
1631 1635
1632 1636 def __nonzero__(self):
1633 1637 return True
1634 1638
1635 1639 __bool__ = __nonzero__
1636 1640
1637 1641 def linkrev(self):
1638 1642 # linked to self._changectx no matter if file is modified or not
1639 1643 return self.rev()
1640 1644
1641 1645 def parents(self):
1642 1646 '''return parent filectxs, following copies if necessary'''
1643 1647 def filenode(ctx, path):
1644 1648 return ctx._manifest.get(path, nullid)
1645 1649
1646 1650 path = self._path
1647 1651 fl = self._filelog
1648 1652 pcl = self._changectx._parents
1649 1653 renamed = self.renamed()
1650 1654
1651 1655 if renamed:
1652 1656 pl = [renamed + (None,)]
1653 1657 else:
1654 1658 pl = [(path, filenode(pcl[0], path), fl)]
1655 1659
1656 1660 for pc in pcl[1:]:
1657 1661 pl.append((path, filenode(pc, path), fl))
1658 1662
1659 1663 return [self._parentfilectx(p, fileid=n, filelog=l)
1660 1664 for p, n, l in pl if n != nullid]
1661 1665
1662 1666 def children(self):
1663 1667 return []
1664 1668
1665 1669 class workingfilectx(committablefilectx):
1666 1670 """A workingfilectx object makes access to data related to a particular
1667 1671 file in the working directory convenient."""
1668 1672 def __init__(self, repo, path, filelog=None, workingctx=None):
1669 1673 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1670 1674
1671 1675 @propertycache
1672 1676 def _changectx(self):
1673 1677 return workingctx(self._repo)
1674 1678
1675 1679 def data(self):
1676 1680 return self._repo.wread(self._path)
1677 1681 def renamed(self):
1678 1682 rp = self._repo.dirstate.copied(self._path)
1679 1683 if not rp:
1680 1684 return None
1681 1685 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1682 1686
1683 1687 def size(self):
1684 1688 return self._repo.wvfs.lstat(self._path).st_size
1685 1689 def date(self):
1686 1690 t, tz = self._changectx.date()
1687 1691 try:
1688 1692 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1689 1693 except OSError as err:
1690 1694 if err.errno != errno.ENOENT:
1691 1695 raise
1692 1696 return (t, tz)
1693 1697
1694 1698 def exists(self):
1695 1699 return self._repo.wvfs.exists(self._path)
1696 1700
1697 1701 def lexists(self):
1698 1702 return self._repo.wvfs.lexists(self._path)
1699 1703
1700 1704 def audit(self):
1701 1705 return self._repo.wvfs.audit(self._path)
1702 1706
1703 1707 def cmp(self, fctx):
1704 1708 """compare with other file context
1705 1709
1706 1710 returns True if different than fctx.
1707 1711 """
1708 1712 # fctx should be a filectx (not a workingfilectx)
1709 1713 # invert comparison to reuse the same code path
1710 1714 return fctx.cmp(self)
1711 1715
1712 1716 def remove(self, ignoremissing=False):
1713 1717 """wraps unlink for a repo's working directory"""
1714 1718 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1715 1719 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1716 1720 rmdir=rmdir)
1717 1721
1718 1722 def write(self, data, flags, backgroundclose=False, **kwargs):
1719 1723 """wraps repo.wwrite"""
1720 1724 self._repo.wwrite(self._path, data, flags,
1721 1725 backgroundclose=backgroundclose,
1722 1726 **kwargs)
1723 1727
1724 1728 def markcopied(self, src):
1725 1729 """marks this file a copy of `src`"""
1726 1730 if self._repo.dirstate[self._path] in "nma":
1727 1731 self._repo.dirstate.copy(src, self._path)
1728 1732
1729 1733 def clearunknown(self):
1730 1734 """Removes conflicting items in the working directory so that
1731 1735 ``write()`` can be called successfully.
1732 1736 """
1733 1737 wvfs = self._repo.wvfs
1734 1738 f = self._path
1735 1739 wvfs.audit(f)
1736 1740 if wvfs.isdir(f) and not wvfs.islink(f):
1737 1741 wvfs.rmtree(f, forcibly=True)
1738 1742 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1739 1743 for p in reversed(list(util.finddirs(f))):
1740 1744 if wvfs.isfileorlink(p):
1741 1745 wvfs.unlink(p)
1742 1746 break
1743 1747
1744 1748 def setflags(self, l, x):
1745 1749 self._repo.wvfs.setflags(self._path, l, x)
1746 1750
1747 1751 class overlayworkingctx(committablectx):
1748 1752 """Wraps another mutable context with a write-back cache that can be
1749 1753 converted into a commit context.
1750 1754
1751 1755 self._cache[path] maps to a dict with keys: {
1752 1756 'exists': bool?
1753 1757 'date': date?
1754 1758 'data': str?
1755 1759 'flags': str?
1756 1760 'copied': str? (path or None)
1757 1761 }
1758 1762 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1759 1763 is `False`, the file was deleted.
1760 1764 """
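# Illustrative sketch (not part of the upstream docstring): after
# ``write('foo.txt', 'new data', flags='')`` the cache entry created by
# ``_markdirty`` below would look roughly like
#
#   self._cache['foo.txt'] = {
#       'exists': True,
#       'data': 'new data',
#       'date': dateutil.makedate(),
#       'flags': '',
#       'copied': None,
#   }
#
# while ``remove('foo.txt')`` leaves an entry with 'exists': False.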
1761 1765
1762 1766 def __init__(self, repo):
1763 1767 super(overlayworkingctx, self).__init__(repo)
1764 1768 self.clean()
1765 1769
1766 1770 def setbase(self, wrappedctx):
1767 1771 self._wrappedctx = wrappedctx
1768 1772 self._parents = [wrappedctx]
1769 1773 # Drop old manifest cache as it is now out of date.
1770 1774 # This is necessary when, e.g., rebasing several nodes with one
1771 1775 # ``overlayworkingctx`` (e.g. with --collapse).
1772 1776 util.clearcachedproperty(self, '_manifest')
1773 1777
1774 1778 def data(self, path):
1775 1779 if self.isdirty(path):
1776 1780 if self._cache[path]['exists']:
1777 1781 if self._cache[path]['data']:
1778 1782 return self._cache[path]['data']
1779 1783 else:
1780 1784 # Must fallback here, too, because we only set flags.
1781 1785 return self._wrappedctx[path].data()
1782 1786 else:
1783 1787 raise error.ProgrammingError("No such file or directory: %s" %
1784 1788 path)
1785 1789 else:
1786 1790 return self._wrappedctx[path].data()
1787 1791
1788 1792 @propertycache
1789 1793 def _manifest(self):
1790 1794 parents = self.parents()
1791 1795 man = parents[0].manifest().copy()
1792 1796
1793 1797 flag = self._flagfunc
1794 1798 for path in self.added():
1795 1799 man[path] = addednodeid
1796 1800 man.setflag(path, flag(path))
1797 1801 for path in self.modified():
1798 1802 man[path] = modifiednodeid
1799 1803 man.setflag(path, flag(path))
1800 1804 for path in self.removed():
1801 1805 del man[path]
1802 1806 return man
1803 1807
1804 1808 @propertycache
1805 1809 def _flagfunc(self):
1806 1810 def f(path):
1807 1811 return self._cache[path]['flags']
1808 1812 return f
1809 1813
1810 1814 def files(self):
1811 1815 return sorted(self.added() + self.modified() + self.removed())
1812 1816
1813 1817 def modified(self):
1814 1818 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1815 1819 self._existsinparent(f)]
1816 1820
1817 1821 def added(self):
1818 1822 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1819 1823 not self._existsinparent(f)]
1820 1824
1821 1825 def removed(self):
1822 1826 return [f for f in self._cache.keys() if
1823 1827 not self._cache[f]['exists'] and self._existsinparent(f)]
1824 1828
1825 1829 def isinmemory(self):
1826 1830 return True
1827 1831
1828 1832 def filedate(self, path):
1829 1833 if self.isdirty(path):
1830 1834 return self._cache[path]['date']
1831 1835 else:
1832 1836 return self._wrappedctx[path].date()
1833 1837
1834 1838 def markcopied(self, path, origin):
1835 1839 if self.isdirty(path):
1836 1840 self._cache[path]['copied'] = origin
1837 1841 else:
1838 1842 raise error.ProgrammingError('markcopied() called on clean context')
1839 1843
1840 1844 def copydata(self, path):
1841 1845 if self.isdirty(path):
1842 1846 return self._cache[path]['copied']
1843 1847 else:
1844 1848 raise error.ProgrammingError('copydata() called on clean context')
1845 1849
1846 1850 def flags(self, path):
1847 1851 if self.isdirty(path):
1848 1852 if self._cache[path]['exists']:
1849 1853 return self._cache[path]['flags']
1850 1854 else:
1851 1855 raise error.ProgrammingError("No such file or directory: %s" %
1852 1856 path)
1853 1857 else:
1854 1858 return self._wrappedctx[path].flags()
1855 1859
1856 1860 def _existsinparent(self, path):
1857 1861 try:
1858 1862 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1859 1863 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1860 1864 # with an ``exists()`` function.
1861 1865 self._wrappedctx[path]
1862 1866 return True
1863 1867 except error.ManifestLookupError:
1864 1868 return False
1865 1869
1866 1870 def _auditconflicts(self, path):
1867 1871 """Replicates conflict checks done by wvfs.write().
1868 1872
1869 1873 Since we never write to the filesystem and never call `applyupdates` in
1870 1874 IMM, we'll never check that a path is actually writable -- e.g., when an
1871 1875 incoming change adds `a/foo` but `a` is actually a file in the other commit.
1872 1876 """
1873 1877 def fail(path, component):
1874 1878 # p1() is the base and we're receiving "writes" for p2()'s
1875 1879 # files.
1876 1880 if 'l' in self.p1()[component].flags():
1877 1881 raise error.Abort("error: %s conflicts with symlink %s "
1878 1882 "in %s." % (path, component,
1879 1883 self.p1().rev()))
1880 1884 else:
1881 1885 raise error.Abort("error: '%s' conflicts with file '%s' in "
1882 1886 "%s." % (path, component,
1883 1887 self.p1().rev()))
1884 1888
1885 1889 # Test that each new directory to be created to write this path from p2
1886 1890 # is not a file in p1.
1887 1891 components = path.split('/')
1888 1892 for i in xrange(len(components)):
1889 1893 component = "/".join(components[0:i])
1890 1894 if component in self.p1():
1891 1895 fail(path, component)
1892 1896
1893 1897 # Test the other direction -- that this path from p2 isn't a directory
1894 1898 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1895 1899 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1896 1900 matches = self.p1().manifest().matches(match)
1897 1901 if len(matches) > 0:
1898 1902 if len(matches) == 1 and matches.keys()[0] == path:
1899 1903 return
1900 1904 raise error.Abort("error: file '%s' cannot be written because "
1901 1905 " '%s/' is a folder in %s (containing %d "
1902 1906 "entries: %s)"
1903 1907 % (path, path, self.p1(), len(matches),
1904 1908 ', '.join(matches.keys())))
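# Illustrative example (not from upstream): if p1 tracks a file 'a' and a
# "write" arrives for 'a/foo', the component loop above finds 'a' in p1's
# manifest and aborts; conversely, a write to 'a' when p1 tracks 'a/foo'
# is caught by the `path + '/'` match at the end of this method.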
1905 1909
1906 1910 def write(self, path, data, flags='', **kwargs):
1907 1911 if data is None:
1908 1912 raise error.ProgrammingError("data must be non-None")
1909 1913 self._auditconflicts(path)
1910 1914 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1911 1915 flags=flags)
1912 1916
1913 1917 def setflags(self, path, l, x):
1914 1918 self._markdirty(path, exists=True, date=dateutil.makedate(),
1915 1919 flags=(l and 'l' or '') + (x and 'x' or ''))
1916 1920
1917 1921 def remove(self, path):
1918 1922 self._markdirty(path, exists=False)
1919 1923
1920 1924 def exists(self, path):
1921 1925 """exists behaves like `lexists`, but needs to follow symlinks and
1922 1926 return False if they are broken.
1923 1927 """
1924 1928 if self.isdirty(path):
1925 1929 # If this path exists and is a symlink, "follow" it by calling
1926 1930 # exists on the destination path.
1927 1931 if (self._cache[path]['exists'] and
1928 1932 'l' in self._cache[path]['flags']):
1929 1933 return self.exists(self._cache[path]['data'].strip())
1930 1934 else:
1931 1935 return self._cache[path]['exists']
1932 1936
1933 1937 return self._existsinparent(path)
1934 1938
1935 1939 def lexists(self, path):
1936 1940 """lexists returns True if the path exists"""
1937 1941 if self.isdirty(path):
1938 1942 return self._cache[path]['exists']
1939 1943
1940 1944 return self._existsinparent(path)
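# Illustrative example (assumption): if the cache holds a dirty symlink
# 'link' whose data is 'target', exists('link') follows it and recurses
# into exists('target'), while lexists('link') only reports whether the
# link entry itself exists.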
1941 1945
1942 1946 def size(self, path):
1943 1947 if self.isdirty(path):
1944 1948 if self._cache[path]['exists']:
1945 1949 return len(self._cache[path]['data'])
1946 1950 else:
1947 1951 raise error.ProgrammingError("No such file or directory: %s" %
1948 1952 path)
1949 1953 return self._wrappedctx[path].size()
1950 1954
1951 1955 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1952 1956 user=None, editor=None):
1953 1957 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1954 1958 committed.
1955 1959
1956 1960 ``text`` is the commit message.
1957 1961 ``parents`` (optional) are rev numbers.
1958 1962 """
1959 1963 # Default parents to the wrapped contexts' if not passed.
1960 1964 if parents is None:
1961 1965 parents = self._wrappedctx.parents()
1962 1966 if len(parents) == 1:
1963 1967 parents = (parents[0], None)
1964 1968
1965 1969 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1966 1970 if parents[1] is None:
1967 1971 parents = (self._repo[parents[0]], None)
1968 1972 else:
1969 1973 parents = (self._repo[parents[0]], self._repo[parents[1]])
1970 1974
1971 1975 files = self._cache.keys()
1972 1976 def getfile(repo, memctx, path):
1973 1977 if self._cache[path]['exists']:
1974 1978 return memfilectx(repo, memctx, path,
1975 1979 self._cache[path]['data'],
1976 1980 'l' in self._cache[path]['flags'],
1977 1981 'x' in self._cache[path]['flags'],
1978 1982 self._cache[path]['copied'])
1979 1983 else:
1980 1984 # Returning None, but including the path in `files`, is
1981 1985 # necessary for memctx to register a deletion.
1982 1986 return None
1983 1987 return memctx(self._repo, parents, text, files, getfile, date=date,
1984 1988 extra=extra, user=user, branch=branch, editor=editor)
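# Illustrative usage sketch (assumed, not upstream documentation): an
# in-memory rewrite of a single change could drive this context roughly as
#
#   wctx = overlayworkingctx(repo)
#   wctx.setbase(repo['destination'])   # 'destination' is a placeholder
#   wctx.write('file.txt', newdata, flags='')
#   mctx = wctx.tomemctx('rewritten commit message')
#   node = mctx.commit()
#
# with merge and conflict handling omitted for brevity.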
1985 1989
1986 1990 def isdirty(self, path):
1987 1991 return path in self._cache
1988 1992
1989 1993 def isempty(self):
1990 1994 # We need to discard any keys that are actually clean before the empty
1991 1995 # commit check.
1992 1996 self._compact()
1993 1997 return len(self._cache) == 0
1994 1998
1995 1999 def clean(self):
1996 2000 self._cache = {}
1997 2001
1998 2002 def _compact(self):
1999 2003 """Removes keys from the cache that are actually clean, by comparing
2000 2004 them with the underlying context.
2001 2005
2002 2006 This can occur during the merge process, e.g. by passing --tool :local
2003 2007 to resolve a conflict.
2004 2008 """
2005 2009 keys = []
2006 2010 for path in self._cache.keys():
2007 2011 cache = self._cache[path]
2008 2012 try:
2009 2013 underlying = self._wrappedctx[path]
2010 2014 if (underlying.data() == cache['data'] and
2011 2015 underlying.flags() == cache['flags']):
2012 2016 keys.append(path)
2013 2017 except error.ManifestLookupError:
2014 2018 # Path not in the underlying manifest (created).
2015 2019 continue
2016 2020
2017 2021 for path in keys:
2018 2022 del self._cache[path]
2019 2023 return keys
2020 2024
2021 2025 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2022 2026 self._cache[path] = {
2023 2027 'exists': exists,
2024 2028 'data': data,
2025 2029 'date': date,
2026 2030 'flags': flags,
2027 2031 'copied': None,
2028 2032 }
2029 2033
2030 2034 def filectx(self, path, filelog=None):
2031 2035 return overlayworkingfilectx(self._repo, path, parent=self,
2032 2036 filelog=filelog)
2033 2037
2034 2038 class overlayworkingfilectx(committablefilectx):
2035 2039 """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2036 2040 cache, which can be flushed through later by calling ``flush()``."""
2037 2041
2038 2042 def __init__(self, repo, path, filelog=None, parent=None):
2039 2043 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2040 2044 parent)
2041 2045 self._repo = repo
2042 2046 self._parent = parent
2043 2047 self._path = path
2044 2048
2045 2049 def cmp(self, fctx):
2046 2050 return self.data() != fctx.data()
2047 2051
2048 2052 def changectx(self):
2049 2053 return self._parent
2050 2054
2051 2055 def data(self):
2052 2056 return self._parent.data(self._path)
2053 2057
2054 2058 def date(self):
2055 2059 return self._parent.filedate(self._path)
2056 2060
2057 2061 def exists(self):
2058 2062 return self.lexists()
2059 2063
2060 2064 def lexists(self):
2061 2065 return self._parent.exists(self._path)
2062 2066
2063 2067 def renamed(self):
2064 2068 path = self._parent.copydata(self._path)
2065 2069 if not path:
2066 2070 return None
2067 2071 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2068 2072
2069 2073 def size(self):
2070 2074 return self._parent.size(self._path)
2071 2075
2072 2076 def markcopied(self, origin):
2073 2077 self._parent.markcopied(self._path, origin)
2074 2078
2075 2079 def audit(self):
2076 2080 pass
2077 2081
2078 2082 def flags(self):
2079 2083 return self._parent.flags(self._path)
2080 2084
2081 2085 def setflags(self, islink, isexec):
2082 2086 return self._parent.setflags(self._path, islink, isexec)
2083 2087
2084 2088 def write(self, data, flags, backgroundclose=False, **kwargs):
2085 2089 return self._parent.write(self._path, data, flags, **kwargs)
2086 2090
2087 2091 def remove(self, ignoremissing=False):
2088 2092 return self._parent.remove(self._path)
2089 2093
2090 2094 def clearunknown(self):
2091 2095 pass
2092 2096
2093 2097 class workingcommitctx(workingctx):
2094 2098 """A workingcommitctx object makes access to data related to
2095 2099 the revision being committed convenient.
2096 2100
2097 2101 This hides changes in the working directory, if they aren't
2098 2102 committed in this context.
2099 2103 """
2100 2104 def __init__(self, repo, changes,
2101 2105 text="", user=None, date=None, extra=None):
2102 2106 super(workingctx, self).__init__(repo, text, user, date, extra,
2103 2107 changes)
2104 2108
2105 2109 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2106 2110 """Return matched files only in ``self._status``
2107 2111
2108 2112 Uncommitted files appear "clean" via this context, even if
2109 2113 they aren't actually so in the working directory.
2110 2114 """
2111 2115 if clean:
2112 2116 clean = [f for f in self._manifest if f not in self._changedset]
2113 2117 else:
2114 2118 clean = []
2115 2119 return scmutil.status([f for f in self._status.modified if match(f)],
2116 2120 [f for f in self._status.added if match(f)],
2117 2121 [f for f in self._status.removed if match(f)],
2118 2122 [], [], [], clean)
2119 2123
2120 2124 @propertycache
2121 2125 def _changedset(self):
2122 2126 """Return the set of files changed in this context
2123 2127 """
2124 2128 changed = set(self._status.modified)
2125 2129 changed.update(self._status.added)
2126 2130 changed.update(self._status.removed)
2127 2131 return changed
2128 2132
2129 2133 def makecachingfilectxfn(func):
2130 2134 """Create a filectxfn that caches based on the path.
2131 2135
2132 2136 We can't use util.cachefunc because it uses all arguments as the cache
2133 2137 key and this creates a cycle since the arguments include the repo and
2134 2138 memctx.
2135 2139 """
2136 2140 cache = {}
2137 2141
2138 2142 def getfilectx(repo, memctx, path):
2139 2143 if path not in cache:
2140 2144 cache[path] = func(repo, memctx, path)
2141 2145 return cache[path]
2142 2146
2143 2147 return getfilectx
2144 2148
2145 2149 def memfilefromctx(ctx):
2146 2150 """Given a context return a memfilectx for ctx[path]
2147 2151
2148 2152 This is a convenience method for building a memctx based on another
2149 2153 context.
2150 2154 """
2151 2155 def getfilectx(repo, memctx, path):
2152 2156 fctx = ctx[path]
2153 2157 # this is weird but apparently we only keep track of one parent
2154 2158 # (why not only store that instead of a tuple?)
2155 2159 copied = fctx.renamed()
2156 2160 if copied:
2157 2161 copied = copied[0]
2158 2162 return memfilectx(repo, memctx, path, fctx.data(),
2159 2163 islink=fctx.islink(), isexec=fctx.isexec(),
2160 2164 copied=copied)
2161 2165
2162 2166 return getfilectx
2163 2167
2164 2168 def memfilefrompatch(patchstore):
2165 2169 """Given a patch (e.g. patchstore object) return a memfilectx
2166 2170
2167 2171 This is a convenience method for building a memctx based on a patchstore.
2168 2172 """
2169 2173 def getfilectx(repo, memctx, path):
2170 2174 data, mode, copied = patchstore.getfile(path)
2171 2175 if data is None:
2172 2176 return None
2173 2177 islink, isexec = mode
2174 2178 return memfilectx(repo, memctx, path, data, islink=islink,
2175 2179 isexec=isexec, copied=copied)
2176 2180
2177 2181 return getfilectx
2178 2182
2179 2183 class memctx(committablectx):
2180 2184 """Use memctx to perform in-memory commits via localrepo.commitctx().
2181 2185
2182 2186 Revision information is supplied at initialization time while
2183 2187 related file data is made available through a callback
2184 2188 mechanism. 'repo' is the current localrepo, 'parents' is a
2185 2189 sequence of two parent revision identifiers (pass None for every
2186 2190 missing parent), 'text' is the commit message and 'files' lists
2187 2191 names of files touched by the revision (normalized and relative to
2188 2192 repository root).
2189 2193
2190 2194 filectxfn(repo, memctx, path) is a callable receiving the
2191 2195 repository, the current memctx object and the normalized path of
2192 2196 requested file, relative to repository root. It is fired by the
2193 2197 commit function for every file in 'files', but calls order is
2194 2198 undefined. If the file is available in the revision being
2195 2199 committed (updated or added), filectxfn returns a memfilectx
2196 2200 object. If the file was removed, filectxfn returns None for recent
2197 2201 Mercurial. Moved files are represented by marking the source file
2198 2202 removed and the new file added with copy information (see
2199 2203 memfilectx).
2200 2204
2201 2205 user receives the committer name and defaults to current
2202 2206 repository username, date is the commit date in any format
2203 2207 supported by dateutil.parsedate() and defaults to current date, extra
2204 2208 is a dictionary of metadata or is left empty.
2205 2209 """
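# A minimal illustrative sketch (not upstream documentation) of driving
# memctx by hand; the file name, data and message are placeholders:
#
#   def filectxfn(repo, memctx, path):
#       if path == 'hello.txt':
#           return memfilectx(repo, memctx, path, b'hello world\n')
#       return None   # anything else listed in files is a removal
#
#   ctx = memctx(repo, (repo['.'].node(), None), 'example message',
#                ['hello.txt'], filectxfn)
#   node = ctx.commit()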
2206 2210
2207 2211 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2208 2212 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2209 2213 # this field to determine what to do in filectxfn.
2210 2214 _returnnoneformissingfiles = True
2211 2215
2212 2216 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2213 2217 date=None, extra=None, branch=None, editor=False):
2214 2218 super(memctx, self).__init__(repo, text, user, date, extra)
2215 2219 self._rev = None
2216 2220 self._node = None
2217 2221 parents = [(p or nullid) for p in parents]
2218 2222 p1, p2 = parents
2219 2223 self._parents = [self._repo[p] for p in (p1, p2)]
2220 2224 files = sorted(set(files))
2221 2225 self._files = files
2222 2226 if branch is not None:
2223 2227 self._extra['branch'] = encoding.fromlocal(branch)
2224 2228 self.substate = {}
2225 2229
2226 2230 if isinstance(filectxfn, patch.filestore):
2227 2231 filectxfn = memfilefrompatch(filectxfn)
2228 2232 elif not callable(filectxfn):
2229 2233 # if store is not callable, wrap it in a function
2230 2234 filectxfn = memfilefromctx(filectxfn)
2231 2235
2232 2236 # memoizing increases performance for e.g. vcs convert scenarios.
2233 2237 self._filectxfn = makecachingfilectxfn(filectxfn)
2234 2238
2235 2239 if editor:
2236 2240 self._text = editor(self._repo, self, [])
2237 2241 self._repo.savecommitmessage(self._text)
2238 2242
2239 2243 def filectx(self, path, filelog=None):
2240 2244 """get a file context from the working directory
2241 2245
2242 2246 Returns None if file doesn't exist and should be removed."""
2243 2247 return self._filectxfn(self._repo, self, path)
2244 2248
2245 2249 def commit(self):
2246 2250 """commit context to the repo"""
2247 2251 return self._repo.commitctx(self)
2248 2252
2249 2253 @propertycache
2250 2254 def _manifest(self):
2251 2255 """generate a manifest based on the return values of filectxfn"""
2252 2256
2253 2257 # keep this simple for now; just worry about p1
2254 2258 pctx = self._parents[0]
2255 2259 man = pctx.manifest().copy()
2256 2260
2257 2261 for f in self._status.modified:
2258 2262 p1node = nullid
2259 2263 p2node = nullid
2260 2264 p = pctx[f].parents() # if file isn't in pctx, check p2?
2261 2265 if len(p) > 0:
2262 2266 p1node = p[0].filenode()
2263 2267 if len(p) > 1:
2264 2268 p2node = p[1].filenode()
2265 2269 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2266 2270
2267 2271 for f in self._status.added:
2268 2272 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2269 2273
2270 2274 for f in self._status.removed:
2271 2275 if f in man:
2272 2276 del man[f]
2273 2277
2274 2278 return man
2275 2279
2276 2280 @propertycache
2277 2281 def _status(self):
2278 2282 """Calculate exact status from ``files`` specified at construction
2279 2283 """
2280 2284 man1 = self.p1().manifest()
2281 2285 p2 = self._parents[1]
2282 2286 # "1 < len(self._parents)" can't be used for checking
2283 2287 # existence of the 2nd parent, because "memctx._parents" is
2284 2288 # explicitly initialized with a list whose length is always 2.
2285 2289 if p2.node() != nullid:
2286 2290 man2 = p2.manifest()
2287 2291 managing = lambda f: f in man1 or f in man2
2288 2292 else:
2289 2293 managing = lambda f: f in man1
2290 2294
2291 2295 modified, added, removed = [], [], []
2292 2296 for f in self._files:
2293 2297 if not managing(f):
2294 2298 added.append(f)
2295 2299 elif self[f]:
2296 2300 modified.append(f)
2297 2301 else:
2298 2302 removed.append(f)
2299 2303
2300 2304 return scmutil.status(modified, added, removed, [], [], [], [])
2301 2305
2302 2306 class memfilectx(committablefilectx):
2303 2307 """memfilectx represents an in-memory file to commit.
2304 2308
2305 2309 See memctx and committablefilectx for more details.
2306 2310 """
2307 2311 def __init__(self, repo, changectx, path, data, islink=False,
2308 2312 isexec=False, copied=None):
2309 2313 """
2310 2314 path is the normalized file path relative to repository root.
2311 2315 data is the file content as a string.
2312 2316 islink is True if the file is a symbolic link.
2313 2317 isexec is True if the file is executable.
2314 2318 copied is the source file path if current file was copied in the
2315 2319 revision being committed, or None."""
2316 2320 super(memfilectx, self).__init__(repo, path, None, changectx)
2317 2321 self._data = data
2318 2322 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2319 2323 self._copied = None
2320 2324 if copied:
2321 2325 self._copied = (copied, nullid)
2322 2326
2323 2327 def data(self):
2324 2328 return self._data
2325 2329
2326 2330 def remove(self, ignoremissing=False):
2327 2331 """wraps unlink for a repo's working directory"""
2328 2332 # need to figure out what to do here
2329 2333 del self._changectx[self._path]
2330 2334
2331 2335 def write(self, data, flags, **kwargs):
2332 2336 """wraps repo.wwrite"""
2333 2337 self._data = data
2334 2338
2335 2339 class overlayfilectx(committablefilectx):
2336 2340 """Like memfilectx but takes an original filectx and optional parameters to
2337 2341 override parts of it. This is useful when fctx.data() is expensive (i.e.
2338 2342 flag processor is expensive) and raw data, flags, and filenode could be
2339 2343 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2340 2344 """
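# Illustrative sketch (assumption, not upstream documentation): a
# mode-only change can be expressed without re-reading the file data as
#
#   newfctx = overlayfilectx(oldfctx, flags='x')
#
# where 'oldfctx' is a placeholder for an existing filectx; since data,
# copy information and ctx are unchanged, the raw data and filenode can
# be reused.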
2341 2345
2342 2346 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2343 2347 copied=None, ctx=None):
2344 2348 """originalfctx: filecontext to duplicate
2345 2349
2346 2350 datafunc: None or a function to override data (file content); a function
2347 2351 is used so the data can be computed lazily. path, flags, copied, ctx: None or overridden value
2348 2352
2349 2353 copied could be (path, rev), or False. copied could also be just path,
2350 2354 and will be converted to (path, nullid). This simplifies some callers.
2351 2355 """
2352 2356
2353 2357 if path is None:
2354 2358 path = originalfctx.path()
2355 2359 if ctx is None:
2356 2360 ctx = originalfctx.changectx()
2357 2361 ctxmatch = lambda: True
2358 2362 else:
2359 2363 ctxmatch = lambda: ctx == originalfctx.changectx()
2360 2364
2361 2365 repo = originalfctx.repo()
2362 2366 flog = originalfctx.filelog()
2363 2367 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2364 2368
2365 2369 if copied is None:
2366 2370 copied = originalfctx.renamed()
2367 2371 copiedmatch = lambda: True
2368 2372 else:
2369 2373 if copied and not isinstance(copied, tuple):
2370 2374 # repo._filecommit will recalculate copyrev so nullid is okay
2371 2375 copied = (copied, nullid)
2372 2376 copiedmatch = lambda: copied == originalfctx.renamed()
2373 2377
2374 2378 # When data, copied (could affect data), ctx (could affect filelog
2375 2379 # parents) are not overridden, rawdata, rawflags, and filenode may be
2376 2380 # reused (repo._filecommit should double check filelog parents).
2377 2381 #
2378 2382 # path, flags are not hashed in filelog (but in manifestlog) so they do
2379 2383 # not affect reusability here.
2380 2384 #
2381 2385 # If ctx or copied is overridden to the same value as in originalfctx,
2382 2386 # it is still considered reusable. originalfctx.renamed() may be a bit
2383 2387 # expensive so it's not called unless necessary. Assuming datafunc is
2384 2388 # always expensive, do not call it for this "reusable" test.
2385 2389 reusable = datafunc is None and ctxmatch() and copiedmatch()
2386 2390
2387 2391 if datafunc is None:
2388 2392 datafunc = originalfctx.data
2389 2393 if flags is None:
2390 2394 flags = originalfctx.flags()
2391 2395
2392 2396 self._datafunc = datafunc
2393 2397 self._flags = flags
2394 2398 self._copied = copied
2395 2399
2396 2400 if reusable:
2397 2401 # copy extra fields from originalfctx
2398 2402 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2399 2403 for attr_ in attrs:
2400 2404 if util.safehasattr(originalfctx, attr_):
2401 2405 setattr(self, attr_, getattr(originalfctx, attr_))
2402 2406
2403 2407 def data(self):
2404 2408 return self._datafunc()
2405 2409
2406 2410 class metadataonlyctx(committablectx):
2407 2411 """Like memctx, but reusing the manifest of a different commit.
2408 2412 Intended to be used by lightweight operations that are creating
2409 2413 metadata-only changes.
2410 2414
2411 2415 Revision information is supplied at initialization time. 'repo' is the
2412 2416 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2413 2417 'parents' is a sequence of two parent revision identifiers (pass None for
2414 2418 every missing parent), and 'text' is the commit message.
2415 2419
2416 2420 user receives the committer name and defaults to current repository
2417 2421 username, date is the commit date in any format supported by
2418 2422 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2419 2423 metadata or is left empty.
2420 2424 """
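# Illustrative sketch (assumption): rewriting only the user of an existing
# changeset while reusing its manifest unchanged could look like
#
#   newctx = metadataonlyctx(repo, repo['somerev'], user='someone else')
#   node = newctx.commit()
#
# where 'somerev' and the user value are placeholders.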
2421 2425 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2422 2426 date=None, extra=None, editor=False):
2423 2427 if text is None:
2424 2428 text = originalctx.description()
2425 2429 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2426 2430 self._rev = None
2427 2431 self._node = None
2428 2432 self._originalctx = originalctx
2429 2433 self._manifestnode = originalctx.manifestnode()
2430 2434 if parents is None:
2431 2435 parents = originalctx.parents()
2432 2436 else:
2433 2437 parents = [repo[p] for p in parents if p is not None]
2434 2438 parents = parents[:]
2435 2439 while len(parents) < 2:
2436 2440 parents.append(repo[nullid])
2437 2441 p1, p2 = self._parents = parents
2438 2442
2439 2443 # sanity check to ensure that the reused manifest parents are
2440 2444 # manifests of our commit parents
2441 2445 mp1, mp2 = self.manifestctx().parents
2442 2446 if p1 != nullid and p1.manifestnode() != mp1:
2443 2447 raise RuntimeError('can\'t reuse the manifest: '
2444 2448 'its p1 doesn\'t match the new ctx p1')
2445 2449 if p2 != nullid and p2.manifestnode() != mp2:
2446 2450 raise RuntimeError('can\'t reuse the manifest: '
2447 2451 'its p2 doesn\'t match the new ctx p2')
2448 2452
2449 2453 self._files = originalctx.files()
2450 2454 self.substate = {}
2451 2455
2452 2456 if editor:
2453 2457 self._text = editor(self._repo, self, [])
2454 2458 self._repo.savecommitmessage(self._text)
2455 2459
2456 2460 def manifestnode(self):
2457 2461 return self._manifestnode
2458 2462
2459 2463 @property
2460 2464 def _manifestctx(self):
2461 2465 return self._repo.manifestlog[self._manifestnode]
2462 2466
2463 2467 def filectx(self, path, filelog=None):
2464 2468 return self._originalctx.filectx(path, filelog=filelog)
2465 2469
2466 2470 def commit(self):
2467 2471 """commit context to the repo"""
2468 2472 return self._repo.commitctx(self)
2469 2473
2470 2474 @property
2471 2475 def _manifest(self):
2472 2476 return self._originalctx.manifest()
2473 2477
2474 2478 @propertycache
2475 2479 def _status(self):
2476 2480 """Calculate exact status from ``files`` specified in the ``origctx``
2477 2481 and the parents' manifests.
2478 2482 """
2479 2483 man1 = self.p1().manifest()
2480 2484 p2 = self._parents[1]
2481 2485 # "1 < len(self._parents)" can't be used for checking
2482 2486 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2483 2487 # explicitly initialized with a list whose length is always 2.
2484 2488 if p2.node() != nullid:
2485 2489 man2 = p2.manifest()
2486 2490 managing = lambda f: f in man1 or f in man2
2487 2491 else:
2488 2492 managing = lambda f: f in man1
2489 2493
2490 2494 modified, added, removed = [], [], []
2491 2495 for f in self._files:
2492 2496 if not managing(f):
2493 2497 added.append(f)
2494 2498 elif f in self:
2495 2499 modified.append(f)
2496 2500 else:
2497 2501 removed.append(f)
2498 2502
2499 2503 return scmutil.status(modified, added, removed, [], [], [], [])
2500 2504
2501 2505 class arbitraryfilectx(object):
2502 2506 """Allows you to use filectx-like functions on a file in an arbitrary
2503 2507 location on disk, possibly not in the working directory.
2504 2508 """
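# Illustrative sketch (assumption): comparing a file at an arbitrary path
# against a working-directory file, e.g. from a merge helper, could look
# like
#
#   fctx = arbitraryfilectx('/tmp/base-version', repo=repo)
#   changed = fctx.cmp(repo[None]['file.txt'])
#
# where the paths are placeholders; cmp() returns True if the contents
# differ.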
2505 2509 def __init__(self, path, repo=None):
2506 2510 # Repo is optional because contrib/simplemerge uses this class.
2507 2511 self._repo = repo
2508 2512 self._path = path
2509 2513
2510 2514 def cmp(self, fctx):
2511 2515 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2512 2516 # path if either side is a symlink.
2513 2517 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2514 2518 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2515 2519 # Add a fast-path for merge if both sides are disk-backed.
2516 2520 # Note that filecmp uses the opposite return values (True if same)
2517 2521 # from our cmp functions (True if different).
2518 2522 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2519 2523 return self.data() != fctx.data()
2520 2524
2521 2525 def path(self):
2522 2526 return self._path
2523 2527
2524 2528 def flags(self):
2525 2529 return ''
2526 2530
2527 2531 def data(self):
2528 2532 return util.readfile(self._path)
2529 2533
2530 2534 def decodeddata(self):
2531 2535 with open(self._path, "rb") as f:
2532 2536 return f.read()
2533 2537
2534 2538 def remove(self):
2535 2539 util.unlink(self._path)
2536 2540
2537 2541 def write(self, data, flags, **kwargs):
2538 2542 assert not flags
2539 2543 with open(self._path, "w") as f:
2540 2544 f.write(data)