status: don't override _buildstatus() in workingcommitctx...
Martin von Zweigbergk
r23777:a4951ade default
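The diff below removes the `_buildstatus()` override from `workingcommitctx` (the lines near the end that carry only old line numbers). As the surrounding code shows, `workingctx._buildstatus()` starts by calling `self._dirstatestatus()`, and `workingcommitctx` keeps its own `_dirstatestatus()` that reports only the files being committed and never caches into `self._status`, so the inherited `_buildstatus()` already behaves the way the removed override did. A minimal, self-contained sketch of that dispatch pattern (illustrative only, not Mercurial code):

    class workingctx_sketch(object):
        def _dirstatestatus(self):
            return 'full working-directory status'
        def _buildstatus(self):
            # like workingctx._buildstatus, start from _dirstatestatus()
            return self._dirstatestatus()

    class workingcommitctx_sketch(workingctx_sketch):
        # overriding _dirstatestatus() alone is enough;
        # no _buildstatus() override is needed
        def _dirstatestatus(self):
            return 'status restricted to the files being committed'

    print(workingcommitctx_sketch()._buildstatus())
    # -> 'status restricted to the files being committed'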
@@ -1,1859 +1,1847
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
26 26 """return the first ancestor of <srcrev> introducing <fnode>
27 27
28 28 If the linkrev of the file revision does not point to an ancestor of
29 29 srcrev, we'll walk down the ancestors until we find one introducing this
30 30 file revision.
31 31
32 32 :repo: a localrepository object (used to access changelog and manifest)
33 33 :path: the file path
34 34 :fnode: the nodeid of the file revision
35 35 :filelog: the filelog of this path
36 36 :srcrev: the changeset revision we search ancestors from
37 37 :inclusive: if true, the src revision will also be checked
38 38 """
39 39 cl = repo.unfiltered().changelog
40 40 ma = repo.manifest
41 41 # fetch the linkrev
42 42 fr = filelog.rev(fnode)
43 43 lkr = filelog.linkrev(fr)
44 44 # check if this linkrev is an ancestor of srcrev
45 45 anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
46 46 if lkr not in anc:
47 47 for a in anc:
48 48 ac = cl.read(a) # get changeset data (we avoid object creation).
49 49 if path in ac[3]: # checking the 'files' field.
50 50 # The file has been touched, check if the content is similar
51 51 # to the one we search for.
52 52 if fnode == ma.readdelta(ac[0]).get(path):
53 53 return a
54 54 # In theory, we should never get out of that loop without a result. But
55 55 # if the manifest uses a buggy file revision (not a child of the one it
56 56 # replaces) we could. Such a buggy situation will likely result in a
57 57 # crash somewhere else at some point.
58 58 return lkr
59 59
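# Hedged illustration of the problem above (hypothetical revision numbers):
# if a file revision was first introduced by changeset 5 on another branch and
# later reused verbatim by changeset 8 on this one, filelog.linkrev() still
# answers 5 even though 5 is not an ancestor of 8. A caller such as
# basefilectx.introrev() below therefore uses
#     _adjustlinkrev(repo, path, filelog, fnode, srcrev=8, inclusive=True)
# to walk the ancestors of 8 and return the first one whose manifest actually
# records fnode for path (8 itself in this example).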
60 60 class basectx(object):
61 61 """A basectx object represents the common logic for its children:
62 62 changectx: read-only context that is already present in the repo,
63 63 workingctx: a context that represents the working directory and can
64 64 be committed,
65 65 memctx: a context that represents changes in-memory and can also
66 66 be committed."""
67 67 def __new__(cls, repo, changeid='', *args, **kwargs):
68 68 if isinstance(changeid, basectx):
69 69 return changeid
70 70
71 71 o = super(basectx, cls).__new__(cls)
72 72
73 73 o._repo = repo
74 74 o._rev = nullrev
75 75 o._node = nullid
76 76
77 77 return o
78 78
79 79 def __str__(self):
80 80 return short(self.node())
81 81
82 82 def __int__(self):
83 83 return self.rev()
84 84
85 85 def __repr__(self):
86 86 return "<%s %s>" % (type(self).__name__, str(self))
87 87
88 88 def __eq__(self, other):
89 89 try:
90 90 return type(self) == type(other) and self._rev == other._rev
91 91 except AttributeError:
92 92 return False
93 93
94 94 def __ne__(self, other):
95 95 return not (self == other)
96 96
97 97 def __contains__(self, key):
98 98 return key in self._manifest
99 99
100 100 def __getitem__(self, key):
101 101 return self.filectx(key)
102 102
103 103 def __iter__(self):
104 104 for f in sorted(self._manifest):
105 105 yield f
106 106
107 107 def _manifestmatches(self, match, s):
108 108 """generate a new manifest filtered by the match argument
109 109
110 110 This method is for internal use only and mainly exists to provide an
111 111 object oriented way for other contexts to customize the manifest
112 112 generation.
113 113 """
114 114 return self.manifest().matches(match)
115 115
116 116 def _matchstatus(self, other, match):
117 117 """return match.always if match is None
118 118
119 119 This internal method provides a way for child objects to override the
120 120 match operator.
121 121 """
122 122 return match or matchmod.always(self._repo.root, self._repo.getcwd())
123 123
124 124 def _buildstatus(self, other, s, match, listignored, listclean,
125 125 listunknown):
126 126 """build a status with respect to another context"""
127 127 # Load earliest manifest first for caching reasons. More specifically,
128 128 # if you have revisions 1000 and 1001, 1001 is probably stored as a
129 129 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
130 130 # 1000 and cache it so that when you read 1001, we just need to apply a
131 131 # delta to what's in the cache. So that's one full reconstruction + one
132 132 # delta application.
133 133 if self.rev() is not None and self.rev() < other.rev():
134 134 self.manifest()
135 135 mf1 = other._manifestmatches(match, s)
136 136 mf2 = self._manifestmatches(match, s)
137 137
138 138 modified, added = [], []
139 139 removed = []
140 140 clean = []
141 141 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
142 142 deletedset = set(deleted)
143 143 d = mf1.diff(mf2, clean=listclean)
144 144 for fn, value in d.iteritems():
145 145 if fn in deletedset:
146 146 continue
147 147 if value is None:
148 148 clean.append(fn)
149 149 continue
150 150 (node1, flag1), (node2, flag2) = value
151 151 if node1 is None:
152 152 added.append(fn)
153 153 elif node2 is None:
154 154 removed.append(fn)
155 155 elif node2 != _newnode:
156 156 # The file was not a new file in mf2, so an entry
157 157 # from diff is really a difference.
158 158 modified.append(fn)
159 159 elif self[fn].cmp(other[fn]):
160 160 # node2 was newnode, but the working file doesn't
161 161 # match the one in mf1.
162 162 modified.append(fn)
163 163 else:
164 164 clean.append(fn)
165 165
166 166 if removed:
167 167 # need to filter files if they are already reported as removed
168 168 unknown = [fn for fn in unknown if fn not in mf1]
169 169 ignored = [fn for fn in ignored if fn not in mf1]
170 170 # if they're deleted, don't report them as removed
171 171 removed = [fn for fn in removed if fn not in deletedset]
172 172
173 173 return scmutil.status(modified, added, removed, deleted, unknown,
174 174 ignored, clean)
175 175
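# Sketch of the shape mf1.diff(mf2, clean=True) returns in the loop above
# (hypothetical file names and nodeids):
#     {'added.txt':    ((None, ''),  (nodeB, '')),
#      'removed.txt':  ((nodeA, ''), (None, '')),
#      'modified.txt': ((nodeA, ''), (nodeB, 'x')),
#      'clean.txt':    None}        # None entries only appear when clean=True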
176 176 @propertycache
177 177 def substate(self):
178 178 return subrepo.state(self, self._repo.ui)
179 179
180 180 def subrev(self, subpath):
181 181 return self.substate[subpath][1]
182 182
183 183 def rev(self):
184 184 return self._rev
185 185 def node(self):
186 186 return self._node
187 187 def hex(self):
188 188 return hex(self.node())
189 189 def manifest(self):
190 190 return self._manifest
191 191 def phasestr(self):
192 192 return phases.phasenames[self.phase()]
193 193 def mutable(self):
194 194 return self.phase() > phases.public
195 195
196 196 def getfileset(self, expr):
197 197 return fileset.getfileset(self, expr)
198 198
199 199 def obsolete(self):
200 200 """True if the changeset is obsolete"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202 202
203 203 def extinct(self):
204 204 """True if the changeset is extinct"""
205 205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206 206
207 207 def unstable(self):
208 208 """True if the changeset is not obsolete but its ancestors are"""
209 209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
210 210
211 211 def bumped(self):
212 212 """True if the changeset tries to be a successor of a public changeset
213 213
214 214 Only non-public and non-obsolete changesets may be bumped.
215 215 """
216 216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
217 217
218 218 def divergent(self):
219 219 """Is a successor of a changeset with multiple possible successors set
220 220
221 221 Only non-public and non-obsolete changesets may be divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
224 224
225 225 def troubled(self):
226 226 """True if the changeset is either unstable, bumped or divergent"""
227 227 return self.unstable() or self.bumped() or self.divergent()
228 228
229 229 def troubles(self):
230 230 """return the list of troubles affecting this changeset.
231 231
232 232 Troubles are returned as strings. Possible values are:
233 233 - unstable,
234 234 - bumped,
235 235 - divergent.
236 236 """
237 237 troubles = []
238 238 if self.unstable():
239 239 troubles.append('unstable')
240 240 if self.bumped():
241 241 troubles.append('bumped')
242 242 if self.divergent():
243 243 troubles.append('divergent')
244 244 return troubles
245 245
246 246 def parents(self):
247 247 """return contexts for each parent changeset"""
248 248 return self._parents
249 249
250 250 def p1(self):
251 251 return self._parents[0]
252 252
253 253 def p2(self):
254 254 if len(self._parents) == 2:
255 255 return self._parents[1]
256 256 return changectx(self._repo, -1)
257 257
258 258 def _fileinfo(self, path):
259 259 if '_manifest' in self.__dict__:
260 260 try:
261 261 return self._manifest[path], self._manifest.flags(path)
262 262 except KeyError:
263 263 raise error.ManifestLookupError(self._node, path,
264 264 _('not found in manifest'))
265 265 if '_manifestdelta' in self.__dict__ or path in self.files():
266 266 if path in self._manifestdelta:
267 267 return (self._manifestdelta[path],
268 268 self._manifestdelta.flags(path))
269 269 node, flag = self._repo.manifest.find(self._changeset[0], path)
270 270 if not node:
271 271 raise error.ManifestLookupError(self._node, path,
272 272 _('not found in manifest'))
273 273
274 274 return node, flag
275 275
276 276 def filenode(self, path):
277 277 return self._fileinfo(path)[0]
278 278
279 279 def flags(self, path):
280 280 try:
281 281 return self._fileinfo(path)[1]
282 282 except error.LookupError:
283 283 return ''
284 284
285 285 def sub(self, path):
286 286 return subrepo.subrepo(self, path)
287 287
288 288 def match(self, pats=[], include=None, exclude=None, default='glob'):
289 289 r = self._repo
290 290 return matchmod.match(r.root, r.getcwd(), pats,
291 291 include, exclude, default,
292 292 auditor=r.auditor, ctx=self)
293 293
294 294 def diff(self, ctx2=None, match=None, **opts):
295 295 """Returns a diff generator for the given contexts and matcher"""
296 296 if ctx2 is None:
297 297 ctx2 = self.p1()
298 298 if ctx2 is not None:
299 299 ctx2 = self._repo[ctx2]
300 300 diffopts = patch.diffopts(self._repo.ui, opts)
301 301 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
302 302
303 303 @propertycache
304 304 def _dirs(self):
305 305 return scmutil.dirs(self._manifest)
306 306
307 307 def dirs(self):
308 308 return self._dirs
309 309
310 310 def dirty(self, missing=False, merge=True, branch=True):
311 311 return False
312 312
313 313 def status(self, other=None, match=None, listignored=False,
314 314 listclean=False, listunknown=False, listsubrepos=False):
315 315 """return status of files between two nodes or node and working
316 316 directory.
317 317
318 318 If other is None, compare this node with working directory.
319 319
320 320 returns (modified, added, removed, deleted, unknown, ignored, clean)
321 321 """
322 322
323 323 ctx1 = self
324 324 ctx2 = self._repo[other]
325 325
326 326 # This next code block is, admittedly, fragile logic that tests for
327 327 # reversing the contexts and wouldn't need to exist if it weren't for
328 328 # the fast (and common) code path of comparing the working directory
329 329 # with its first parent.
330 330 #
331 331 # What we're aiming for here is the ability to call:
332 332 #
333 333 # workingctx.status(parentctx)
334 334 #
335 335 # If we always built the manifest for each context and compared those,
336 336 # then we'd be done. But the special case of the above call means we
337 337 # just copy the manifest of the parent.
338 338 reversed = False
339 339 if (not isinstance(ctx1, changectx)
340 340 and isinstance(ctx2, changectx)):
341 341 reversed = True
342 342 ctx1, ctx2 = ctx2, ctx1
343 343
344 344 match = ctx2._matchstatus(ctx1, match)
345 345 r = scmutil.status([], [], [], [], [], [], [])
346 346 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
347 347 listunknown)
348 348
349 349 if reversed:
350 350 # Reverse added and removed. Clear deleted, unknown and ignored as
351 351 # these make no sense to reverse.
352 352 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
353 353 r.clean)
354 354
355 355 if listsubrepos:
356 356 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
357 357 rev2 = ctx2.subrev(subpath)
358 358 try:
359 359 submatch = matchmod.narrowmatcher(subpath, match)
360 360 s = sub.status(rev2, match=submatch, ignored=listignored,
361 361 clean=listclean, unknown=listunknown,
362 362 listsubrepos=True)
363 363 for rfiles, sfiles in zip(r, s):
364 364 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
365 365 except error.LookupError:
366 366 self._repo.ui.status(_("skipping missing "
367 367 "subrepository: %s\n") % subpath)
368 368
369 369 for l in r:
370 370 l.sort()
371 371
372 372 return r
373 373
374 374
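# Usage sketch (assumes an existing localrepo object `repo`; illustrative
# only). Both call orders work; the second takes the branch above that swaps
# the contexts and then exchanges added/removed in the result:
#     pctx = repo['.']          # parent of the working directory
#     wctx = repo[None]         # working directory context
#     st = pctx.status(wctx)    # a file added in the working dir -> st.added
#     st2 = wctx.status(pctx)   # same comparison reversed       -> st2.removed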
375 375 def makememctx(repo, parents, text, user, date, branch, files, store,
376 376 editor=None):
377 377 def getfilectx(repo, memctx, path):
378 378 data, mode, copied = store.getfile(path)
379 379 if data is None:
380 380 return None
381 381 islink, isexec = mode
382 382 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
383 383 copied=copied, memctx=memctx)
384 384 extra = {}
385 385 if branch:
386 386 extra['branch'] = encoding.fromlocal(branch)
387 387 ctx = memctx(repo, parents, text, files, getfilectx, user,
388 388 date, extra, editor)
389 389 return ctx
390 390
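# Hedged usage sketch: `store` stands for any object exposing
# getfile(path) -> (data, (islink, isexec), copied) as used above; the
# parents, commit text and file list are hypothetical:
#     ctx = makememctx(repo, (p1node, p2node), 'import patch', 'user@example',
#                      None, 'default', ['a.txt'], store)
#     newnode = repo.commitctx(ctx)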
391 391 class changectx(basectx):
392 392 """A changecontext object makes access to data related to a particular
393 393 changeset convenient. It represents a read-only context already present in
394 394 the repo."""
395 395 def __init__(self, repo, changeid=''):
396 396 """changeid is a revision number, node, or tag"""
397 397
398 398 # since basectx.__new__ already took care of copying the object, we
399 399 # don't need to do anything in __init__, so we just exit here
400 400 if isinstance(changeid, basectx):
401 401 return
402 402
403 403 if changeid == '':
404 404 changeid = '.'
405 405 self._repo = repo
406 406
407 407 try:
408 408 if isinstance(changeid, int):
409 409 self._node = repo.changelog.node(changeid)
410 410 self._rev = changeid
411 411 return
412 412 if isinstance(changeid, long):
413 413 changeid = str(changeid)
414 414 if changeid == '.':
415 415 self._node = repo.dirstate.p1()
416 416 self._rev = repo.changelog.rev(self._node)
417 417 return
418 418 if changeid == 'null':
419 419 self._node = nullid
420 420 self._rev = nullrev
421 421 return
422 422 if changeid == 'tip':
423 423 self._node = repo.changelog.tip()
424 424 self._rev = repo.changelog.rev(self._node)
425 425 return
426 426 if len(changeid) == 20:
427 427 try:
428 428 self._node = changeid
429 429 self._rev = repo.changelog.rev(changeid)
430 430 return
431 431 except error.FilteredRepoLookupError:
432 432 raise
433 433 except LookupError:
434 434 pass
435 435
436 436 try:
437 437 r = int(changeid)
438 438 if str(r) != changeid:
439 439 raise ValueError
440 440 l = len(repo.changelog)
441 441 if r < 0:
442 442 r += l
443 443 if r < 0 or r >= l:
444 444 raise ValueError
445 445 self._rev = r
446 446 self._node = repo.changelog.node(r)
447 447 return
448 448 except error.FilteredIndexError:
449 449 raise
450 450 except (ValueError, OverflowError, IndexError):
451 451 pass
452 452
453 453 if len(changeid) == 40:
454 454 try:
455 455 self._node = bin(changeid)
456 456 self._rev = repo.changelog.rev(self._node)
457 457 return
458 458 except error.FilteredLookupError:
459 459 raise
460 460 except (TypeError, LookupError):
461 461 pass
462 462
463 463 # lookup bookmarks through the name interface
464 464 try:
465 465 self._node = repo.names.singlenode(repo, changeid)
466 466 self._rev = repo.changelog.rev(self._node)
467 467 return
468 468 except KeyError:
469 469 pass
470 470 except error.FilteredRepoLookupError:
471 471 raise
472 472 except error.RepoLookupError:
473 473 pass
474 474
475 475 self._node = repo.unfiltered().changelog._partialmatch(changeid)
476 476 if self._node is not None:
477 477 self._rev = repo.changelog.rev(self._node)
478 478 return
479 479
480 480 # lookup failed
481 481 # check if it might have come from damaged dirstate
482 482 #
483 483 # XXX we could avoid the unfiltered if we had a recognizable
484 484 # exception for filtered changeset access
485 485 if changeid in repo.unfiltered().dirstate.parents():
486 486 msg = _("working directory has unknown parent '%s'!")
487 487 raise error.Abort(msg % short(changeid))
488 488 try:
489 489 if len(changeid) == 20:
490 490 changeid = hex(changeid)
491 491 except TypeError:
492 492 pass
493 493 except (error.FilteredIndexError, error.FilteredLookupError,
494 494 error.FilteredRepoLookupError):
495 495 if repo.filtername == 'visible':
496 496 msg = _("hidden revision '%s'") % changeid
497 497 hint = _('use --hidden to access hidden revisions')
498 498 raise error.FilteredRepoLookupError(msg, hint=hint)
499 499 msg = _("filtered revision '%s' (not in '%s' subset)")
500 500 msg %= (changeid, repo.filtername)
501 501 raise error.FilteredRepoLookupError(msg)
502 502 except IndexError:
503 503 pass
504 504 raise error.RepoLookupError(
505 505 _("unknown revision '%s'") % changeid)
506 506
507 507 def __hash__(self):
508 508 try:
509 509 return hash(self._rev)
510 510 except AttributeError:
511 511 return id(self)
512 512
513 513 def __nonzero__(self):
514 514 return self._rev != nullrev
515 515
516 516 @propertycache
517 517 def _changeset(self):
518 518 return self._repo.changelog.read(self.rev())
519 519
520 520 @propertycache
521 521 def _manifest(self):
522 522 return self._repo.manifest.read(self._changeset[0])
523 523
524 524 @propertycache
525 525 def _manifestdelta(self):
526 526 return self._repo.manifest.readdelta(self._changeset[0])
527 527
528 528 @propertycache
529 529 def _parents(self):
530 530 p = self._repo.changelog.parentrevs(self._rev)
531 531 if p[1] == nullrev:
532 532 p = p[:-1]
533 533 return [changectx(self._repo, x) for x in p]
534 534
535 535 def changeset(self):
536 536 return self._changeset
537 537 def manifestnode(self):
538 538 return self._changeset[0]
539 539
540 540 def user(self):
541 541 return self._changeset[1]
542 542 def date(self):
543 543 return self._changeset[2]
544 544 def files(self):
545 545 return self._changeset[3]
546 546 def description(self):
547 547 return self._changeset[4]
548 548 def branch(self):
549 549 return encoding.tolocal(self._changeset[5].get("branch"))
550 550 def closesbranch(self):
551 551 return 'close' in self._changeset[5]
552 552 def extra(self):
553 553 return self._changeset[5]
554 554 def tags(self):
555 555 return self._repo.nodetags(self._node)
556 556 def bookmarks(self):
557 557 return self._repo.nodebookmarks(self._node)
558 558 def phase(self):
559 559 return self._repo._phasecache.phase(self._repo, self._rev)
560 560 def hidden(self):
561 561 return self._rev in repoview.filterrevs(self._repo, 'visible')
562 562
563 563 def children(self):
564 564 """return contexts for each child changeset"""
565 565 c = self._repo.changelog.children(self._node)
566 566 return [changectx(self._repo, x) for x in c]
567 567
568 568 def ancestors(self):
569 569 for a in self._repo.changelog.ancestors([self._rev]):
570 570 yield changectx(self._repo, a)
571 571
572 572 def descendants(self):
573 573 for d in self._repo.changelog.descendants([self._rev]):
574 574 yield changectx(self._repo, d)
575 575
576 576 def filectx(self, path, fileid=None, filelog=None):
577 577 """get a file context from this changeset"""
578 578 if fileid is None:
579 579 fileid = self.filenode(path)
580 580 return filectx(self._repo, path, fileid=fileid,
581 581 changectx=self, filelog=filelog)
582 582
583 583 def ancestor(self, c2, warn=False):
584 584 """return the "best" ancestor context of self and c2
585 585
586 586 If there are multiple candidates, it will show a message and check
587 587 merge.preferancestor configuration before falling back to the
588 588 revlog ancestor."""
589 589 # deal with workingctxs
590 590 n2 = c2._node
591 591 if n2 is None:
592 592 n2 = c2._parents[0]._node
593 593 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
594 594 if not cahs:
595 595 anc = nullid
596 596 elif len(cahs) == 1:
597 597 anc = cahs[0]
598 598 else:
599 599 for r in self._repo.ui.configlist('merge', 'preferancestor'):
600 600 try:
601 601 ctx = changectx(self._repo, r)
602 602 except error.RepoLookupError:
603 603 continue
604 604 anc = ctx.node()
605 605 if anc in cahs:
606 606 break
607 607 else:
608 608 anc = self._repo.changelog.ancestor(self._node, n2)
609 609 if warn:
610 610 self._repo.ui.status(
611 611 (_("note: using %s as ancestor of %s and %s\n") %
612 612 (short(anc), short(self._node), short(n2))) +
613 613 ''.join(_(" alternatively, use --config "
614 614 "merge.preferancestor=%s\n") %
615 615 short(n) for n in sorted(cahs) if n != anc))
616 616 return changectx(self._repo, anc)
617 617
618 618 def descendant(self, other):
619 619 """True if other is descendant of this changeset"""
620 620 return self._repo.changelog.descendant(self._rev, other._rev)
621 621
622 622 def walk(self, match):
623 623 fset = set(match.files())
624 624 # for dirstate.walk, files=['.'] means "walk the whole tree".
625 625 # follow that here, too
626 626 fset.discard('.')
627 627
628 628 # avoid the entire walk if we're only looking for specific files
629 629 if fset and not match.anypats():
630 630 if util.all([fn in self for fn in fset]):
631 631 for fn in sorted(fset):
632 632 if match(fn):
633 633 yield fn
634 634 raise StopIteration
635 635
636 636 for fn in self:
637 637 if fn in fset:
638 638 # specified pattern is the exact name
639 639 fset.remove(fn)
640 640 if match(fn):
641 641 yield fn
642 642 for fn in sorted(fset):
643 643 if fn in self._dirs:
644 644 # specified pattern is a directory
645 645 continue
646 646 match.bad(fn, _('no such file in rev %s') % self)
647 647
648 648 def matches(self, match):
649 649 return self.walk(match)
650 650
651 651 class basefilectx(object):
652 652 """A filecontext object represents the common logic for its children:
653 653 filectx: read-only access to a filerevision that is already present
654 654 in the repo,
655 655 workingfilectx: a filecontext that represents files from the working
656 656 directory,
657 657 memfilectx: a filecontext that represents files in-memory."""
658 658 def __new__(cls, repo, path, *args, **kwargs):
659 659 return super(basefilectx, cls).__new__(cls)
660 660
661 661 @propertycache
662 662 def _filelog(self):
663 663 return self._repo.file(self._path)
664 664
665 665 @propertycache
666 666 def _changeid(self):
667 667 if '_changeid' in self.__dict__:
668 668 return self._changeid
669 669 elif '_changectx' in self.__dict__:
670 670 return self._changectx.rev()
671 671 else:
672 672 return self._filelog.linkrev(self._filerev)
673 673
674 674 @propertycache
675 675 def _filenode(self):
676 676 if '_fileid' in self.__dict__:
677 677 return self._filelog.lookup(self._fileid)
678 678 else:
679 679 return self._changectx.filenode(self._path)
680 680
681 681 @propertycache
682 682 def _filerev(self):
683 683 return self._filelog.rev(self._filenode)
684 684
685 685 @propertycache
686 686 def _repopath(self):
687 687 return self._path
688 688
689 689 def __nonzero__(self):
690 690 try:
691 691 self._filenode
692 692 return True
693 693 except error.LookupError:
694 694 # file is missing
695 695 return False
696 696
697 697 def __str__(self):
698 698 return "%s@%s" % (self.path(), self._changectx)
699 699
700 700 def __repr__(self):
701 701 return "<%s %s>" % (type(self).__name__, str(self))
702 702
703 703 def __hash__(self):
704 704 try:
705 705 return hash((self._path, self._filenode))
706 706 except AttributeError:
707 707 return id(self)
708 708
709 709 def __eq__(self, other):
710 710 try:
711 711 return (type(self) == type(other) and self._path == other._path
712 712 and self._filenode == other._filenode)
713 713 except AttributeError:
714 714 return False
715 715
716 716 def __ne__(self, other):
717 717 return not (self == other)
718 718
719 719 def filerev(self):
720 720 return self._filerev
721 721 def filenode(self):
722 722 return self._filenode
723 723 def flags(self):
724 724 return self._changectx.flags(self._path)
725 725 def filelog(self):
726 726 return self._filelog
727 727 def rev(self):
728 728 return self._changeid
729 729 def linkrev(self):
730 730 return self._filelog.linkrev(self._filerev)
731 731 def node(self):
732 732 return self._changectx.node()
733 733 def hex(self):
734 734 return self._changectx.hex()
735 735 def user(self):
736 736 return self._changectx.user()
737 737 def date(self):
738 738 return self._changectx.date()
739 739 def files(self):
740 740 return self._changectx.files()
741 741 def description(self):
742 742 return self._changectx.description()
743 743 def branch(self):
744 744 return self._changectx.branch()
745 745 def extra(self):
746 746 return self._changectx.extra()
747 747 def phase(self):
748 748 return self._changectx.phase()
749 749 def phasestr(self):
750 750 return self._changectx.phasestr()
751 751 def manifest(self):
752 752 return self._changectx.manifest()
753 753 def changectx(self):
754 754 return self._changectx
755 755
756 756 def path(self):
757 757 return self._path
758 758
759 759 def isbinary(self):
760 760 try:
761 761 return util.binary(self.data())
762 762 except IOError:
763 763 return False
764 764 def isexec(self):
765 765 return 'x' in self.flags()
766 766 def islink(self):
767 767 return 'l' in self.flags()
768 768
769 769 def cmp(self, fctx):
770 770 """compare with other file context
771 771
772 772 returns True if different than fctx.
773 773 """
774 774 if (fctx._filerev is None
775 775 and (self._repo._encodefilterpats
776 776 # if file data starts with '\1\n', empty metadata block is
777 777 # prepended, which adds 4 bytes to filelog.size().
778 778 or self.size() - 4 == fctx.size())
779 779 or self.size() == fctx.size()):
780 780 return self._filelog.cmp(self._filenode, fctx.data())
781 781
782 782 return True
783 783
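# Example of the 4-byte size quirk referenced above (hypothetical content):
# working-copy data that itself starts with '\1\n', e.g. '\1\nhello\n', is
# stored with an empty metadata block ('\1\n\1\n') prepended, so self.size()
# from the filelog is 4 bytes larger than fctx.size() on disk even when the
# contents are identical; the check above allows for that before falling back
# to a full content comparison.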
784 784 def introrev(self):
785 785 """return the rev of the changeset which introduced this file revision
786 786
787 787 This method is different from linkrev because it takes into account the
788 788 changeset the filectx was created from. It ensures the returned
789 789 revision is one of its ancestors. This prevents bugs from
790 790 'linkrev-shadowing' when a file revision is used by multiple
791 791 changesets.
792 792 """
793 793 lkr = self.linkrev()
794 794 attrs = vars(self)
795 795 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
796 796 if noctx or self.rev() == lkr:
797 797 return self.linkrev()
798 798 return _adjustlinkrev(self._repo, self._path, self._filelog,
799 799 self._filenode, self.rev(), inclusive=True)
800 800
801 801 def parents(self):
802 802 _path = self._path
803 803 fl = self._filelog
804 804 parents = self._filelog.parents(self._filenode)
805 805 pl = [(_path, node, fl) for node in parents if node != nullid]
806 806
807 807 r = fl.renamed(self._filenode)
808 808 if r:
809 809 # - In the simple rename case, both parents are nullid and pl is empty.
810 810 # - In case of merge, only one of the parents is nullid and should
811 811 # be replaced with the rename information. This parent is -always-
812 812 # the first one.
813 813 #
814 814 # As nullid parents have always been filtered out by the previous list
815 815 # comprehension, inserting at 0 will always result in replacing the
816 816 # first nullid parent with the rename information.
817 817 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
818 818
819 819 ret = []
820 820 for path, fnode, l in pl:
821 821 if '_changeid' in vars(self) or '_changectx' in vars(self):
822 822 # If self is associated with a changeset (probably explicitly
823 823 # fed), ensure the created filectx is associated with a
824 824 # changeset that is an ancestor of self.changectx.
825 825 rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
826 826 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
827 827 changeid=rev)
828 828 else:
829 829 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
830 830 ret.append(fctx)
831 831 return ret
832 832
833 833 def p1(self):
834 834 return self.parents()[0]
835 835
836 836 def p2(self):
837 837 p = self.parents()
838 838 if len(p) == 2:
839 839 return p[1]
840 840 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
841 841
842 842 def annotate(self, follow=False, linenumber=None, diffopts=None):
843 843 '''returns a list of tuples of (ctx, line) for each line
844 844 in the file, where ctx is the filectx of the node where
845 845 that line was last changed.
846 846 This instead returns tuples of ((ctx, linenumber), line) for each
847 847 line if the "linenumber" parameter is not None.
848 848 In such tuples, linenumber is the line's number at its first
849 849 appearance in the managed file.
850 850 To reduce annotation cost,
851 851 a fixed value (False) is returned as linenumber
852 852 if the "linenumber" parameter is False.'''
853 853
854 854 if linenumber is None:
855 855 def decorate(text, rev):
856 856 return ([rev] * len(text.splitlines()), text)
857 857 elif linenumber:
858 858 def decorate(text, rev):
859 859 size = len(text.splitlines())
860 860 return ([(rev, i) for i in xrange(1, size + 1)], text)
861 861 else:
862 862 def decorate(text, rev):
863 863 return ([(rev, False)] * len(text.splitlines()), text)
864 864
865 865 def pair(parent, child):
866 866 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
867 867 refine=True)
868 868 for (a1, a2, b1, b2), t in blocks:
869 869 # Changed blocks ('!') or blocks made only of blank lines ('~')
870 870 # belong to the child.
871 871 if t == '=':
872 872 child[0][b1:b2] = parent[0][a1:a2]
873 873 return child
874 874
875 875 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
876 876
877 877 def parents(f):
878 878 pl = f.parents()
879 879
880 880 # Don't return renamed parents if we aren't following.
881 881 if not follow:
882 882 pl = [p for p in pl if p.path() == f.path()]
883 883
884 884 # renamed filectx won't have a filelog yet, so set it
885 885 # from the cache to save time
886 886 for p in pl:
887 887 if not '_filelog' in p.__dict__:
888 888 p._filelog = getlog(p.path())
889 889
890 890 return pl
891 891
892 892 # use linkrev to find the first changeset where self appeared
893 893 base = self
894 894 introrev = self.introrev()
895 895 if self.rev() != introrev:
896 896 base = self.filectx(self.filenode(), changeid=introrev)
897 897
898 898 # This algorithm would prefer to be recursive, but Python is a
899 899 # bit recursion-hostile. Instead we do an iterative
900 900 # depth-first search.
901 901
902 902 visit = [base]
903 903 hist = {}
904 904 pcache = {}
905 905 needed = {base: 1}
906 906 while visit:
907 907 f = visit[-1]
908 908 pcached = f in pcache
909 909 if not pcached:
910 910 pcache[f] = parents(f)
911 911
912 912 ready = True
913 913 pl = pcache[f]
914 914 for p in pl:
915 915 if p not in hist:
916 916 ready = False
917 917 visit.append(p)
918 918 if not pcached:
919 919 needed[p] = needed.get(p, 0) + 1
920 920 if ready:
921 921 visit.pop()
922 922 reusable = f in hist
923 923 if reusable:
924 924 curr = hist[f]
925 925 else:
926 926 curr = decorate(f.data(), f)
927 927 for p in pl:
928 928 if not reusable:
929 929 curr = pair(hist[p], curr)
930 930 if needed[p] == 1:
931 931 del hist[p]
932 932 del needed[p]
933 933 else:
934 934 needed[p] -= 1
935 935
936 936 hist[f] = curr
937 937 pcache[f] = []
938 938
939 939 return zip(hist[base][0], hist[base][1].splitlines(True))
940 940
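# Usage sketch (hypothetical file name; illustrative only):
#     fctx = repo['tip']['foo.py']
#     for lastctx, line in fctx.annotate(follow=True):
#         print lastctx.rev(), line,           # lastctx is a filectx per line
#     for (lastctx, lineno), line in fctx.annotate(linenumber=True):
#         print lastctx.rev(), lineno, line,   # with original line numbers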
941 941 def ancestors(self, followfirst=False):
942 942 visit = {}
943 943 c = self
944 944 cut = followfirst and 1 or None
945 945 while True:
946 946 for parent in c.parents()[:cut]:
947 947 visit[(parent.rev(), parent.node())] = parent
948 948 if not visit:
949 949 break
950 950 c = visit.pop(max(visit))
951 951 yield c
952 952
953 953 class filectx(basefilectx):
954 954 """A filecontext object makes access to data related to a particular
955 955 filerevision convenient."""
956 956 def __init__(self, repo, path, changeid=None, fileid=None,
957 957 filelog=None, changectx=None):
958 958 """changeid can be a changeset revision, node, or tag.
959 959 fileid can be a file revision or node."""
960 960 self._repo = repo
961 961 self._path = path
962 962
963 963 assert (changeid is not None
964 964 or fileid is not None
965 965 or changectx is not None), \
966 966 ("bad args: changeid=%r, fileid=%r, changectx=%r"
967 967 % (changeid, fileid, changectx))
968 968
969 969 if filelog is not None:
970 970 self._filelog = filelog
971 971
972 972 if changeid is not None:
973 973 self._changeid = changeid
974 974 if changectx is not None:
975 975 self._changectx = changectx
976 976 if fileid is not None:
977 977 self._fileid = fileid
978 978
979 979 @propertycache
980 980 def _changectx(self):
981 981 try:
982 982 return changectx(self._repo, self._changeid)
983 983 except error.FilteredRepoLookupError:
984 984 # Linkrev may point to any revision in the repository. When the
985 985 # repository is filtered this may lead to `filectx` trying to build
986 986 # `changectx` for a filtered revision. In such a case we fall back to
987 987 # creating `changectx` on the unfiltered version of the repository.
988 988 # This fallback should not be an issue because `changectx` objects
989 989 # from `filectx` are not used in complex operations that care about
990 990 # filtering.
991 991 #
992 992 # This fallback is a cheap and dirty fix that prevents several
993 993 # crashes. It does not ensure the behavior is correct. However the
994 994 # behavior was not correct before filtering either, and "incorrect
995 995 # behavior" is seen as better than "crash".
996 996 #
997 997 # Linkrevs have several serious troubles with filtering that are
998 998 # complicated to solve. Proper handling of the issue here should be
999 999 # considered when solutions to the linkrev issue are on the table.
1000 1000 return changectx(self._repo.unfiltered(), self._changeid)
1001 1001
1002 1002 def filectx(self, fileid, changeid=None):
1003 1003 '''opens an arbitrary revision of the file without
1004 1004 opening a new filelog'''
1005 1005 return filectx(self._repo, self._path, fileid=fileid,
1006 1006 filelog=self._filelog, changeid=changeid)
1007 1007
1008 1008 def data(self):
1009 1009 try:
1010 1010 return self._filelog.read(self._filenode)
1011 1011 except error.CensoredNodeError:
1012 1012 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1013 1013 return ""
1014 1014 raise util.Abort(_("censored node: %s") % short(self._filenode),
1015 1015 hint=_("set censor.policy to ignore errors"))
1016 1016
1017 1017 def size(self):
1018 1018 return self._filelog.size(self._filerev)
1019 1019
1020 1020 def renamed(self):
1021 1021 """check if file was actually renamed in this changeset revision
1022 1022
1023 1023 If a rename is logged in the file revision, we report the copy for the
1024 1024 changeset only if the file revision's linkrev points back to the changeset
1025 1025 in question or both changeset parents contain different file revisions.
1026 1026 """
1027 1027
1028 1028 renamed = self._filelog.renamed(self._filenode)
1029 1029 if not renamed:
1030 1030 return renamed
1031 1031
1032 1032 if self.rev() == self.linkrev():
1033 1033 return renamed
1034 1034
1035 1035 name = self.path()
1036 1036 fnode = self._filenode
1037 1037 for p in self._changectx.parents():
1038 1038 try:
1039 1039 if fnode == p.filenode(name):
1040 1040 return None
1041 1041 except error.LookupError:
1042 1042 pass
1043 1043 return renamed
1044 1044
1045 1045 def children(self):
1046 1046 # hard for renames
1047 1047 c = self._filelog.children(self._filenode)
1048 1048 return [filectx(self._repo, self._path, fileid=x,
1049 1049 filelog=self._filelog) for x in c]
1050 1050
1051 1051 class committablectx(basectx):
1052 1052 """A committablectx object provides common functionality for a context that
1053 1053 wants the ability to commit, e.g. workingctx or memctx."""
1054 1054 def __init__(self, repo, text="", user=None, date=None, extra=None,
1055 1055 changes=None):
1056 1056 self._repo = repo
1057 1057 self._rev = None
1058 1058 self._node = None
1059 1059 self._text = text
1060 1060 if date:
1061 1061 self._date = util.parsedate(date)
1062 1062 if user:
1063 1063 self._user = user
1064 1064 if changes:
1065 1065 self._status = changes
1066 1066
1067 1067 self._extra = {}
1068 1068 if extra:
1069 1069 self._extra = extra.copy()
1070 1070 if 'branch' not in self._extra:
1071 1071 try:
1072 1072 branch = encoding.fromlocal(self._repo.dirstate.branch())
1073 1073 except UnicodeDecodeError:
1074 1074 raise util.Abort(_('branch name not in UTF-8!'))
1075 1075 self._extra['branch'] = branch
1076 1076 if self._extra['branch'] == '':
1077 1077 self._extra['branch'] = 'default'
1078 1078
1079 1079 def __str__(self):
1080 1080 return str(self._parents[0]) + "+"
1081 1081
1082 1082 def __nonzero__(self):
1083 1083 return True
1084 1084
1085 1085 def _buildflagfunc(self):
1086 1086 # Create a fallback function for getting file flags when the
1087 1087 # filesystem doesn't support them
1088 1088
1089 1089 copiesget = self._repo.dirstate.copies().get
1090 1090
1091 1091 if len(self._parents) < 2:
1092 1092 # when we have one parent, it's easy: copy from parent
1093 1093 man = self._parents[0].manifest()
1094 1094 def func(f):
1095 1095 f = copiesget(f, f)
1096 1096 return man.flags(f)
1097 1097 else:
1098 1098 # merges are tricky: we try to reconstruct the unstored
1099 1099 # result from the merge (issue1802)
1100 1100 p1, p2 = self._parents
1101 1101 pa = p1.ancestor(p2)
1102 1102 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1103 1103
1104 1104 def func(f):
1105 1105 f = copiesget(f, f) # may be wrong for merges with copies
1106 1106 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1107 1107 if fl1 == fl2:
1108 1108 return fl1
1109 1109 if fl1 == fla:
1110 1110 return fl2
1111 1111 if fl2 == fla:
1112 1112 return fl1
1113 1113 return '' # punt for conflicts
1114 1114
1115 1115 return func
1116 1116
1117 1117 @propertycache
1118 1118 def _flagfunc(self):
1119 1119 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1120 1120
1121 1121 @propertycache
1122 1122 def _manifest(self):
1123 1123 """generate a manifest corresponding to the values in self._status
1124 1124
1125 1125 This reuses the file nodeid from the parent, but appends an extra letter
1126 1126 when the file is modified. Modified files get an extra 'm' while added
1127 1127 files get an extra 'a'. This is used by manifest merge to see that files
1128 1128 are different and by the update logic to avoid deleting newly added files.
1129 1129 """
1130 1130
1131 1131 man1 = self._parents[0].manifest()
1132 1132 man = man1.copy()
1133 1133 if len(self._parents) > 1:
1134 1134 man2 = self.p2().manifest()
1135 1135 def getman(f):
1136 1136 if f in man1:
1137 1137 return man1
1138 1138 return man2
1139 1139 else:
1140 1140 getman = lambda f: man1
1141 1141
1142 1142 copied = self._repo.dirstate.copies()
1143 1143 ff = self._flagfunc
1144 1144 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1145 1145 for f in l:
1146 1146 orig = copied.get(f, f)
1147 1147 man[f] = getman(orig).get(orig, nullid) + i
1148 1148 try:
1149 1149 man.setflag(f, ff(f))
1150 1150 except OSError:
1151 1151 pass
1152 1152
1153 1153 for f in self._status.deleted + self._status.removed:
1154 1154 if f in man:
1155 1155 del man[f]
1156 1156
1157 1157 return man
1158 1158
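# Sketch of the resulting entries (hypothetical 20-byte parent nodeid `pnode`):
#     man['added.txt']    == nullid + 'a'   # 21 bytes; file has no parent yet
#     man['modified.txt'] == pnode + 'm'    # parent nodeid plus marker letter
# The extra letter guarantees the entry differs from any real 20-byte nodeid,
# which is what manifest merge and the update logic rely on.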
1159 1159 @propertycache
1160 1160 def _status(self):
1161 1161 return self._repo.status()
1162 1162
1163 1163 @propertycache
1164 1164 def _user(self):
1165 1165 return self._repo.ui.username()
1166 1166
1167 1167 @propertycache
1168 1168 def _date(self):
1169 1169 return util.makedate()
1170 1170
1171 1171 def subrev(self, subpath):
1172 1172 return None
1173 1173
1174 1174 def user(self):
1175 1175 return self._user or self._repo.ui.username()
1176 1176 def date(self):
1177 1177 return self._date
1178 1178 def description(self):
1179 1179 return self._text
1180 1180 def files(self):
1181 1181 return sorted(self._status.modified + self._status.added +
1182 1182 self._status.removed)
1183 1183
1184 1184 def modified(self):
1185 1185 return self._status.modified
1186 1186 def added(self):
1187 1187 return self._status.added
1188 1188 def removed(self):
1189 1189 return self._status.removed
1190 1190 def deleted(self):
1191 1191 return self._status.deleted
1192 1192 def branch(self):
1193 1193 return encoding.tolocal(self._extra['branch'])
1194 1194 def closesbranch(self):
1195 1195 return 'close' in self._extra
1196 1196 def extra(self):
1197 1197 return self._extra
1198 1198
1199 1199 def tags(self):
1200 1200 t = []
1201 1201 for p in self.parents():
1202 1202 t.extend(p.tags())
1203 1203 return t
1204 1204
1205 1205 def bookmarks(self):
1206 1206 b = []
1207 1207 for p in self.parents():
1208 1208 b.extend(p.bookmarks())
1209 1209 return b
1210 1210
1211 1211 def phase(self):
1212 1212 phase = phases.draft # default phase to draft
1213 1213 for p in self.parents():
1214 1214 phase = max(phase, p.phase())
1215 1215 return phase
1216 1216
1217 1217 def hidden(self):
1218 1218 return False
1219 1219
1220 1220 def children(self):
1221 1221 return []
1222 1222
1223 1223 def flags(self, path):
1224 1224 if '_manifest' in self.__dict__:
1225 1225 try:
1226 1226 return self._manifest.flags(path)
1227 1227 except KeyError:
1228 1228 return ''
1229 1229
1230 1230 try:
1231 1231 return self._flagfunc(path)
1232 1232 except OSError:
1233 1233 return ''
1234 1234
1235 1235 def ancestor(self, c2):
1236 1236 """return the "best" ancestor context of self and c2"""
1237 1237 return self._parents[0].ancestor(c2) # punt on two parents for now
1238 1238
1239 1239 def walk(self, match):
1240 1240 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1241 1241 True, False))
1242 1242
1243 1243 def matches(self, match):
1244 1244 return sorted(self._repo.dirstate.matches(match))
1245 1245
1246 1246 def ancestors(self):
1247 1247 for p in self._parents:
1248 1248 yield p
1249 1249 for a in self._repo.changelog.ancestors(
1250 1250 [p.rev() for p in self._parents]):
1251 1251 yield changectx(self._repo, a)
1252 1252
1253 1253 def markcommitted(self, node):
1254 1254 """Perform post-commit cleanup necessary after committing this ctx
1255 1255
1256 1256 Specifically, this updates backing stores this working context
1257 1257 wraps to reflect the fact that the changes reflected by this
1258 1258 workingctx have been committed. For example, it marks
1259 1259 modified and added files as normal in the dirstate.
1260 1260
1261 1261 """
1262 1262
1263 1263 self._repo.dirstate.beginparentchange()
1264 1264 for f in self.modified() + self.added():
1265 1265 self._repo.dirstate.normal(f)
1266 1266 for f in self.removed():
1267 1267 self._repo.dirstate.drop(f)
1268 1268 self._repo.dirstate.setparents(node)
1269 1269 self._repo.dirstate.endparentchange()
1270 1270
1271 1271 def dirs(self):
1272 1272 return self._repo.dirstate.dirs()
1273 1273
1274 1274 class workingctx(committablectx):
1275 1275 """A workingctx object makes access to data related to
1276 1276 the current working directory convenient.
1277 1277 date - any valid date string or (unixtime, offset), or None.
1278 1278 user - username string, or None.
1279 1279 extra - a dictionary of extra values, or None.
1280 1280 changes - a list of file lists as returned by localrepo.status()
1281 1281 or None to use the repository status.
1282 1282 """
1283 1283 def __init__(self, repo, text="", user=None, date=None, extra=None,
1284 1284 changes=None):
1285 1285 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1286 1286
1287 1287 def __iter__(self):
1288 1288 d = self._repo.dirstate
1289 1289 for f in d:
1290 1290 if d[f] != 'r':
1291 1291 yield f
1292 1292
1293 1293 def __contains__(self, key):
1294 1294 return self._repo.dirstate[key] not in "?r"
1295 1295
1296 1296 @propertycache
1297 1297 def _parents(self):
1298 1298 p = self._repo.dirstate.parents()
1299 1299 if p[1] == nullid:
1300 1300 p = p[:-1]
1301 1301 return [changectx(self._repo, x) for x in p]
1302 1302
1303 1303 def filectx(self, path, filelog=None):
1304 1304 """get a file context from the working directory"""
1305 1305 return workingfilectx(self._repo, path, workingctx=self,
1306 1306 filelog=filelog)
1307 1307
1308 1308 def dirty(self, missing=False, merge=True, branch=True):
1309 1309 "check whether a working directory is modified"
1310 1310 # check subrepos first
1311 1311 for s in sorted(self.substate):
1312 1312 if self.sub(s).dirty():
1313 1313 return True
1314 1314 # check current working dir
1315 1315 return ((merge and self.p2()) or
1316 1316 (branch and self.branch() != self.p1().branch()) or
1317 1317 self.modified() or self.added() or self.removed() or
1318 1318 (missing and self.deleted()))
1319 1319
1320 1320 def add(self, list, prefix=""):
1321 1321 join = lambda f: os.path.join(prefix, f)
1322 1322 wlock = self._repo.wlock()
1323 1323 ui, ds = self._repo.ui, self._repo.dirstate
1324 1324 try:
1325 1325 rejected = []
1326 1326 lstat = self._repo.wvfs.lstat
1327 1327 for f in list:
1328 1328 scmutil.checkportable(ui, join(f))
1329 1329 try:
1330 1330 st = lstat(f)
1331 1331 except OSError:
1332 1332 ui.warn(_("%s does not exist!\n") % join(f))
1333 1333 rejected.append(f)
1334 1334 continue
1335 1335 if st.st_size > 10000000:
1336 1336 ui.warn(_("%s: up to %d MB of RAM may be required "
1337 1337 "to manage this file\n"
1338 1338 "(use 'hg revert %s' to cancel the "
1339 1339 "pending addition)\n")
1340 1340 % (f, 3 * st.st_size // 1000000, join(f)))
1341 1341 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1342 1342 ui.warn(_("%s not added: only files and symlinks "
1343 1343 "supported currently\n") % join(f))
1344 1344 rejected.append(f)
1345 1345 elif ds[f] in 'amn':
1346 1346 ui.warn(_("%s already tracked!\n") % join(f))
1347 1347 elif ds[f] == 'r':
1348 1348 ds.normallookup(f)
1349 1349 else:
1350 1350 ds.add(f)
1351 1351 return rejected
1352 1352 finally:
1353 1353 wlock.release()
1354 1354
1355 1355 def forget(self, files, prefix=""):
1356 1356 join = lambda f: os.path.join(prefix, f)
1357 1357 wlock = self._repo.wlock()
1358 1358 try:
1359 1359 rejected = []
1360 1360 for f in files:
1361 1361 if f not in self._repo.dirstate:
1362 1362 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1363 1363 rejected.append(f)
1364 1364 elif self._repo.dirstate[f] != 'a':
1365 1365 self._repo.dirstate.remove(f)
1366 1366 else:
1367 1367 self._repo.dirstate.drop(f)
1368 1368 return rejected
1369 1369 finally:
1370 1370 wlock.release()
1371 1371
1372 1372 def undelete(self, list):
1373 1373 pctxs = self.parents()
1374 1374 wlock = self._repo.wlock()
1375 1375 try:
1376 1376 for f in list:
1377 1377 if self._repo.dirstate[f] != 'r':
1378 1378 self._repo.ui.warn(_("%s not removed!\n") % f)
1379 1379 else:
1380 1380 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1381 1381 t = fctx.data()
1382 1382 self._repo.wwrite(f, t, fctx.flags())
1383 1383 self._repo.dirstate.normal(f)
1384 1384 finally:
1385 1385 wlock.release()
1386 1386
1387 1387 def copy(self, source, dest):
1388 1388 try:
1389 1389 st = self._repo.wvfs.lstat(dest)
1390 1390 except OSError, err:
1391 1391 if err.errno != errno.ENOENT:
1392 1392 raise
1393 1393 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1394 1394 return
1395 1395 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1396 1396 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1397 1397 "symbolic link\n") % dest)
1398 1398 else:
1399 1399 wlock = self._repo.wlock()
1400 1400 try:
1401 1401 if self._repo.dirstate[dest] in '?':
1402 1402 self._repo.dirstate.add(dest)
1403 1403 elif self._repo.dirstate[dest] in 'r':
1404 1404 self._repo.dirstate.normallookup(dest)
1405 1405 self._repo.dirstate.copy(source, dest)
1406 1406 finally:
1407 1407 wlock.release()
1408 1408
1409 1409 def _filtersuspectsymlink(self, files):
1410 1410 if not files or self._repo.dirstate._checklink:
1411 1411 return files
1412 1412
1413 1413 # Symlink placeholders may get non-symlink-like contents
1414 1414 # via user error or dereferencing by NFS or Samba servers,
1415 1415 # so we filter out any placeholders that don't look like a
1416 1416 # symlink
1417 1417 sane = []
1418 1418 for f in files:
1419 1419 if self.flags(f) == 'l':
1420 1420 d = self[f].data()
1421 1421 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1422 1422 self._repo.ui.debug('ignoring suspect symlink placeholder'
1423 1423 ' "%s"\n' % f)
1424 1424 continue
1425 1425 sane.append(f)
1426 1426 return sane
1427 1427
1428 1428 def _checklookup(self, files):
1429 1429 # check for any possibly clean files
1430 1430 if not files:
1431 1431 return [], []
1432 1432
1433 1433 modified = []
1434 1434 fixup = []
1435 1435 pctx = self._parents[0]
1436 1436 # do a full compare of any files that might have changed
1437 1437 for f in sorted(files):
1438 1438 if (f not in pctx or self.flags(f) != pctx.flags(f)
1439 1439 or pctx[f].cmp(self[f])):
1440 1440 modified.append(f)
1441 1441 else:
1442 1442 fixup.append(f)
1443 1443
1444 1444 # update dirstate for files that are actually clean
1445 1445 if fixup:
1446 1446 try:
1447 1447 # updating the dirstate is optional
1448 1448 # so we don't wait on the lock
1449 1449 # wlock can invalidate the dirstate, so cache normal _after_
1450 1450 # taking the lock
1451 1451 wlock = self._repo.wlock(False)
1452 1452 normal = self._repo.dirstate.normal
1453 1453 try:
1454 1454 for f in fixup:
1455 1455 normal(f)
1456 1456 finally:
1457 1457 wlock.release()
1458 1458 except error.LockError:
1459 1459 pass
1460 1460 return modified, fixup
1461 1461
1462 1462 def _manifestmatches(self, match, s):
1463 1463 """Slow path for workingctx
1464 1464
1465 1465 The fast path is when we compare the working directory to its parent
1466 1466 which means this function is comparing with a non-parent; therefore we
1467 1467 need to build a manifest and return what matches.
1468 1468 """
1469 1469 mf = self._repo['.']._manifestmatches(match, s)
1470 1470 for f in s.modified + s.added:
1471 1471 mf[f] = _newnode
1472 1472 mf.setflag(f, self.flags(f))
1473 1473 for f in s.removed:
1474 1474 if f in mf:
1475 1475 del mf[f]
1476 1476 return mf
1477 1477
1478 1478 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1479 1479 unknown=False):
1480 1480 '''Gets the status from the dirstate -- internal use only.'''
1481 1481 listignored, listclean, listunknown = ignored, clean, unknown
1482 1482 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1483 1483 subrepos = []
1484 1484 if '.hgsub' in self:
1485 1485 subrepos = sorted(self.substate)
1486 1486 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1487 1487 listclean, listunknown)
1488 1488
1489 1489 # check for any possibly clean files
1490 1490 if cmp:
1491 1491 modified2, fixup = self._checklookup(cmp)
1492 1492 s.modified.extend(modified2)
1493 1493
1494 1494 # update dirstate for files that are actually clean
1495 1495 if fixup and listclean:
1496 1496 s.clean.extend(fixup)
1497 1497
1498 1498 if match.always():
1499 1499 # cache for performance
1500 1500 if s.unknown or s.ignored or s.clean:
1501 1501 # "_status" is cached with list*=False in the normal route
1502 1502 self._status = scmutil.status(s.modified, s.added, s.removed,
1503 1503 s.deleted, [], [], [])
1504 1504 else:
1505 1505 self._status = s
1506 1506
1507 1507 return s
1508 1508
1509 1509 def _buildstatus(self, other, s, match, listignored, listclean,
1510 1510 listunknown):
1511 1511 """build a status with respect to another context
1512 1512
1513 1513 This includes logic for maintaining the fast path of status when
1514 1514 comparing the working directory against its parent: building a new
1515 1515 manifest is skipped unless self (the working directory) is compared
1516 1516 against something other than its parent (repo['.']).
1517 1517 """
1518 1518 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1519 1519 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1520 1520 # might have accidentally ended up with the entire contents of the file
1521 1521 # they are supposed to be linking to.
1522 1522 s.modified[:] = self._filtersuspectsymlink(s.modified)
1523 1523 if other != self._repo['.']:
1524 1524 s = super(workingctx, self)._buildstatus(other, s, match,
1525 1525 listignored, listclean,
1526 1526 listunknown)
1527 1527 return s
1528 1528
1529 1529 def _matchstatus(self, other, match):
1530 1530 """override the match method with a filter for directory patterns
1531 1531
1532 1532 We use inheritance to customize the match.bad method only in cases of
1533 1533 workingctx since it belongs only to the working directory when
1534 1534 comparing against the parent changeset.
1535 1535
1536 1536 If we aren't comparing against the working directory's parent, then we
1537 1537 just use the default match object sent to us.
1538 1538 """
1539 1539 superself = super(workingctx, self)
1540 1540 match = superself._matchstatus(other, match)
1541 1541 if other != self._repo['.']:
1542 1542 def bad(f, msg):
1543 1543 # 'f' may be a directory pattern from 'match.files()',
1544 1544 # so 'f not in ctx1' is not enough
1545 1545 if f not in other and f not in other.dirs():
1546 1546 self._repo.ui.warn('%s: %s\n' %
1547 1547 (self._repo.dirstate.pathto(f), msg))
1548 1548 match.bad = bad
1549 1549 return match
1550 1550
1551 1551 class committablefilectx(basefilectx):
1552 1552 """A committablefilectx provides common functionality for a file context
1553 1553 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1554 1554 def __init__(self, repo, path, filelog=None, ctx=None):
1555 1555 self._repo = repo
1556 1556 self._path = path
1557 1557 self._changeid = None
1558 1558 self._filerev = self._filenode = None
1559 1559
1560 1560 if filelog is not None:
1561 1561 self._filelog = filelog
1562 1562 if ctx:
1563 1563 self._changectx = ctx
1564 1564
1565 1565 def __nonzero__(self):
1566 1566 return True
1567 1567
1568 1568 def parents(self):
1569 1569 '''return parent filectxs, following copies if necessary'''
1570 1570 def filenode(ctx, path):
1571 1571 return ctx._manifest.get(path, nullid)
1572 1572
1573 1573 path = self._path
1574 1574 fl = self._filelog
1575 1575 pcl = self._changectx._parents
1576 1576 renamed = self.renamed()
1577 1577
1578 1578 if renamed:
1579 1579 pl = [renamed + (None,)]
1580 1580 else:
1581 1581 pl = [(path, filenode(pcl[0], path), fl)]
1582 1582
1583 1583 for pc in pcl[1:]:
1584 1584 pl.append((path, filenode(pc, path), fl))
1585 1585
1586 1586 return [filectx(self._repo, p, fileid=n, filelog=l)
1587 1587 for p, n, l in pl if n != nullid]
1588 1588
1589 1589 def children(self):
1590 1590 return []
1591 1591
1592 1592 class workingfilectx(committablefilectx):
1593 1593 """A workingfilectx object makes access to data related to a particular
1594 1594 file in the working directory convenient."""
1595 1595 def __init__(self, repo, path, filelog=None, workingctx=None):
1596 1596 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1597 1597
1598 1598 @propertycache
1599 1599 def _changectx(self):
1600 1600 return workingctx(self._repo)
1601 1601
1602 1602 def data(self):
1603 1603 return self._repo.wread(self._path)
1604 1604 def renamed(self):
1605 1605 rp = self._repo.dirstate.copied(self._path)
1606 1606 if not rp:
1607 1607 return None
1608 1608 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1609 1609
1610 1610 def size(self):
1611 1611 return self._repo.wvfs.lstat(self._path).st_size
1612 1612 def date(self):
1613 1613 t, tz = self._changectx.date()
1614 1614 try:
1615 1615 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1616 1616 except OSError, err:
1617 1617 if err.errno != errno.ENOENT:
1618 1618 raise
1619 1619 return (t, tz)
1620 1620
1621 1621 def cmp(self, fctx):
1622 1622 """compare with other file context
1623 1623
1624 1624 returns True if different than fctx.
1625 1625 """
1626 1626 # fctx should be a filectx (not a workingfilectx)
1627 1627 # invert comparison to reuse the same code path
1628 1628 return fctx.cmp(self)
1629 1629
1630 1630 def remove(self, ignoremissing=False):
1631 1631 """wraps unlink for a repo's working directory"""
1632 1632 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1633 1633
1634 1634 def write(self, data, flags):
1635 1635 """wraps repo.wwrite"""
1636 1636 self._repo.wwrite(self._path, data, flags)
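# Usage sketch (illustrative only; assumes an existing localrepo object
# 'repo' and a tracked file 'README'). repo[None] is the conventional way
# to obtain a workingctx, and indexing it by path yields a workingfilectx:
#
#   wctx = repo[None]
#   fctx = wctx['README']
#   fctx.data()        # file content, read via repo.wread()
#   fctx.size()        # st_size of the file in the working directory
#   fctx.renamed()     # (source, filenode) if marked as copied, else None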
1637 1637
1638 1638 class workingcommitctx(workingctx):
1639 1639 """A workingcommitctx object makes access to data related to
1640 1640 the revision being committed convenient.
1641 1641
1642 1642 This hides changes in the working directory that aren't being
1643 1643 committed in this context.
1644 1644 """
1645 1645 def __init__(self, repo, changes,
1646 1646 text="", user=None, date=None, extra=None):
1647 1647 super(workingctx, self).__init__(repo, text, user, date, extra,
1648 1648 changes)
1649 1649
1650 def _buildstatus(self, other, s, match,
1651 listignored, listclean, listunknown):
1652 """Prevent ``workingctx._buildstatus`` from changing ``self._status``
1653 """
1654 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1655 if other != self._repo['.']:
1656 # workingctx._buildstatus doesn't change self._status in this case
1657 superself = super(workingcommitctx, self)
1658 s = superself._buildstatus(other, s, match,
1659 listignored, listclean, listunknown)
1660 return s
1661
1662 1650 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1663 1651 unknown=False):
1664 1652 """Return matched files only in ``self._status``
1665 1653
1666 1654 Files whose changes are not part of this commit appear "clean"
1667 1655 via this context, even if they are modified in the working directory.
1668 1656 """
1669 1657 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1670 1658 if clean:
1671 1659 clean = [f for f in self._manifest if f not in self._changedset]
1672 1660 else:
1673 1661 clean = []
1674 1662 return scmutil.status([f for f in self._status.modified if match(f)],
1675 1663 [f for f in self._status.added if match(f)],
1676 1664 [f for f in self._status.removed if match(f)],
1677 1665 [], [], [], clean)
1678 1666
1679 1667 @propertycache
1680 1668 def _changedset(self):
1681 1669 """Return the set of files changed in this context
1682 1670 """
1683 1671 changed = set(self._status.modified)
1684 1672 changed.update(self._status.added)
1685 1673 changed.update(self._status.removed)
1686 1674 return changed
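# Behaviour sketch (hypothetical file names; workingcommitctx is normally
# constructed by the commit machinery rather than by hand). With tracked
# files 'a' and 'b' both modified in the working directory but only 'a'
# selected for the commit:
#
#   ctx = workingcommitctx(repo,
#                          scmutil.status(['a'], [], [], [], [], [], []),
#                          text='commit only a')
#   ctx._dirstatestatus(clean=True).modified        # ['a']
#   'b' in ctx._dirstatestatus(clean=True).clean    # True: 'b' stays hidden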
1687 1675
1688 1676 class memctx(committablectx):
1689 1677 """Use memctx to perform in-memory commits via localrepo.commitctx().
1690 1678
1691 1679 Revision information is supplied at initialization time, while the
1692 1680 related file data is made available through a callback
1693 1681 mechanism. 'repo' is the current localrepo, 'parents' is a
1694 1682 sequence of two parent revision identifiers (pass None for every
1695 1683 missing parent), 'text' is the commit message and 'files' lists
1696 1684 names of files touched by the revision (normalized and relative to
1697 1685 repository root).
1698 1686
1699 1687 filectxfn(repo, memctx, path) is a callable receiving the
1700 1688 repository, the current memctx object and the normalized path of
1701 1689 requested file, relative to repository root. It is fired by the
1702 1690 commit function for every file in 'files', but calls order is
1703 1691 undefined. If the file is available in the revision being
1704 1692 committed (updated or added), filectxfn returns a memfilectx
1705 1693 object. If the file was removed, filectxfn raises an
1706 1694 IOError. Moved files are represented by marking the source file
1707 1695 removed and the new file added with copy information (see
1708 1696 memfilectx).
1709 1697
1710 1698 user is the committer name and defaults to the current
1711 1699 repository username; date is the commit date in any format
1712 1700 supported by util.parsedate() and defaults to the current date; extra
1713 1701 is a dictionary of metadata, or is left empty.
1714 1702 """
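# Usage sketch (illustrative only, not part of Mercurial; 'repo' is an
# existing localrepo and the file name, content and commit metadata below
# are made up):
#
#   def filectxfn(repo, mctx, path):
#       return memfilectx(repo, path, 'hello\n', memctx=mctx)
#
#   mctx = memctx(repo, [repo['.'].node(), None], 'add greeting',
#                 ['greeting.txt'], filectxfn,
#                 user='someone <someone@example.com>')
#   newnode = mctx.commit()    # delegates to repo.commitctx(mctx)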
1715 1703
1716 1704 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1717 1705 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1718 1706 # this field to determine what to do in filectxfn.
1719 1707 _returnnoneformissingfiles = True
1720 1708
1721 1709 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1722 1710 date=None, extra=None, editor=False):
1723 1711 super(memctx, self).__init__(repo, text, user, date, extra)
1724 1712 self._rev = None
1725 1713 self._node = None
1726 1714 parents = [(p or nullid) for p in parents]
1727 1715 p1, p2 = parents
1728 1716 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1729 1717 files = sorted(set(files))
1730 1718 self._files = files
1731 1719 self.substate = {}
1732 1720
1733 1721 # if store is not callable, wrap it in a function
1734 1722 if not callable(filectxfn):
1735 1723 def getfilectx(repo, memctx, path):
1736 1724 fctx = filectxfn[path]
1737 1725 # this is weird but apparently we only keep track of one parent
1738 1726 # (why not only store that instead of a tuple?)
1739 1727 copied = fctx.renamed()
1740 1728 if copied:
1741 1729 copied = copied[0]
1742 1730 return memfilectx(repo, path, fctx.data(),
1743 1731 islink=fctx.islink(), isexec=fctx.isexec(),
1744 1732 copied=copied, memctx=memctx)
1745 1733 self._filectxfn = getfilectx
1746 1734 else:
1747 1735 # "util.cachefunc" reduces invocation of possibly expensive
1748 1736 # "filectxfn" for performance (e.g. converting from another VCS)
1749 1737 self._filectxfn = util.cachefunc(filectxfn)
1750 1738
1751 1739 self._extra = extra and extra.copy() or {}
1752 1740 if self._extra.get('branch', '') == '':
1753 1741 self._extra['branch'] = 'default'
1754 1742
1755 1743 if editor:
1756 1744 self._text = editor(self._repo, self, [])
1757 1745 self._repo.savecommitmessage(self._text)
1758 1746
1759 1747 def filectx(self, path, filelog=None):
1760 1748 """get a file context from the working directory
1761 1749
1762 1750 Returns None if the file doesn't exist and should be removed."""
1763 1751 return self._filectxfn(self._repo, self, path)
1764 1752
1765 1753 def commit(self):
1766 1754 """commit context to the repo"""
1767 1755 return self._repo.commitctx(self)
1768 1756
1769 1757 @propertycache
1770 1758 def _manifest(self):
1771 1759 """generate a manifest based on the return values of filectxfn"""
1772 1760
1773 1761 # keep this simple for now; just worry about p1
1774 1762 pctx = self._parents[0]
1775 1763 man = pctx.manifest().copy()
1776 1764
1777 1765 for f in self._status.modified:
1778 1766 p1node = nullid
1779 1767 p2node = nullid
1780 1768 p = pctx[f].parents() # if file isn't in pctx, check p2?
1781 1769 if len(p) > 0:
1782 1770 p1node = p[0].node()
1783 1771 if len(p) > 1:
1784 1772 p2node = p[1].node()
1785 1773 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1786 1774
1787 1775 for f in self._status.added:
1788 1776 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1789 1777
1790 1778 for f in self._status.removed:
1791 1779 if f in man:
1792 1780 del man[f]
1793 1781
1794 1782 return man
1795 1783
1796 1784 @propertycache
1797 1785 def _status(self):
1798 1786 """Calculate exact status from ``files`` specified at construction
1799 1787 """
1800 1788 man1 = self.p1().manifest()
1801 1789 p2 = self._parents[1]
1802 1790 # "1 < len(self._parents)" can't be used for checking
1803 1791 # existence of the 2nd parent, because "memctx._parents" is
1804 1792 # explicitly initialized as a list whose length is always 2.
1805 1793 if p2.node() != nullid:
1806 1794 man2 = p2.manifest()
1807 1795 managing = lambda f: f in man1 or f in man2
1808 1796 else:
1809 1797 managing = lambda f: f in man1
1810 1798
1811 1799 modified, added, removed = [], [], []
1812 1800 for f in self._files:
1813 1801 if not managing(f):
1814 1802 added.append(f)
1815 1803 elif self[f]:
1816 1804 modified.append(f)
1817 1805 else:
1818 1806 removed.append(f)
1819 1807
1820 1808 return scmutil.status(modified, added, removed, [], [], [], [])
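# Classification sketch (hypothetical file names): for a memctx built with
# files=['new', 'edited', 'gone'], where 'new' appears in neither parent
# manifest, filectxfn returns a memfilectx for 'new' and 'edited', and
# returns None for 'gone':
#
#   ctx._status.added       # ['new']
#   ctx._status.modified    # ['edited']
#   ctx._status.removed     # ['gone']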
1821 1809
1822 1810 class memfilectx(committablefilectx):
1823 1811 """memfilectx represents an in-memory file to commit.
1824 1812
1825 1813 See memctx and committablefilectx for more details.
1826 1814 """
1827 1815 def __init__(self, repo, path, data, islink=False,
1828 1816 isexec=False, copied=None, memctx=None):
1829 1817 """
1830 1818 path is the normalized file path relative to repository root.
1831 1819 data is the file content as a string.
1832 1820 islink is True if the file is a symbolic link.
1833 1821 isexec is True if the file is executable.
1834 1822 copied is the source file path if the current file was copied in the
1835 1823 revision being committed, or None."""
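# Rename sketch (hypothetical paths; 'filedata' stands in for the new
# file's content): a move is expressed by having filectxfn report the old
# path as missing and return the new path with copy metadata, with both
# paths listed in the memctx 'files' argument:
#
#   def filectxfn(repo, mctx, path):
#       if path == 'old.txt':
#           return None        # treated as removed (Mercurial > 3.1;
#                              # older versions expected IOError instead)
#       return memfilectx(repo, path, filedata, copied='old.txt',
#                         memctx=mctx)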
1836 1824 super(memfilectx, self).__init__(repo, path, None, memctx)
1837 1825 self._data = data
1838 1826 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1839 1827 self._copied = None
1840 1828 if copied:
1841 1829 self._copied = (copied, nullid)
1842 1830
1843 1831 def data(self):
1844 1832 return self._data
1845 1833 def size(self):
1846 1834 return len(self.data())
1847 1835 def flags(self):
1848 1836 return self._flags
1849 1837 def renamed(self):
1850 1838 return self._copied
1851 1839
1852 1840 def remove(self, ignoremissing=False):
1853 1841 """wraps unlink for a repo's working directory"""
1854 1842 # need to figure out what to do here
1855 1843 del self._changectx[self._path]
1856 1844
1857 1845 def write(self, data, flags):
1858 1846 """wraps repo.wwrite"""
1859 1847 self._data = data