filectx: use _descendantrev in parents()...
Matt Mackall
r23984:2896f535 stable
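This changeset defers the linkrev adjustment performed in basefilectx.parents(): instead of eagerly calling _adjustlinkrev for every parent (a potentially expensive ancestor walk over the changelog), each parent filectx now records the descendant revision it was reached from in _descendantrev, and the existing _changeid property performs the adjustment only when the revision is actually requested. The snippet below is an illustrative sketch of that lazy pattern only, not Mercurial code; the class and attribute names are hypothetical stand-ins.

    class LazyParent(object):
        """Toy model of a parent file context that defers linkrev adjustment."""

        def __init__(self, adjust):
            # 'adjust' stands in for the expensive _adjustlinkrev-style walk
            self._adjust = adjust
            self._descendantrev = None  # set by the child context, as parents() now does
            self._rev = None            # resolved lazily, like the _changeid propertycache

        def rev(self):
            # the ancestor walk happens only if somebody asks for the revision
            if self._rev is None and self._descendantrev is not None:
                self._rev = self._adjust(self._descendantrev)
            return self._rev

Callers that never ask a parent for its revision (for example a rename check that only compares file nodes) now skip the walk entirely, while callers that do ask still get a revision corrected for linkrev aliases.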
@@ -1,1862 +1,1866
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 class basectx(object):
26 26 """A basectx object represents the common logic for its children:
27 27 changectx: read-only context that is already present in the repo,
28 28 workingctx: a context that represents the working directory and can
29 29 be committed,
30 30 memctx: a context that represents changes in-memory and can also
31 31 be committed."""
32 32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 33 if isinstance(changeid, basectx):
34 34 return changeid
35 35
36 36 o = super(basectx, cls).__new__(cls)
37 37
38 38 o._repo = repo
39 39 o._rev = nullrev
40 40 o._node = nullid
41 41
42 42 return o
43 43
44 44 def __str__(self):
45 45 return short(self.node())
46 46
47 47 def __int__(self):
48 48 return self.rev()
49 49
50 50 def __repr__(self):
51 51 return "<%s %s>" % (type(self).__name__, str(self))
52 52
53 53 def __eq__(self, other):
54 54 try:
55 55 return type(self) == type(other) and self._rev == other._rev
56 56 except AttributeError:
57 57 return False
58 58
59 59 def __ne__(self, other):
60 60 return not (self == other)
61 61
62 62 def __contains__(self, key):
63 63 return key in self._manifest
64 64
65 65 def __getitem__(self, key):
66 66 return self.filectx(key)
67 67
68 68 def __iter__(self):
69 69 for f in sorted(self._manifest):
70 70 yield f
71 71
72 72 def _manifestmatches(self, match, s):
73 73 """generate a new manifest filtered by the match argument
74 74
75 75 This method is for internal use only and mainly exists to provide an
76 76 object oriented way for other contexts to customize the manifest
77 77 generation.
78 78 """
79 79 return self.manifest().matches(match)
80 80
81 81 def _matchstatus(self, other, match):
82 82 """return match.always if match is None
83 83
84 84 This internal method provides a way for child objects to override the
85 85 match operator.
86 86 """
87 87 return match or matchmod.always(self._repo.root, self._repo.getcwd())
88 88
89 89 def _buildstatus(self, other, s, match, listignored, listclean,
90 90 listunknown):
91 91 """build a status with respect to another context"""
92 92 # Load earliest manifest first for caching reasons. More specifically,
93 93 # if you have revisions 1000 and 1001, 1001 is probably stored as a
94 94 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
95 95 # 1000 and cache it so that when you read 1001, we just need to apply a
96 96 # delta to what's in the cache. So that's one full reconstruction + one
97 97 # delta application.
98 98 if self.rev() is not None and self.rev() < other.rev():
99 99 self.manifest()
100 100 mf1 = other._manifestmatches(match, s)
101 101 mf2 = self._manifestmatches(match, s)
102 102
103 103 modified, added = [], []
104 104 removed = []
105 105 clean = []
106 106 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
107 107 deletedset = set(deleted)
108 108 d = mf1.diff(mf2, clean=listclean)
109 109 for fn, value in d.iteritems():
110 110 if fn in deletedset:
111 111 continue
112 112 if value is None:
113 113 clean.append(fn)
114 114 continue
115 115 (node1, flag1), (node2, flag2) = value
116 116 if node1 is None:
117 117 added.append(fn)
118 118 elif node2 is None:
119 119 removed.append(fn)
120 120 elif node2 != _newnode:
121 121 # The file was not a new file in mf2, so an entry
122 122 # from diff is really a difference.
123 123 modified.append(fn)
124 124 elif self[fn].cmp(other[fn]):
125 125 # node2 was newnode, but the working file doesn't
126 126 # match the one in mf1.
127 127 modified.append(fn)
128 128 else:
129 129 clean.append(fn)
130 130
131 131 if removed:
132 132 # need to filter files if they are already reported as removed
133 133 unknown = [fn for fn in unknown if fn not in mf1]
134 134 ignored = [fn for fn in ignored if fn not in mf1]
135 135 # if they're deleted, don't report them as removed
136 136 removed = [fn for fn in removed if fn not in deletedset]
137 137
138 138 return scmutil.status(modified, added, removed, deleted, unknown,
139 139 ignored, clean)
140 140
141 141 @propertycache
142 142 def substate(self):
143 143 return subrepo.state(self, self._repo.ui)
144 144
145 145 def subrev(self, subpath):
146 146 return self.substate[subpath][1]
147 147
148 148 def rev(self):
149 149 return self._rev
150 150 def node(self):
151 151 return self._node
152 152 def hex(self):
153 153 return hex(self.node())
154 154 def manifest(self):
155 155 return self._manifest
156 156 def phasestr(self):
157 157 return phases.phasenames[self.phase()]
158 158 def mutable(self):
159 159 return self.phase() > phases.public
160 160
161 161 def getfileset(self, expr):
162 162 return fileset.getfileset(self, expr)
163 163
164 164 def obsolete(self):
165 165 """True if the changeset is obsolete"""
166 166 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
167 167
168 168 def extinct(self):
169 169 """True if the changeset is extinct"""
170 170 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
171 171
172 172 def unstable(self):
173 173 """True if the changeset is not obsolete but its ancestors are"""
174 174 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
175 175
176 176 def bumped(self):
177 177 """True if the changeset tries to be a successor of a public changeset
178 178
179 179 Only non-public and non-obsolete changesets may be bumped.
180 180 """
181 181 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
182 182
183 183 def divergent(self):
184 184 """Is a successor of a changeset with multiple possible successor sets
185 185
186 186 Only non-public and non-obsolete changesets may be divergent.
187 187 """
188 188 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
189 189
190 190 def troubled(self):
191 191 """True if the changeset is either unstable, bumped or divergent"""
192 192 return self.unstable() or self.bumped() or self.divergent()
193 193
194 194 def troubles(self):
195 195 """return the list of troubles affecting this changeset.
196 196
197 197 Troubles are returned as strings. Possible values are:
198 198 - unstable,
199 199 - bumped,
200 200 - divergent.
201 201 """
202 202 troubles = []
203 203 if self.unstable():
204 204 troubles.append('unstable')
205 205 if self.bumped():
206 206 troubles.append('bumped')
207 207 if self.divergent():
208 208 troubles.append('divergent')
209 209 return troubles
210 210
211 211 def parents(self):
212 212 """return contexts for each parent changeset"""
213 213 return self._parents
214 214
215 215 def p1(self):
216 216 return self._parents[0]
217 217
218 218 def p2(self):
219 219 if len(self._parents) == 2:
220 220 return self._parents[1]
221 221 return changectx(self._repo, -1)
222 222
223 223 def _fileinfo(self, path):
224 224 if '_manifest' in self.__dict__:
225 225 try:
226 226 return self._manifest[path], self._manifest.flags(path)
227 227 except KeyError:
228 228 raise error.ManifestLookupError(self._node, path,
229 229 _('not found in manifest'))
230 230 if '_manifestdelta' in self.__dict__ or path in self.files():
231 231 if path in self._manifestdelta:
232 232 return (self._manifestdelta[path],
233 233 self._manifestdelta.flags(path))
234 234 node, flag = self._repo.manifest.find(self._changeset[0], path)
235 235 if not node:
236 236 raise error.ManifestLookupError(self._node, path,
237 237 _('not found in manifest'))
238 238
239 239 return node, flag
240 240
241 241 def filenode(self, path):
242 242 return self._fileinfo(path)[0]
243 243
244 244 def flags(self, path):
245 245 try:
246 246 return self._fileinfo(path)[1]
247 247 except error.LookupError:
248 248 return ''
249 249
250 250 def sub(self, path):
251 251 return subrepo.subrepo(self, path)
252 252
253 253 def match(self, pats=[], include=None, exclude=None, default='glob'):
254 254 r = self._repo
255 255 return matchmod.match(r.root, r.getcwd(), pats,
256 256 include, exclude, default,
257 257 auditor=r.auditor, ctx=self)
258 258
259 259 def diff(self, ctx2=None, match=None, **opts):
260 260 """Returns a diff generator for the given contexts and matcher"""
261 261 if ctx2 is None:
262 262 ctx2 = self.p1()
263 263 if ctx2 is not None:
264 264 ctx2 = self._repo[ctx2]
265 265 diffopts = patch.diffopts(self._repo.ui, opts)
266 266 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
267 267
268 268 @propertycache
269 269 def _dirs(self):
270 270 return scmutil.dirs(self._manifest)
271 271
272 272 def dirs(self):
273 273 return self._dirs
274 274
275 275 def dirty(self, missing=False, merge=True, branch=True):
276 276 return False
277 277
278 278 def status(self, other=None, match=None, listignored=False,
279 279 listclean=False, listunknown=False, listsubrepos=False):
280 280 """return status of files between two nodes or node and working
281 281 directory.
282 282
283 283 If other is None, compare this node with working directory.
284 284
285 285 returns (modified, added, removed, deleted, unknown, ignored, clean)
286 286 """
287 287
288 288 ctx1 = self
289 289 ctx2 = self._repo[other]
290 290
291 291 # This next code block is, admittedly, fragile logic that tests for
292 292 # reversing the contexts and wouldn't need to exist if it weren't for
293 293 # the fast (and common) code path of comparing the working directory
294 294 # with its first parent.
295 295 #
296 296 # What we're aiming for here is the ability to call:
297 297 #
298 298 # workingctx.status(parentctx)
299 299 #
300 300 # If we always built the manifest for each context and compared those,
301 301 # then we'd be done. But the special case of the above call means we
302 302 # just copy the manifest of the parent.
303 303 reversed = False
304 304 if (not isinstance(ctx1, changectx)
305 305 and isinstance(ctx2, changectx)):
306 306 reversed = True
307 307 ctx1, ctx2 = ctx2, ctx1
308 308
309 309 match = ctx2._matchstatus(ctx1, match)
310 310 r = scmutil.status([], [], [], [], [], [], [])
311 311 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
312 312 listunknown)
313 313
314 314 if reversed:
315 315 # Reverse added and removed. Clear deleted, unknown and ignored as
316 316 # these make no sense to reverse.
317 317 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
318 318 r.clean)
319 319
320 320 if listsubrepos:
321 321 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
322 322 rev2 = ctx2.subrev(subpath)
323 323 try:
324 324 submatch = matchmod.narrowmatcher(subpath, match)
325 325 s = sub.status(rev2, match=submatch, ignored=listignored,
326 326 clean=listclean, unknown=listunknown,
327 327 listsubrepos=True)
328 328 for rfiles, sfiles in zip(r, s):
329 329 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
330 330 except error.LookupError:
331 331 self._repo.ui.status(_("skipping missing "
332 332 "subrepository: %s\n") % subpath)
333 333
334 334 for l in r:
335 335 l.sort()
336 336
337 337 return r
338 338
339 339
340 340 def makememctx(repo, parents, text, user, date, branch, files, store,
341 341 editor=None):
342 342 def getfilectx(repo, memctx, path):
343 343 data, mode, copied = store.getfile(path)
344 344 if data is None:
345 345 return None
346 346 islink, isexec = mode
347 347 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
348 348 copied=copied, memctx=memctx)
349 349 extra = {}
350 350 if branch:
351 351 extra['branch'] = encoding.fromlocal(branch)
352 352 ctx = memctx(repo, parents, text, files, getfilectx, user,
353 353 date, extra, editor)
354 354 return ctx
355 355
356 356 class changectx(basectx):
357 357 """A changecontext object makes access to data related to a particular
358 358 changeset convenient. It represents a read-only context already present in
359 359 the repo."""
360 360 def __init__(self, repo, changeid=''):
361 361 """changeid is a revision number, node, or tag"""
362 362
363 363 # since basectx.__new__ already took care of copying the object, we
364 364 # don't need to do anything in __init__, so we just exit here
365 365 if isinstance(changeid, basectx):
366 366 return
367 367
368 368 if changeid == '':
369 369 changeid = '.'
370 370 self._repo = repo
371 371
372 372 try:
373 373 if isinstance(changeid, int):
374 374 self._node = repo.changelog.node(changeid)
375 375 self._rev = changeid
376 376 return
377 377 if isinstance(changeid, long):
378 378 changeid = str(changeid)
379 379 if changeid == '.':
380 380 self._node = repo.dirstate.p1()
381 381 self._rev = repo.changelog.rev(self._node)
382 382 return
383 383 if changeid == 'null':
384 384 self._node = nullid
385 385 self._rev = nullrev
386 386 return
387 387 if changeid == 'tip':
388 388 self._node = repo.changelog.tip()
389 389 self._rev = repo.changelog.rev(self._node)
390 390 return
391 391 if len(changeid) == 20:
392 392 try:
393 393 self._node = changeid
394 394 self._rev = repo.changelog.rev(changeid)
395 395 return
396 396 except error.FilteredRepoLookupError:
397 397 raise
398 398 except LookupError:
399 399 pass
400 400
401 401 try:
402 402 r = int(changeid)
403 403 if str(r) != changeid:
404 404 raise ValueError
405 405 l = len(repo.changelog)
406 406 if r < 0:
407 407 r += l
408 408 if r < 0 or r >= l:
409 409 raise ValueError
410 410 self._rev = r
411 411 self._node = repo.changelog.node(r)
412 412 return
413 413 except error.FilteredIndexError:
414 414 raise
415 415 except (ValueError, OverflowError, IndexError):
416 416 pass
417 417
418 418 if len(changeid) == 40:
419 419 try:
420 420 self._node = bin(changeid)
421 421 self._rev = repo.changelog.rev(self._node)
422 422 return
423 423 except error.FilteredLookupError:
424 424 raise
425 425 except (TypeError, LookupError):
426 426 pass
427 427
428 428 # lookup bookmarks through the name interface
429 429 try:
430 430 self._node = repo.names.singlenode(repo, changeid)
431 431 self._rev = repo.changelog.rev(self._node)
432 432 return
433 433 except KeyError:
434 434 pass
435 435 except error.FilteredRepoLookupError:
436 436 raise
437 437 except error.RepoLookupError:
438 438 pass
439 439
440 440 self._node = repo.unfiltered().changelog._partialmatch(changeid)
441 441 if self._node is not None:
442 442 self._rev = repo.changelog.rev(self._node)
443 443 return
444 444
445 445 # lookup failed
446 446 # check if it might have come from damaged dirstate
447 447 #
448 448 # XXX we could avoid the unfiltered if we had a recognizable
449 449 # exception for filtered changeset access
450 450 if changeid in repo.unfiltered().dirstate.parents():
451 451 msg = _("working directory has unknown parent '%s'!")
452 452 raise error.Abort(msg % short(changeid))
453 453 try:
454 454 if len(changeid) == 20:
455 455 changeid = hex(changeid)
456 456 except TypeError:
457 457 pass
458 458 except (error.FilteredIndexError, error.FilteredLookupError,
459 459 error.FilteredRepoLookupError):
460 460 if repo.filtername == 'visible':
461 461 msg = _("hidden revision '%s'") % changeid
462 462 hint = _('use --hidden to access hidden revisions')
463 463 raise error.FilteredRepoLookupError(msg, hint=hint)
464 464 msg = _("filtered revision '%s' (not in '%s' subset)")
465 465 msg %= (changeid, repo.filtername)
466 466 raise error.FilteredRepoLookupError(msg)
467 467 except IndexError:
468 468 pass
469 469 raise error.RepoLookupError(
470 470 _("unknown revision '%s'") % changeid)
471 471
472 472 def __hash__(self):
473 473 try:
474 474 return hash(self._rev)
475 475 except AttributeError:
476 476 return id(self)
477 477
478 478 def __nonzero__(self):
479 479 return self._rev != nullrev
480 480
481 481 @propertycache
482 482 def _changeset(self):
483 483 return self._repo.changelog.read(self.rev())
484 484
485 485 @propertycache
486 486 def _manifest(self):
487 487 return self._repo.manifest.read(self._changeset[0])
488 488
489 489 @propertycache
490 490 def _manifestdelta(self):
491 491 return self._repo.manifest.readdelta(self._changeset[0])
492 492
493 493 @propertycache
494 494 def _parents(self):
495 495 p = self._repo.changelog.parentrevs(self._rev)
496 496 if p[1] == nullrev:
497 497 p = p[:-1]
498 498 return [changectx(self._repo, x) for x in p]
499 499
500 500 def changeset(self):
501 501 return self._changeset
502 502 def manifestnode(self):
503 503 return self._changeset[0]
504 504
505 505 def user(self):
506 506 return self._changeset[1]
507 507 def date(self):
508 508 return self._changeset[2]
509 509 def files(self):
510 510 return self._changeset[3]
511 511 def description(self):
512 512 return self._changeset[4]
513 513 def branch(self):
514 514 return encoding.tolocal(self._changeset[5].get("branch"))
515 515 def closesbranch(self):
516 516 return 'close' in self._changeset[5]
517 517 def extra(self):
518 518 return self._changeset[5]
519 519 def tags(self):
520 520 return self._repo.nodetags(self._node)
521 521 def bookmarks(self):
522 522 return self._repo.nodebookmarks(self._node)
523 523 def phase(self):
524 524 return self._repo._phasecache.phase(self._repo, self._rev)
525 525 def hidden(self):
526 526 return self._rev in repoview.filterrevs(self._repo, 'visible')
527 527
528 528 def children(self):
529 529 """return contexts for each child changeset"""
530 530 c = self._repo.changelog.children(self._node)
531 531 return [changectx(self._repo, x) for x in c]
532 532
533 533 def ancestors(self):
534 534 for a in self._repo.changelog.ancestors([self._rev]):
535 535 yield changectx(self._repo, a)
536 536
537 537 def descendants(self):
538 538 for d in self._repo.changelog.descendants([self._rev]):
539 539 yield changectx(self._repo, d)
540 540
541 541 def filectx(self, path, fileid=None, filelog=None):
542 542 """get a file context from this changeset"""
543 543 if fileid is None:
544 544 fileid = self.filenode(path)
545 545 return filectx(self._repo, path, fileid=fileid,
546 546 changectx=self, filelog=filelog)
547 547
548 548 def ancestor(self, c2, warn=False):
549 549 """return the "best" ancestor context of self and c2
550 550
551 551 If there are multiple candidates, it will show a message and check
552 552 merge.preferancestor configuration before falling back to the
553 553 revlog ancestor."""
554 554 # deal with workingctxs
555 555 n2 = c2._node
556 556 if n2 is None:
557 557 n2 = c2._parents[0]._node
558 558 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
559 559 if not cahs:
560 560 anc = nullid
561 561 elif len(cahs) == 1:
562 562 anc = cahs[0]
563 563 else:
564 564 for r in self._repo.ui.configlist('merge', 'preferancestor'):
565 565 try:
566 566 ctx = changectx(self._repo, r)
567 567 except error.RepoLookupError:
568 568 continue
569 569 anc = ctx.node()
570 570 if anc in cahs:
571 571 break
572 572 else:
573 573 anc = self._repo.changelog.ancestor(self._node, n2)
574 574 if warn:
575 575 self._repo.ui.status(
576 576 (_("note: using %s as ancestor of %s and %s\n") %
577 577 (short(anc), short(self._node), short(n2))) +
578 578 ''.join(_(" alternatively, use --config "
579 579 "merge.preferancestor=%s\n") %
580 580 short(n) for n in sorted(cahs) if n != anc))
581 581 return changectx(self._repo, anc)
582 582
583 583 def descendant(self, other):
584 584 """True if other is descendant of this changeset"""
585 585 return self._repo.changelog.descendant(self._rev, other._rev)
586 586
587 587 def walk(self, match):
588 588 fset = set(match.files())
589 589 # for dirstate.walk, files=['.'] means "walk the whole tree".
590 590 # follow that here, too
591 591 fset.discard('.')
592 592
593 593 # avoid the entire walk if we're only looking for specific files
594 594 if fset and not match.anypats():
595 595 if util.all([fn in self for fn in fset]):
596 596 for fn in sorted(fset):
597 597 if match(fn):
598 598 yield fn
599 599 raise StopIteration
600 600
601 601 for fn in self:
602 602 if fn in fset:
603 603 # specified pattern is the exact name
604 604 fset.remove(fn)
605 605 if match(fn):
606 606 yield fn
607 607 for fn in sorted(fset):
608 608 if fn in self._dirs:
609 609 # specified pattern is a directory
610 610 continue
611 611 match.bad(fn, _('no such file in rev %s') % self)
612 612
613 613 def matches(self, match):
614 614 return self.walk(match)
615 615
616 616 class basefilectx(object):
617 617 """A filecontext object represents the common logic for its children:
618 618 filectx: read-only access to a filerevision that is already present
619 619 in the repo,
620 620 workingfilectx: a filecontext that represents files from the working
621 621 directory,
622 622 memfilectx: a filecontext that represents files in-memory."""
623 623 def __new__(cls, repo, path, *args, **kwargs):
624 624 return super(basefilectx, cls).__new__(cls)
625 625
626 626 @propertycache
627 627 def _filelog(self):
628 628 return self._repo.file(self._path)
629 629
630 630 @propertycache
631 631 def _changeid(self):
632 632 if '_changeid' in self.__dict__:
633 633 return self._changeid
634 634 elif '_changectx' in self.__dict__:
635 635 return self._changectx.rev()
636 636 elif '_descendantrev' in self.__dict__:
637 637 # this file context was created from a revision with a known
638 638 # descendant, so we can (lazily) correct for linkrev aliases
639 639 return self._adjustlinkrev(self._path, self._filelog,
640 640 self._filenode, self._descendantrev)
641 641 else:
642 642 return self._filelog.linkrev(self._filerev)
643 643
644 644 @propertycache
645 645 def _filenode(self):
646 646 if '_fileid' in self.__dict__:
647 647 return self._filelog.lookup(self._fileid)
648 648 else:
649 649 return self._changectx.filenode(self._path)
650 650
651 651 @propertycache
652 652 def _filerev(self):
653 653 return self._filelog.rev(self._filenode)
654 654
655 655 @propertycache
656 656 def _repopath(self):
657 657 return self._path
658 658
659 659 def __nonzero__(self):
660 660 try:
661 661 self._filenode
662 662 return True
663 663 except error.LookupError:
664 664 # file is missing
665 665 return False
666 666
667 667 def __str__(self):
668 668 return "%s@%s" % (self.path(), self._changectx)
669 669
670 670 def __repr__(self):
671 671 return "<%s %s>" % (type(self).__name__, str(self))
672 672
673 673 def __hash__(self):
674 674 try:
675 675 return hash((self._path, self._filenode))
676 676 except AttributeError:
677 677 return id(self)
678 678
679 679 def __eq__(self, other):
680 680 try:
681 681 return (type(self) == type(other) and self._path == other._path
682 682 and self._filenode == other._filenode)
683 683 except AttributeError:
684 684 return False
685 685
686 686 def __ne__(self, other):
687 687 return not (self == other)
688 688
689 689 def filerev(self):
690 690 return self._filerev
691 691 def filenode(self):
692 692 return self._filenode
693 693 def flags(self):
694 694 return self._changectx.flags(self._path)
695 695 def filelog(self):
696 696 return self._filelog
697 697 def rev(self):
698 698 return self._changeid
699 699 def linkrev(self):
700 700 return self._filelog.linkrev(self._filerev)
701 701 def node(self):
702 702 return self._changectx.node()
703 703 def hex(self):
704 704 return self._changectx.hex()
705 705 def user(self):
706 706 return self._changectx.user()
707 707 def date(self):
708 708 return self._changectx.date()
709 709 def files(self):
710 710 return self._changectx.files()
711 711 def description(self):
712 712 return self._changectx.description()
713 713 def branch(self):
714 714 return self._changectx.branch()
715 715 def extra(self):
716 716 return self._changectx.extra()
717 717 def phase(self):
718 718 return self._changectx.phase()
719 719 def phasestr(self):
720 720 return self._changectx.phasestr()
721 721 def manifest(self):
722 722 return self._changectx.manifest()
723 723 def changectx(self):
724 724 return self._changectx
725 725
726 726 def path(self):
727 727 return self._path
728 728
729 729 def isbinary(self):
730 730 try:
731 731 return util.binary(self.data())
732 732 except IOError:
733 733 return False
734 734 def isexec(self):
735 735 return 'x' in self.flags()
736 736 def islink(self):
737 737 return 'l' in self.flags()
738 738
739 739 def cmp(self, fctx):
740 740 """compare with other file context
741 741
742 742 returns True if different than fctx.
743 743 """
744 744 if (fctx._filerev is None
745 745 and (self._repo._encodefilterpats
746 746 # if file data starts with '\1\n', empty metadata block is
747 747 # prepended, which adds 4 bytes to filelog.size().
748 748 or self.size() - 4 == fctx.size())
749 749 or self.size() == fctx.size()):
750 750 return self._filelog.cmp(self._filenode, fctx.data())
751 751
752 752 return True
753 753
754 754 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
755 755 """return the first ancestor of <srcrev> introducing <fnode>
756 756
757 757 If the linkrev of the file revision does not point to an ancestor of
758 758 srcrev, we'll walk down the ancestors until we find one introducing
759 759 this file revision.
760 760
761 761 :repo: a localrepository object (used to access changelog and manifest)
762 762 :path: the file path
763 763 :fnode: the nodeid of the file revision
764 764 :filelog: the filelog of this path
765 765 :srcrev: the changeset revision we search ancestors from
766 766 :inclusive: if true, the src revision will also be checked
767 767 """
768 768 repo = self._repo
769 769 cl = repo.unfiltered().changelog
770 770 ma = repo.manifest
771 771 # fetch the linkrev
772 772 fr = filelog.rev(fnode)
773 773 lkr = filelog.linkrev(fr)
774 774 # hack to reuse ancestor computation when searching for renames
775 775 memberanc = getattr(self, '_ancestrycontext', None)
776 776 iteranc = None
777 777 if memberanc is None:
778 778 memberanc = iteranc = cl.ancestors([srcrev], lkr,
779 779 inclusive=inclusive)
780 780 # check if this linkrev is an ancestor of srcrev
781 781 if lkr not in memberanc:
782 782 if iteranc is None:
783 783 iteranc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
784 784 for a in iteranc:
785 785 ac = cl.read(a) # get changeset data (we avoid object creation)
786 786 if path in ac[3]: # checking the 'files' field.
787 787 # The file has been touched, check if the content is
788 788 # similar to the one we search for.
789 789 if fnode == ma.readfast(ac[0]).get(path):
790 790 return a
791 791 # In theory, we should never get out of that loop without a result.
792 792 # But if the manifest uses a buggy file revision (not a child of the
793 793 # one it replaces) we could. Such a buggy situation will likely
794 794 # result in a crash somewhere else at some point.
795 795 return lkr
796 796
797 797 def introrev(self):
798 798 """return the rev of the changeset which introduced this file revision
799 799
800 800 This method is different from linkrev because it takes into account the
801 801 changeset the filectx was created from. It ensures the returned
802 802 revision is one of its ancestors. This prevents bugs from
803 803 'linkrev-shadowing' when a file revision is used by multiple
804 804 changesets.
805 805 """
806 806 lkr = self.linkrev()
807 807 attrs = vars(self)
808 808 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
809 809 if noctx or self.rev() == lkr:
810 810 return self.linkrev()
811 811 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
812 812 self.rev(), inclusive=True)
813 813
814 814 def parents(self):
815 815 _path = self._path
816 816 fl = self._filelog
817 817 parents = self._filelog.parents(self._filenode)
818 818 pl = [(_path, node, fl) for node in parents if node != nullid]
819 819
820 820 r = fl.renamed(self._filenode)
821 821 if r:
822 822 # - In the simple rename case, both parents are nullid, pl is empty.
823 823 # - In case of merge, only one of the parents is nullid and should
824 824 # be replaced with the rename information. This parent is -always-
825 825 # the first one.
826 826 #
827 827 # As nullid parents have always been filtered out in the previous list
828 828 # comprehension, inserting at 0 will always result in replacing the
829 829 # first nullid parent with the rename information.
830 830 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
831 831
832 832 ret = []
833 833 for path, fnode, l in pl:
834 834 if '_changeid' in vars(self) or '_changectx' in vars(self):
835 835 # If self is associated with a changeset (probably explicitly
836 836 # fed), ensure the created filectx is associated with a
837 837 # changeset that is an ancestor of self.changectx.
838 rev = self._adjustlinkrev(path, l, fnode, self.rev())
839 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
840 changeid=rev)
838 # This lets us later use _adjustlinkrev to get a correct link.
839 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
840 fctx._descendantrev = self.rev()
841 841 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
842
842 elif '_descendantrev' in vars(self):
843 # Otherwise propagate _descendantrev if we have one associated.
844 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
845 fctx._descendantrev = self._descendantrev
846 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
843 847 else:
844 848 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
845 849 ret.append(fctx)
846 850 return ret
847 851
848 852 def p1(self):
849 853 return self.parents()[0]
850 854
851 855 def p2(self):
852 856 p = self.parents()
853 857 if len(p) == 2:
854 858 return p[1]
855 859 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
856 860
857 861 def annotate(self, follow=False, linenumber=None, diffopts=None):
858 862 '''returns a list of tuples of (ctx, line) for each line
859 863 in the file, where ctx is the filectx of the node where
860 864 that line was last changed.
861 865 This returns tuples of ((ctx, linenumber), line) for each line
862 866 if the "linenumber" parameter is not "None".
863 867 In such tuples, linenumber is the line number of the line's first
864 868 appearance in the managed file.
865 869 To reduce annotation cost,
866 870 this returns a fixed value (False) as the linenumber
867 871 if the "linenumber" parameter is "False".'''
868 872
869 873 if linenumber is None:
870 874 def decorate(text, rev):
871 875 return ([rev] * len(text.splitlines()), text)
872 876 elif linenumber:
873 877 def decorate(text, rev):
874 878 size = len(text.splitlines())
875 879 return ([(rev, i) for i in xrange(1, size + 1)], text)
876 880 else:
877 881 def decorate(text, rev):
878 882 return ([(rev, False)] * len(text.splitlines()), text)
879 883
880 884 def pair(parent, child):
881 885 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
882 886 refine=True)
883 887 for (a1, a2, b1, b2), t in blocks:
884 888 # Changed blocks ('!') or blocks made only of blank lines ('~')
885 889 # belong to the child.
886 890 if t == '=':
887 891 child[0][b1:b2] = parent[0][a1:a2]
888 892 return child
889 893
890 894 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
891 895
892 896 def parents(f):
893 897 pl = f.parents()
894 898
895 899 # Don't return renamed parents if we aren't following.
896 900 if not follow:
897 901 pl = [p for p in pl if p.path() == f.path()]
898 902
899 903 # renamed filectx won't have a filelog yet, so set it
900 904 # from the cache to save time
901 905 for p in pl:
902 906 if not '_filelog' in p.__dict__:
903 907 p._filelog = getlog(p.path())
904 908
905 909 return pl
906 910
907 911 # use linkrev to find the first changeset where self appeared
908 912 base = self
909 913 introrev = self.introrev()
910 914 if self.rev() != introrev:
911 915 base = self.filectx(self.filenode(), changeid=introrev)
912 916
913 917 # This algorithm would prefer to be recursive, but Python is a
914 918 # bit recursion-hostile. Instead we do an iterative
915 919 # depth-first search.
916 920
917 921 visit = [base]
918 922 hist = {}
919 923 pcache = {}
920 924 needed = {base: 1}
921 925 while visit:
922 926 f = visit[-1]
923 927 pcached = f in pcache
924 928 if not pcached:
925 929 pcache[f] = parents(f)
926 930
927 931 ready = True
928 932 pl = pcache[f]
929 933 for p in pl:
930 934 if p not in hist:
931 935 ready = False
932 936 visit.append(p)
933 937 if not pcached:
934 938 needed[p] = needed.get(p, 0) + 1
935 939 if ready:
936 940 visit.pop()
937 941 reusable = f in hist
938 942 if reusable:
939 943 curr = hist[f]
940 944 else:
941 945 curr = decorate(f.data(), f)
942 946 for p in pl:
943 947 if not reusable:
944 948 curr = pair(hist[p], curr)
945 949 if needed[p] == 1:
946 950 del hist[p]
947 951 del needed[p]
948 952 else:
949 953 needed[p] -= 1
950 954
951 955 hist[f] = curr
952 956 pcache[f] = []
953 957
954 958 return zip(hist[base][0], hist[base][1].splitlines(True))
955 959
956 960 def ancestors(self, followfirst=False):
957 961 visit = {}
958 962 c = self
959 963 cut = followfirst and 1 or None
960 964 while True:
961 965 for parent in c.parents()[:cut]:
962 966 visit[(parent.linkrev(), parent.filenode())] = parent
963 967 if not visit:
964 968 break
965 969 c = visit.pop(max(visit))
966 970 yield c
967 971
968 972 class filectx(basefilectx):
969 973 """A filecontext object makes access to data related to a particular
970 974 filerevision convenient."""
971 975 def __init__(self, repo, path, changeid=None, fileid=None,
972 976 filelog=None, changectx=None):
973 977 """changeid can be a changeset revision, node, or tag.
974 978 fileid can be a file revision or node."""
975 979 self._repo = repo
976 980 self._path = path
977 981
978 982 assert (changeid is not None
979 983 or fileid is not None
980 984 or changectx is not None), \
981 985 ("bad args: changeid=%r, fileid=%r, changectx=%r"
982 986 % (changeid, fileid, changectx))
983 987
984 988 if filelog is not None:
985 989 self._filelog = filelog
986 990
987 991 if changeid is not None:
988 992 self._changeid = changeid
989 993 if changectx is not None:
990 994 self._changectx = changectx
991 995 if fileid is not None:
992 996 self._fileid = fileid
993 997
994 998 @propertycache
995 999 def _changectx(self):
996 1000 try:
997 1001 return changectx(self._repo, self._changeid)
998 1002 except error.FilteredRepoLookupError:
999 1003 # Linkrev may point to any revision in the repository. When the
1000 1004 # repository is filtered this may lead to `filectx` trying to build
1001 1005 # `changectx` for a filtered revision. In such a case we fall back to
1002 1006 # creating `changectx` on the unfiltered version of the repository.
1003 1007 # This fallback should not be an issue because `changectx` from
1004 1008 # `filectx` are not used in complex operations that care about
1005 1009 # filtering.
1006 1010 #
1007 1011 # This fallback is a cheap and dirty fix that prevents several
1008 1012 # crashes. It does not ensure the behavior is correct. However the
1009 1013 # behavior was not correct before filtering either and "incorrect
1010 1014 # behavior" is seen as better than "crash"
1011 1015 #
1012 1016 # Linkrevs have several serious troubles with filtering that are
1013 1017 # complicated to solve. Proper handling of the issue here should be
1014 1018 # considered when solving the linkrev issues is on the table.
1015 1019 return changectx(self._repo.unfiltered(), self._changeid)
1016 1020
1017 1021 def filectx(self, fileid, changeid=None):
1018 1022 '''opens an arbitrary revision of the file without
1019 1023 opening a new filelog'''
1020 1024 return filectx(self._repo, self._path, fileid=fileid,
1021 1025 filelog=self._filelog, changeid=changeid)
1022 1026
1023 1027 def data(self):
1024 1028 try:
1025 1029 return self._filelog.read(self._filenode)
1026 1030 except error.CensoredNodeError:
1027 1031 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1028 1032 return ""
1029 1033 raise util.Abort(_("censored node: %s") % short(self._filenode),
1030 1034 hint=_("set censor.policy to ignore errors"))
1031 1035
1032 1036 def size(self):
1033 1037 return self._filelog.size(self._filerev)
1034 1038
1035 1039 def renamed(self):
1036 1040 """check if file was actually renamed in this changeset revision
1037 1041
1038 1042 If a rename is logged in the file revision, we report a copy for the changeset
1039 1043 only if the file revision's linkrev points back to the changeset in question
1040 1044 or both changeset parents contain different file revisions.
1041 1045 """
1042 1046
1043 1047 renamed = self._filelog.renamed(self._filenode)
1044 1048 if not renamed:
1045 1049 return renamed
1046 1050
1047 1051 if self.rev() == self.linkrev():
1048 1052 return renamed
1049 1053
1050 1054 name = self.path()
1051 1055 fnode = self._filenode
1052 1056 for p in self._changectx.parents():
1053 1057 try:
1054 1058 if fnode == p.filenode(name):
1055 1059 return None
1056 1060 except error.LookupError:
1057 1061 pass
1058 1062 return renamed
1059 1063
1060 1064 def children(self):
1061 1065 # hard for renames
1062 1066 c = self._filelog.children(self._filenode)
1063 1067 return [filectx(self._repo, self._path, fileid=x,
1064 1068 filelog=self._filelog) for x in c]
1065 1069
1066 1070 class committablectx(basectx):
1067 1071 """A committablectx object provides common functionality for a context that
1068 1072 wants the ability to commit, e.g. workingctx or memctx."""
1069 1073 def __init__(self, repo, text="", user=None, date=None, extra=None,
1070 1074 changes=None):
1071 1075 self._repo = repo
1072 1076 self._rev = None
1073 1077 self._node = None
1074 1078 self._text = text
1075 1079 if date:
1076 1080 self._date = util.parsedate(date)
1077 1081 if user:
1078 1082 self._user = user
1079 1083 if changes:
1080 1084 self._status = changes
1081 1085
1082 1086 self._extra = {}
1083 1087 if extra:
1084 1088 self._extra = extra.copy()
1085 1089 if 'branch' not in self._extra:
1086 1090 try:
1087 1091 branch = encoding.fromlocal(self._repo.dirstate.branch())
1088 1092 except UnicodeDecodeError:
1089 1093 raise util.Abort(_('branch name not in UTF-8!'))
1090 1094 self._extra['branch'] = branch
1091 1095 if self._extra['branch'] == '':
1092 1096 self._extra['branch'] = 'default'
1093 1097
1094 1098 def __str__(self):
1095 1099 return str(self._parents[0]) + "+"
1096 1100
1097 1101 def __nonzero__(self):
1098 1102 return True
1099 1103
1100 1104 def _buildflagfunc(self):
1101 1105 # Create a fallback function for getting file flags when the
1102 1106 # filesystem doesn't support them
1103 1107
1104 1108 copiesget = self._repo.dirstate.copies().get
1105 1109
1106 1110 if len(self._parents) < 2:
1107 1111 # when we have one parent, it's easy: copy from parent
1108 1112 man = self._parents[0].manifest()
1109 1113 def func(f):
1110 1114 f = copiesget(f, f)
1111 1115 return man.flags(f)
1112 1116 else:
1113 1117 # merges are tricky: we try to reconstruct the unstored
1114 1118 # result from the merge (issue1802)
1115 1119 p1, p2 = self._parents
1116 1120 pa = p1.ancestor(p2)
1117 1121 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1118 1122
1119 1123 def func(f):
1120 1124 f = copiesget(f, f) # may be wrong for merges with copies
1121 1125 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1122 1126 if fl1 == fl2:
1123 1127 return fl1
1124 1128 if fl1 == fla:
1125 1129 return fl2
1126 1130 if fl2 == fla:
1127 1131 return fl1
1128 1132 return '' # punt for conflicts
1129 1133
1130 1134 return func
1131 1135
1132 1136 @propertycache
1133 1137 def _flagfunc(self):
1134 1138 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1135 1139
1136 1140 @propertycache
1137 1141 def _manifest(self):
1138 1142 """generate a manifest corresponding to the values in self._status
1139 1143
1140 1144 This reuses the file nodeid from the parent, but we append an extra letter
1141 1145 when modified. Modified files get an extra 'm' while added files get
1142 1146 an extra 'a'. This is used by manifest merge to see that files
1143 1147 are different and by update logic to avoid deleting newly added files.
1144 1148 """
1145 1149
1146 1150 man1 = self._parents[0].manifest()
1147 1151 man = man1.copy()
1148 1152 if len(self._parents) > 1:
1149 1153 man2 = self.p2().manifest()
1150 1154 def getman(f):
1151 1155 if f in man1:
1152 1156 return man1
1153 1157 return man2
1154 1158 else:
1155 1159 getman = lambda f: man1
1156 1160
1157 1161 copied = self._repo.dirstate.copies()
1158 1162 ff = self._flagfunc
1159 1163 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1160 1164 for f in l:
1161 1165 orig = copied.get(f, f)
1162 1166 man[f] = getman(orig).get(orig, nullid) + i
1163 1167 try:
1164 1168 man.setflag(f, ff(f))
1165 1169 except OSError:
1166 1170 pass
1167 1171
1168 1172 for f in self._status.deleted + self._status.removed:
1169 1173 if f in man:
1170 1174 del man[f]
1171 1175
1172 1176 return man
1173 1177
1174 1178 @propertycache
1175 1179 def _status(self):
1176 1180 return self._repo.status()
1177 1181
1178 1182 @propertycache
1179 1183 def _user(self):
1180 1184 return self._repo.ui.username()
1181 1185
1182 1186 @propertycache
1183 1187 def _date(self):
1184 1188 return util.makedate()
1185 1189
1186 1190 def subrev(self, subpath):
1187 1191 return None
1188 1192
1189 1193 def user(self):
1190 1194 return self._user or self._repo.ui.username()
1191 1195 def date(self):
1192 1196 return self._date
1193 1197 def description(self):
1194 1198 return self._text
1195 1199 def files(self):
1196 1200 return sorted(self._status.modified + self._status.added +
1197 1201 self._status.removed)
1198 1202
1199 1203 def modified(self):
1200 1204 return self._status.modified
1201 1205 def added(self):
1202 1206 return self._status.added
1203 1207 def removed(self):
1204 1208 return self._status.removed
1205 1209 def deleted(self):
1206 1210 return self._status.deleted
1207 1211 def branch(self):
1208 1212 return encoding.tolocal(self._extra['branch'])
1209 1213 def closesbranch(self):
1210 1214 return 'close' in self._extra
1211 1215 def extra(self):
1212 1216 return self._extra
1213 1217
1214 1218 def tags(self):
1215 1219 t = []
1216 1220 for p in self.parents():
1217 1221 t.extend(p.tags())
1218 1222 return t
1219 1223
1220 1224 def bookmarks(self):
1221 1225 b = []
1222 1226 for p in self.parents():
1223 1227 b.extend(p.bookmarks())
1224 1228 return b
1225 1229
1226 1230 def phase(self):
1227 1231 phase = phases.draft # default phase to draft
1228 1232 for p in self.parents():
1229 1233 phase = max(phase, p.phase())
1230 1234 return phase
1231 1235
1232 1236 def hidden(self):
1233 1237 return False
1234 1238
1235 1239 def children(self):
1236 1240 return []
1237 1241
1238 1242 def flags(self, path):
1239 1243 if '_manifest' in self.__dict__:
1240 1244 try:
1241 1245 return self._manifest.flags(path)
1242 1246 except KeyError:
1243 1247 return ''
1244 1248
1245 1249 try:
1246 1250 return self._flagfunc(path)
1247 1251 except OSError:
1248 1252 return ''
1249 1253
1250 1254 def ancestor(self, c2):
1251 1255 """return the "best" ancestor context of self and c2"""
1252 1256 return self._parents[0].ancestor(c2) # punt on two parents for now
1253 1257
1254 1258 def walk(self, match):
1255 1259 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1256 1260 True, False))
1257 1261
1258 1262 def matches(self, match):
1259 1263 return sorted(self._repo.dirstate.matches(match))
1260 1264
1261 1265 def ancestors(self):
1262 1266 for p in self._parents:
1263 1267 yield p
1264 1268 for a in self._repo.changelog.ancestors(
1265 1269 [p.rev() for p in self._parents]):
1266 1270 yield changectx(self._repo, a)
1267 1271
1268 1272 def markcommitted(self, node):
1269 1273 """Perform post-commit cleanup necessary after committing this ctx
1270 1274
1271 1275 Specifically, this updates backing stores this working context
1272 1276 wraps to reflect the fact that the changes reflected by this
1273 1277 workingctx have been committed. For example, it marks
1274 1278 modified and added files as normal in the dirstate.
1275 1279
1276 1280 """
1277 1281
1278 1282 self._repo.dirstate.beginparentchange()
1279 1283 for f in self.modified() + self.added():
1280 1284 self._repo.dirstate.normal(f)
1281 1285 for f in self.removed():
1282 1286 self._repo.dirstate.drop(f)
1283 1287 self._repo.dirstate.setparents(node)
1284 1288 self._repo.dirstate.endparentchange()
1285 1289
1286 1290 def dirs(self):
1287 1291 return self._repo.dirstate.dirs()
1288 1292
1289 1293 class workingctx(committablectx):
1290 1294 """A workingctx object makes access to data related to
1291 1295 the current working directory convenient.
1292 1296 date - any valid date string or (unixtime, offset), or None.
1293 1297 user - username string, or None.
1294 1298 extra - a dictionary of extra values, or None.
1295 1299 changes - a list of file lists as returned by localrepo.status()
1296 1300 or None to use the repository status.
1297 1301 """
1298 1302 def __init__(self, repo, text="", user=None, date=None, extra=None,
1299 1303 changes=None):
1300 1304 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1301 1305
1302 1306 def __iter__(self):
1303 1307 d = self._repo.dirstate
1304 1308 for f in d:
1305 1309 if d[f] != 'r':
1306 1310 yield f
1307 1311
1308 1312 def __contains__(self, key):
1309 1313 return self._repo.dirstate[key] not in "?r"
1310 1314
1311 1315 @propertycache
1312 1316 def _parents(self):
1313 1317 p = self._repo.dirstate.parents()
1314 1318 if p[1] == nullid:
1315 1319 p = p[:-1]
1316 1320 return [changectx(self._repo, x) for x in p]
1317 1321
1318 1322 def filectx(self, path, filelog=None):
1319 1323 """get a file context from the working directory"""
1320 1324 return workingfilectx(self._repo, path, workingctx=self,
1321 1325 filelog=filelog)
1322 1326
1323 1327 def dirty(self, missing=False, merge=True, branch=True):
1324 1328 "check whether a working directory is modified"
1325 1329 # check subrepos first
1326 1330 for s in sorted(self.substate):
1327 1331 if self.sub(s).dirty():
1328 1332 return True
1329 1333 # check current working dir
1330 1334 return ((merge and self.p2()) or
1331 1335 (branch and self.branch() != self.p1().branch()) or
1332 1336 self.modified() or self.added() or self.removed() or
1333 1337 (missing and self.deleted()))
1334 1338
1335 1339 def add(self, list, prefix=""):
1336 1340 join = lambda f: os.path.join(prefix, f)
1337 1341 wlock = self._repo.wlock()
1338 1342 ui, ds = self._repo.ui, self._repo.dirstate
1339 1343 try:
1340 1344 rejected = []
1341 1345 lstat = self._repo.wvfs.lstat
1342 1346 for f in list:
1343 1347 scmutil.checkportable(ui, join(f))
1344 1348 try:
1345 1349 st = lstat(f)
1346 1350 except OSError:
1347 1351 ui.warn(_("%s does not exist!\n") % join(f))
1348 1352 rejected.append(f)
1349 1353 continue
1350 1354 if st.st_size > 10000000:
1351 1355 ui.warn(_("%s: up to %d MB of RAM may be required "
1352 1356 "to manage this file\n"
1353 1357 "(use 'hg revert %s' to cancel the "
1354 1358 "pending addition)\n")
1355 1359 % (f, 3 * st.st_size // 1000000, join(f)))
1356 1360 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1357 1361 ui.warn(_("%s not added: only files and symlinks "
1358 1362 "supported currently\n") % join(f))
1359 1363 rejected.append(f)
1360 1364 elif ds[f] in 'amn':
1361 1365 ui.warn(_("%s already tracked!\n") % join(f))
1362 1366 elif ds[f] == 'r':
1363 1367 ds.normallookup(f)
1364 1368 else:
1365 1369 ds.add(f)
1366 1370 return rejected
1367 1371 finally:
1368 1372 wlock.release()
1369 1373
1370 1374 def forget(self, files, prefix=""):
1371 1375 join = lambda f: os.path.join(prefix, f)
1372 1376 wlock = self._repo.wlock()
1373 1377 try:
1374 1378 rejected = []
1375 1379 for f in files:
1376 1380 if f not in self._repo.dirstate:
1377 1381 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1378 1382 rejected.append(f)
1379 1383 elif self._repo.dirstate[f] != 'a':
1380 1384 self._repo.dirstate.remove(f)
1381 1385 else:
1382 1386 self._repo.dirstate.drop(f)
1383 1387 return rejected
1384 1388 finally:
1385 1389 wlock.release()
1386 1390
1387 1391 def undelete(self, list):
1388 1392 pctxs = self.parents()
1389 1393 wlock = self._repo.wlock()
1390 1394 try:
1391 1395 for f in list:
1392 1396 if self._repo.dirstate[f] != 'r':
1393 1397 self._repo.ui.warn(_("%s not removed!\n") % f)
1394 1398 else:
1395 1399 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1396 1400 t = fctx.data()
1397 1401 self._repo.wwrite(f, t, fctx.flags())
1398 1402 self._repo.dirstate.normal(f)
1399 1403 finally:
1400 1404 wlock.release()
1401 1405
1402 1406 def copy(self, source, dest):
1403 1407 try:
1404 1408 st = self._repo.wvfs.lstat(dest)
1405 1409 except OSError, err:
1406 1410 if err.errno != errno.ENOENT:
1407 1411 raise
1408 1412 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1409 1413 return
1410 1414 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1411 1415 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1412 1416 "symbolic link\n") % dest)
1413 1417 else:
1414 1418 wlock = self._repo.wlock()
1415 1419 try:
1416 1420 if self._repo.dirstate[dest] in '?':
1417 1421 self._repo.dirstate.add(dest)
1418 1422 elif self._repo.dirstate[dest] in 'r':
1419 1423 self._repo.dirstate.normallookup(dest)
1420 1424 self._repo.dirstate.copy(source, dest)
1421 1425 finally:
1422 1426 wlock.release()
1423 1427
1424 1428 def _filtersuspectsymlink(self, files):
1425 1429 if not files or self._repo.dirstate._checklink:
1426 1430 return files
1427 1431
1428 1432 # Symlink placeholders may get non-symlink-like contents
1429 1433 # via user error or dereferencing by NFS or Samba servers,
1430 1434 # so we filter out any placeholders that don't look like a
1431 1435 # symlink
1432 1436 sane = []
1433 1437 for f in files:
1434 1438 if self.flags(f) == 'l':
1435 1439 d = self[f].data()
1436 1440 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1437 1441 self._repo.ui.debug('ignoring suspect symlink placeholder'
1438 1442 ' "%s"\n' % f)
1439 1443 continue
1440 1444 sane.append(f)
1441 1445 return sane
1442 1446
1443 1447 def _checklookup(self, files):
1444 1448 # check for any possibly clean files
1445 1449 if not files:
1446 1450 return [], []
1447 1451
1448 1452 modified = []
1449 1453 fixup = []
1450 1454 pctx = self._parents[0]
1451 1455 # do a full compare of any files that might have changed
1452 1456 for f in sorted(files):
1453 1457 if (f not in pctx or self.flags(f) != pctx.flags(f)
1454 1458 or pctx[f].cmp(self[f])):
1455 1459 modified.append(f)
1456 1460 else:
1457 1461 fixup.append(f)
1458 1462
1459 1463 # update dirstate for files that are actually clean
1460 1464 if fixup:
1461 1465 try:
1462 1466 # updating the dirstate is optional
1463 1467 # so we don't wait on the lock
1464 1468 # wlock can invalidate the dirstate, so cache normal _after_
1465 1469 # taking the lock
1466 1470 wlock = self._repo.wlock(False)
1467 1471 normal = self._repo.dirstate.normal
1468 1472 try:
1469 1473 for f in fixup:
1470 1474 normal(f)
1471 1475 finally:
1472 1476 wlock.release()
1473 1477 except error.LockError:
1474 1478 pass
1475 1479 return modified, fixup
1476 1480
1477 1481 def _manifestmatches(self, match, s):
1478 1482 """Slow path for workingctx
1479 1483
1480 1484 The fast path is when we compare the working directory to its parent
1481 1485 which means this function is comparing with a non-parent; therefore we
1482 1486 need to build a manifest and return what matches.
1483 1487 """
1484 1488 mf = self._repo['.']._manifestmatches(match, s)
1485 1489 for f in s.modified + s.added:
1486 1490 mf[f] = _newnode
1487 1491 mf.setflag(f, self.flags(f))
1488 1492 for f in s.removed:
1489 1493 if f in mf:
1490 1494 del mf[f]
1491 1495 return mf
1492 1496
1493 1497 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1494 1498 unknown=False):
1495 1499 '''Gets the status from the dirstate -- internal use only.'''
1496 1500 listignored, listclean, listunknown = ignored, clean, unknown
1497 1501 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1498 1502 subrepos = []
1499 1503 if '.hgsub' in self:
1500 1504 subrepos = sorted(self.substate)
1501 1505 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1502 1506 listclean, listunknown)
1503 1507
1504 1508 # check for any possibly clean files
1505 1509 if cmp:
1506 1510 modified2, fixup = self._checklookup(cmp)
1507 1511 s.modified.extend(modified2)
1508 1512
1509 1513 # update dirstate for files that are actually clean
1510 1514 if fixup and listclean:
1511 1515 s.clean.extend(fixup)
1512 1516
1513 1517 if match.always():
1514 1518 # cache for performance
1515 1519 if s.unknown or s.ignored or s.clean:
1516 1520 # "_status" is cached with list*=False in the normal route
1517 1521 self._status = scmutil.status(s.modified, s.added, s.removed,
1518 1522 s.deleted, [], [], [])
1519 1523 else:
1520 1524 self._status = s
1521 1525
1522 1526 return s
1523 1527
1524 1528 def _buildstatus(self, other, s, match, listignored, listclean,
1525 1529 listunknown):
1526 1530 """build a status with respect to another context
1527 1531
1528 1532 This includes logic for maintaining the fast path of status when
1529 1533 comparing the working directory against its parent, which is to skip
1530 1534 building a new manifest if self (working directory) is not comparing
1531 1535 against its parent (repo['.']).
1532 1536 """
1533 1537 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1534 1538 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1535 1539 # might have accidentally ended up with the entire contents of the file
1536 1540 # they are supposed to be linking to.
1537 1541 s.modified[:] = self._filtersuspectsymlink(s.modified)
1538 1542 if other != self._repo['.']:
1539 1543 s = super(workingctx, self)._buildstatus(other, s, match,
1540 1544 listignored, listclean,
1541 1545 listunknown)
1542 1546 return s
1543 1547
1544 1548 def _matchstatus(self, other, match):
1545 1549 """override the match method with a filter for directory patterns
1546 1550
1547 1551 We use inheritance to customize the match.bad method only in cases of
1548 1552 workingctx since it belongs only to the working directory when
1549 1553 comparing against the parent changeset.
1550 1554
1551 1555 If we aren't comparing against the working directory's parent, then we
1552 1556 just use the default match object sent to us.
1553 1557 """
1554 1558 superself = super(workingctx, self)
1555 1559 match = superself._matchstatus(other, match)
1556 1560 if other != self._repo['.']:
1557 1561 def bad(f, msg):
1558 1562 # 'f' may be a directory pattern from 'match.files()',
1559 1563 # so 'f not in ctx1' is not enough
1560 1564 if f not in other and f not in other.dirs():
1561 1565 self._repo.ui.warn('%s: %s\n' %
1562 1566 (self._repo.dirstate.pathto(f), msg))
1563 1567 match.bad = bad
1564 1568 return match
1565 1569
1566 1570 class committablefilectx(basefilectx):
1567 1571 """A committablefilectx provides common functionality for a file context
1568 1572 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1569 1573 def __init__(self, repo, path, filelog=None, ctx=None):
1570 1574 self._repo = repo
1571 1575 self._path = path
1572 1576 self._changeid = None
1573 1577 self._filerev = self._filenode = None
1574 1578
1575 1579 if filelog is not None:
1576 1580 self._filelog = filelog
1577 1581 if ctx:
1578 1582 self._changectx = ctx
1579 1583
1580 1584 def __nonzero__(self):
1581 1585 return True
1582 1586
1583 1587 def parents(self):
1584 1588 '''return parent filectxs, following copies if necessary'''
1585 1589 def filenode(ctx, path):
1586 1590 return ctx._manifest.get(path, nullid)
1587 1591
1588 1592 path = self._path
1589 1593 fl = self._filelog
1590 1594 pcl = self._changectx._parents
1591 1595 renamed = self.renamed()
1592 1596
1593 1597 if renamed:
1594 1598 pl = [renamed + (None,)]
1595 1599 else:
1596 1600 pl = [(path, filenode(pcl[0], path), fl)]
1597 1601
1598 1602 for pc in pcl[1:]:
1599 1603 pl.append((path, filenode(pc, path), fl))
1600 1604
1601 1605 return [filectx(self._repo, p, fileid=n, filelog=l)
1602 1606 for p, n, l in pl if n != nullid]
1603 1607
1604 1608 def children(self):
1605 1609 return []
1606 1610
1607 1611 class workingfilectx(committablefilectx):
1608 1612 """A workingfilectx object makes access to data related to a particular
1609 1613 file in the working directory convenient."""
1610 1614 def __init__(self, repo, path, filelog=None, workingctx=None):
1611 1615 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1612 1616
1613 1617 @propertycache
1614 1618 def _changectx(self):
1615 1619 return workingctx(self._repo)
1616 1620
1617 1621 def data(self):
1618 1622 return self._repo.wread(self._path)
1619 1623 def renamed(self):
1620 1624 rp = self._repo.dirstate.copied(self._path)
1621 1625 if not rp:
1622 1626 return None
1623 1627 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1624 1628
1625 1629 def size(self):
1626 1630 return self._repo.wvfs.lstat(self._path).st_size
1627 1631 def date(self):
1628 1632 t, tz = self._changectx.date()
1629 1633 try:
1630 1634 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1631 1635 except OSError, err:
1632 1636 if err.errno != errno.ENOENT:
1633 1637 raise
1634 1638 return (t, tz)
1635 1639
1636 1640 def cmp(self, fctx):
1637 1641 """compare with other file context
1638 1642
1639 1643 returns True if different than fctx.
1640 1644 """
1641 1645 # fctx should be a filectx (not a workingfilectx)
1642 1646 # invert comparison to reuse the same code path
1643 1647 return fctx.cmp(self)
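# --- Editor's sketch, not part of context.py: thanks to the inverted
# comparison above, asking whether a working file differs from its committed
# version is a one-liner. 'repo' and 'path' are assumed.
def isdirty(repo, path):
    return repo[None][path].cmp(repo['.'][path])   # True if contents differ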
1644 1648
1645 1649 def remove(self, ignoremissing=False):
1646 1650 """wraps unlink for a repo's working directory"""
1647 1651 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1648 1652
1649 1653 def write(self, data, flags):
1650 1654 """wraps repo.wwrite"""
1651 1655 self._repo.wwrite(self._path, data, flags)
1652 1656
1653 1657 class workingcommitctx(workingctx):
1654 1658 """A workingcommitctx object makes access to data related to
1655 1659 the revision being committed convenient.
1656 1660
1657 1661 This hides changes in the working directory, if they aren't
1658 1662 committed in this context.
1659 1663 """
1660 1664 def __init__(self, repo, changes,
1661 1665 text="", user=None, date=None, extra=None):
1662 1666 super(workingctx, self).__init__(repo, text, user, date, extra,
1663 1667 changes)
1664 1668
1665 1669 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1666 1670 unknown=False):
1667 1671 """Return matched files only in ``self._status``
1668 1672
1669 1673 Uncommitted files appear "clean" via this context, even if
1670 1674 they aren't actually so in the working directory.
1671 1675 """
1672 1676 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1673 1677 if clean:
1674 1678 clean = [f for f in self._manifest if f not in self._changedset]
1675 1679 else:
1676 1680 clean = []
1677 1681 return scmutil.status([f for f in self._status.modified if match(f)],
1678 1682 [f for f in self._status.added if match(f)],
1679 1683 [f for f in self._status.removed if match(f)],
1680 1684 [], [], [], clean)
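# --- Editor's sketch, not part of context.py: during a partial commit such as
# 'hg commit file-a' while 'file-b' is also modified, the status seen through
# this context lists only 'file-a'; 'file-b' shows up as clean when clean
# files are requested. A rough check of that behaviour, with wcctx assumed to
# be a workingcommitctx built for ['file-a'] only:
def partialcommitstatus(wcctx):
    s = wcctx._dirstatestatus(clean=True)
    return 'file-b' not in s.modified and 'file-b' in s.clean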
1681 1685
1682 1686 @propertycache
1683 1687 def _changedset(self):
1684 1688 """Return the set of files changed in this context
1685 1689 """
1686 1690 changed = set(self._status.modified)
1687 1691 changed.update(self._status.added)
1688 1692 changed.update(self._status.removed)
1689 1693 return changed
1690 1694
1691 1695 class memctx(committablectx):
1692 1696 """Use memctx to perform in-memory commits via localrepo.commitctx().
1693 1697
1694 1698 Revision information is supplied at initialization time, while
1695 1699 related files data is made available through a callback
1696 1700 mechanism. 'repo' is the current localrepo, 'parents' is a
1697 1701 sequence of two parent revisions identifiers (pass None for every
1698 1702 missing parent), 'text' is the commit message and 'files' lists
1699 1703 names of files touched by the revision (normalized and relative to
1700 1704 repository root).
1701 1705
1702 1706 filectxfn(repo, memctx, path) is a callable receiving the
1703 1707 repository, the current memctx object and the normalized path of
1704 1708 requested file, relative to repository root. It is fired by the
1705 1709 commit function for every file in 'files', but the call order is
1706 1710 undefined. If the file is available in the revision being
1707 1711 committed (updated or added), filectxfn returns a memfilectx
1708 1712 object. If the file was removed, filectxfn raises an
1709 1713 IOError. Moved files are represented by marking the source file
1710 1714 removed and the new file added with copy information (see
1711 1715 memfilectx).
1712 1716
1713 1717 user receives the committer name and defaults to current
1714 1718 repository username, date is the commit date in any format
1715 1719 supported by util.parsedate() and defaults to current date, extra
1716 1720 is a dictionary of metadata or is left empty.
1717 1721 """
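# --- Editor's sketch, not part of context.py: a minimal, hypothetical use of
# memctx to create a one-file commit without touching the working directory.
# It relies only on names defined in this module plus an assumed 'repo'; the
# file name, contents and user are invented and error handling is omitted.
def commitinmemory(repo):
    def getfilectx(repo, memctx, path):
        # called once per entry in 'files'; return the new file contents
        return memfilectx(repo, path, 'hello from memctx\n', memctx=memctx)
    mctx = memctx(repo, [repo['.'].node(), None], 'in-memory commit example',
                  ['hello.txt'], getfilectx, user='someone <someone@example>')
    return repo.commitctx(mctx)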
1718 1722
1719 1723 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1720 1724 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1721 1725 # this field to determine what to do in filectxfn.
1722 1726 _returnnoneformissingfiles = True
1723 1727
1724 1728 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1725 1729 date=None, extra=None, editor=False):
1726 1730 super(memctx, self).__init__(repo, text, user, date, extra)
1727 1731 self._rev = None
1728 1732 self._node = None
1729 1733 parents = [(p or nullid) for p in parents]
1730 1734 p1, p2 = parents
1731 1735 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1732 1736 files = sorted(set(files))
1733 1737 self._files = files
1734 1738 self.substate = {}
1735 1739
1736 1740 # if store is not callable, wrap it in a function
1737 1741 if not callable(filectxfn):
1738 1742 def getfilectx(repo, memctx, path):
1739 1743 fctx = filectxfn[path]
1740 1744 # this is weird but apparently we only keep track of one parent
1741 1745 # (why not only store that instead of a tuple?)
1742 1746 copied = fctx.renamed()
1743 1747 if copied:
1744 1748 copied = copied[0]
1745 1749 return memfilectx(repo, path, fctx.data(),
1746 1750 islink=fctx.islink(), isexec=fctx.isexec(),
1747 1751 copied=copied, memctx=memctx)
1748 1752 self._filectxfn = getfilectx
1749 1753 else:
1750 1754 # "util.cachefunc" reduces invocation of possibly expensive
1751 1755 # "filectxfn" for performance (e.g. converting from another VCS)
1752 1756 self._filectxfn = util.cachefunc(filectxfn)
1753 1757
1754 1758 self._extra = extra and extra.copy() or {}
1755 1759 if self._extra.get('branch', '') == '':
1756 1760 self._extra['branch'] = 'default'
1757 1761
1758 1762 if editor:
1759 1763 self._text = editor(self._repo, self, [])
1760 1764 self._repo.savecommitmessage(self._text)
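# --- Editor's sketch, not part of context.py: filectxfn can also be a mapping
# from path to a file context, which __init__ wraps via getfilectx above,
# while callables are memoized with util.cachefunc so an expensive source
# (for example a foreign VCS during conversion) is consulted at most once per
# path. Names below are assumed for illustration:
def commitfrommapping(repo, text, user):
    store = {'hello.txt': memfilectx(repo, 'hello.txt', 'hello\n')}
    mctx = memctx(repo, [repo['.'].node(), None], text, sorted(store), store,
                  user=user)
    return mctx.commit()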
1761 1765
1762 1766 def filectx(self, path, filelog=None):
1763 1767 """get a file context from the working directory
1764 1768
1765 1769 Returns None if the file doesn't exist and should be removed."""
1766 1770 return self._filectxfn(self._repo, self, path)
1767 1771
1768 1772 def commit(self):
1769 1773 """commit context to the repo"""
1770 1774 return self._repo.commitctx(self)
1771 1775
1772 1776 @propertycache
1773 1777 def _manifest(self):
1774 1778 """generate a manifest based on the return values of filectxfn"""
1775 1779
1776 1780 # keep this simple for now; just worry about p1
1777 1781 pctx = self._parents[0]
1778 1782 man = pctx.manifest().copy()
1779 1783
1780 1784 for f in self._status.modified:
1781 1785 p1node = nullid
1782 1786 p2node = nullid
1783 1787 p = pctx[f].parents() # if file isn't in pctx, check p2?
1784 1788 if len(p) > 0:
1785 1789 p1node = p[0].node()
1786 1790 if len(p) > 1:
1787 1791 p2node = p[1].node()
1788 1792 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1789 1793
1790 1794 for f in self._status.added:
1791 1795 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1792 1796
1793 1797 for f in self._status.removed:
1794 1798 if f in man:
1795 1799 del man[f]
1796 1800
1797 1801 return man
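# --- Editor's note, not part of context.py: revlog.hash() above derives the
# per-file nodeid from the (sorted) parent nodeids followed by the file text,
# so an entry keeps its old nodeid only when both data and parents are
# unchanged. A hypothetical spot-check:
def keptnodeid(data, p1node, p2node, oldnode):
    return revlog.hash(data, p1node, p2node) == oldnode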
1798 1802
1799 1803 @propertycache
1800 1804 def _status(self):
1801 1805 """Calculate exact status from ``files`` specified at construction
1802 1806 """
1803 1807 man1 = self.p1().manifest()
1804 1808 p2 = self._parents[1]
1805 1809 # "1 < len(self._parents)" can't be used for checking
1806 1810 # existence of the 2nd parent, because "memctx._parents" is
1807 1811 # explicitly initialized with a list whose length is always 2.
1808 1812 if p2.node() != nullid:
1809 1813 man2 = p2.manifest()
1810 1814 managing = lambda f: f in man1 or f in man2
1811 1815 else:
1812 1816 managing = lambda f: f in man1
1813 1817
1814 1818 modified, added, removed = [], [], []
1815 1819 for f in self._files:
1816 1820 if not managing(f):
1817 1821 added.append(f)
1818 1822 elif self[f]:
1819 1823 modified.append(f)
1820 1824 else:
1821 1825 removed.append(f)
1822 1826
1823 1827 return scmutil.status(modified, added, removed, [], [], [], [])
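# --- Editor's note, not part of context.py: the classification above, with a
# hypothetical input. Given files = ['new.txt', 'tracked.txt', 'gone.txt'],
# where 'new.txt' is in neither parent manifest, and filectxfn returns a
# memfilectx for 'tracked.txt' but None for the tracked file 'gone.txt', the
# result is added=['new.txt'], modified=['tracked.txt'], removed=['gone.txt'].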
1824 1828
1825 1829 class memfilectx(committablefilectx):
1826 1830 """memfilectx represents an in-memory file to commit.
1827 1831
1828 1832 See memctx and committablefilectx for more details.
1829 1833 """
1830 1834 def __init__(self, repo, path, data, islink=False,
1831 1835 isexec=False, copied=None, memctx=None):
1832 1836 """
1833 1837 path is the normalized file path relative to repository root.
1834 1838 data is the file content as a string.
1835 1839 islink is True if the file is a symbolic link.
1836 1840 isexec is True if the file is executable.
1837 1841 copied is the source file path if the current file was copied in the
1838 1842 revision being committed, or None."""
1839 1843 super(memfilectx, self).__init__(repo, path, None, memctx)
1840 1844 self._data = data
1841 1845 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1842 1846 self._copied = None
1843 1847 if copied:
1844 1848 self._copied = (copied, nullid)
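# --- Editor's note, not part of context.py: the constructor above encodes
# flags the same way manifests do, 'l' for symlinks and 'x' for executables:
#   memfilectx(repo, 'f', 'data').flags()                  -> ''
#   memfilectx(repo, 'f', 'data', isexec=True).flags()     -> 'x'
#   memfilectx(repo, 'f', 'data', islink=True).flags()     -> 'l'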
1845 1849
1846 1850 def data(self):
1847 1851 return self._data
1848 1852 def size(self):
1849 1853 return len(self.data())
1850 1854 def flags(self):
1851 1855 return self._flags
1852 1856 def renamed(self):
1853 1857 return self._copied
1854 1858
1855 1859 def remove(self, ignoremissing=False):
1856 1860 """wraps unlink for a repo's working directory"""
1857 1861 # need to figure out what to do here
1858 1862 del self._changectx[self._path]
1859 1863
1860 1864 def write(self, data, flags):
1861 1865 """wraps repo.wwrite"""
1862 1866 self._data = data