##// END OF EJS Templates
context: use unfiltered repo for '.'...
Martin von Zweigbergk -
r24050:a9b61dbd default
parent child Browse files
Show More
@@ -1,1866 +1,1868
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand-in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged, so
        # basectx(repo, otherctx) is effectively a no-op copy.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        # Defaults correspond to the null revision; subclasses overwrite
        # these in __init__ once the changeid has been resolved.
        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        # Short (12-hex-digit) node hash, matching command-line output.
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even
        # for the same revision number.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership means "file tracked in this context's manifest".
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] yields a file context for that path.
        return self.filectx(key)

    def __iter__(self):
        # Iterate tracked file names in sorted order.
        for f in sorted(self._manifest):
            yield f

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        # Deleted/unknown/ignored come from the pre-computed status 's'
        # (dirstate walk); the manifest diff below cannot discover them.
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                # Reported as deleted already; don't double-count.
                continue
            if value is None:
                # mf1.diff reports clean files as None when listclean is set.
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif node2 != _newnode:
                # The file was not a new file in mf2, so an entry
                # from diff is really a difference.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # node2 was newnode, but the working file doesn't
                # match the one in mf1.
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate mapping for this context (cached).
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded for a subrepo path in .hgsubstate.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        # Human-readable phase name ('public', 'draft', 'secret').
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything above the public phase may still be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        # Evaluate a fileset expression against this context.
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changeset.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        # First parent; always present (null context at worst).
        return self._parents[0]

    def p2(self):
        # Second parent, or the null context when there is none.
        if len(self._parents) == 2:
            return self._parents[1]
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        """Return (filenode, flags) for path, trying caches cheapest-first.

        Raises error.ManifestLookupError if path is not in the manifest.
        """
        if '_manifest' in self.__dict__:
            # Full manifest already loaded; use it directly.
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                               _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # A manifest delta is cheaper than reading the whole manifest
            # when the file was touched by this changeset.
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files report empty flags rather than raising.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        return subrepo.subrepo(self, path)

    # NOTE(review): mutable default [] for pats is shared across calls;
    # safe only as long as matchmod.match never mutates it -- TODO confirm.
    def match(self, pats=[], include=None, exclude=None, default='glob'):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        # Set-like object of all directories implied by the manifest.
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        # Committed contexts are never dirty; workingctx overrides this.
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # Prefix subrepo results with "subpath/" and fold them
                    # into the corresponding top-level status lists.
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r
338 338
339 339
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build an in-memory changeset context whose file data comes from store.

    The returned memctx fetches each file lazily through store.getfile();
    a None data value from the store marks the file as removed.
    """
    def getfilectx(repo, memctx, path):
        # Resolve one file's payload, mode and copy source from the store.
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    extra = {'branch': encoding.fromlocal(branch)} if branch else {}
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
355 355
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # The lookup chain below is ordered from cheapest to most expensive
        # and its order is significant; each successful branch sets
        # self._node/self._rev and returns.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # Looks like a binary node id.
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # Numeric string: accept only canonical decimal spellings
                # (str(int(x)) == x), allowing negative indexing from tip.
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # Full 40-char hex node id.
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # Last resort: unambiguous hex prefix match (unfiltered so that
            # hidden changesets are found and reported as filtered below).
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # Re-spell binary node ids as hex for the error message.
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # The revision exists but is hidden/filtered in this repo view;
            # give a more helpful message than "unknown revision".
            if repo.filtername == 'visible':
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        # Hash by revision when resolved; fall back to identity.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # The null revision is falsy.
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # Raw changelog entry tuple:
        # (manifest node, user, date, files, description, extra).
        return self._repo.changelog.read(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        # Manifest delta against the parent; cheaper than a full read when
        # only files touched by this changeset are needed.
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            # Drop the null second parent for non-merges.
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def changeset(self):
        return self._changeset
    def manifestnode(self):
        return self._changeset[0]

    # Accessors into the raw changelog entry fields.
    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset[5]
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        # Lazily yield ancestor contexts (self excluded).
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        # Lazily yield descendant contexts (self excluded).
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # Multiple common-ancestor heads: honor merge.preferancestor
            # if one of its entries is among the candidates.
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        """Yield tracked file names matching the matcher, reporting bad
        (nonexistent) explicit files via match.bad()."""
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # avoid the entire walk if we're only looking for specific files
        if fset and not match.anypats():
            if util.all([fn in self for fn in fset]):
                for fn in sorted(fset):
                    if match(fn):
                        yield fn
                raise StopIteration

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn
        for fn in sorted(fset):
            if fn in self._dirs:
                # specified pattern is a directory
                continue
            match.bad(fn, _('no such file in rev %s') % self)

    def matches(self, match):
        return self.walk(match)
615 617
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        # Revlog holding this file's history.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Changelog revision this file revision is associated with.
        # Subclasses may pre-populate any of the attributes checked here.
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._path, self._filelog,
                                       self._filenode, self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # Equal when same concrete type, path and file node.
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # Raw linkrev from the filelog; may be an alias (see introrev).
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()

    # The following delegate to the associated changeset context.
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # Only fall through to a full content comparison when sizes could
        # plausibly match; otherwise the files are definitely different.
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :repo: a localrepository object (used to access changelog and manifest)
        :path: the file path
        :fnode: the nodeid of the file revision
        :filelog: the filelog of this path
        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        # Use the unfiltered changelog: linkrevs may point at hidden
        # changesets and ancestor walks must be able to traverse them.
        repo = self._repo
        cl = repo.unfiltered().changelog
        ma = repo.manifest
        # fetch the linkrev
        fr = filelog.rev(fnode)
        lkr = filelog.linkrev(fr)
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if memberanc is None:
            memberanc = iteranc = cl.ancestors([srcrev], lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == ma.readfast(ac[0]).get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # No associated changeset (or linkrev already correct): the raw
            # linkrev is trustworthy.
            return self.linkrev()
        return self._adjustlinkrev(self._path, self._filelog, self._filenode,
                                   self.rev(), inclusive=True)

    def parents(self):
        """Return parent file contexts, folding in rename information."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have alway been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        ret = []
        for path, fnode, l in pl:
            if '_changeid' in vars(self) or '_changectx' in vars(self):
                # If self is associated with a changeset (probably explicitly
                # fed), ensure the created filectx is associated with a
                # changeset that is an ancestor of self.changectx.
                # This lets us later use _adjustlinkrev to get a correct link.
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
                fctx._descendantrev = self.rev()
                fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
            elif '_descendantrev' in vars(self):
                # Otherwise propagate _descendantrev if we have one associated.
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
                fctx._descendantrev = self._descendantrev
                fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
            else:
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
            ret.append(fctx)
        return ret

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # No second parent: return a null file context for this path.
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        # decorate() pairs each line of 'text' with its annotation token.
        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            # Propagate annotations from parent to child for unchanged blocks.
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 'needed' reference-counts pending uses of each ancestor's
        # annotation so hist entries can be freed as soon as possible.
        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))

    def ancestors(self, followfirst=False):
        # Walk ancestors in descending (linkrev, filenode) order; with
        # followfirst only the first parent chain is followed.
        visit = {}
        c = self
        cut = followfirst and 1 or None
        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c
971 973
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only pre-set the attributes we were given; anything missing is
        # derived lazily by the corresponding propertycache properties
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        # raw file data; a censored node yields "" when censor.policy is
        # "ignore", otherwise the read aborts with a hint
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size as recorded in the filelog for this file revision
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # this filectx was reached through a changeset other than the one
        # the rename was recorded in: report the copy only if neither
        # parent of that changeset already has this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1069 1071
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # user/date/status are only pre-set when given; otherwise the
        # propertycache fallbacks below compute them on first access
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                # for copies/renames, base the nodeid on the copy source
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        # fallback when no ``changes`` were supplied at construction
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # an uncommitted context has no subrepo revision yet
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # all touched files (modified + added + removed), sorted
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # union of the parents' tags
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached manifest's flags when it was already built
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then all their changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()
1292 1294
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate over tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # tracked and neither unknown ('?') nor removed ('r')
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        # drop the second parent when it is the null revision
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule files for addition; returns the list of rejected files."""
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        """Stop tracking files; returns the list of rejected files."""
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # file was only scheduled for addition: just drop it
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        """Restore removed files from a parent revision's content."""
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    # take content from whichever parent has the file
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        """Record that ``dest`` is a copy of ``source`` in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1569 1571
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # no committed revision backs this context yet
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodefor(pctx, fname):
            return pctx._manifest.get(fname, nullid)

        fname = self._path
        flog = self._filelog
        parentctxs = self._changectx._parents
        copysrc = self.renamed()

        # first candidate comes from the copy source when renamed,
        # otherwise from the first parent's manifest
        if copysrc:
            candidates = [copysrc + (None,)]
        else:
            candidates = [(fname, nodefor(parentctxs[0], fname), flog)]
        candidates.extend((fname, nodefor(pctx, fname), flog)
                          for pctx in parentctxs[1:])

        # drop candidates whose node is null (file absent in that parent)
        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        return []
1610 1612
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # read the current content from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        # copy information comes from the dirstate, not the filelog
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # prefer the on-disk mtime; keep the changectx timezone offset
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changectx date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1656 1658
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # intentionally skip workingctx.__init__ and hand ``changes``
        # straight to committablectx, fixing the status up front
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        modified, added, removed = (
            [f for f in files if match(f)]
            for files in (self._status.modified, self._status.added,
                          self._status.removed))
        return scmutil.status(modified, added, removed, [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        st = self._status
        return set(st.modified) | set(st.added) | set(st.removed)
1694 1696
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # replace missing (None) parents with the null node
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # "util.cachefunc" reduces invocation of possibly expensive
            # "filectxfn" for performance (e.g. converting from another VCS)
            self._filectxfn = util.cachefunc(filectxfn)

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
                if len(p) > 1:
                    p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # new file: hash with null parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned content: treat as modified
                modified.append(f)
            else:
                # filectxfn reported the file as gone: treat as removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
1828 1830
class memfilectx(committablefilectx):
    """An in-memory file, carried by a memctx for committing.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = ('l' if islink else '') + ('x' if isexec else '')
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def size(self):
        return len(self.data())

    def flags(self):
        return self._flags

    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
General Comments 0
You need to be logged in to leave comments. Login now