filectx: if we have a _descendantrev, use it to adjust linkrev...
Matt Mackall
r23983:ff070a53 stable
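The change in this diff (new lines 636-640 of context.py) teaches basefilectx._changeid to use a stored _descendantrev: when a file context was created from a revision with a known descendant, the owning changeset is resolved lazily via _adjustlinkrev() instead of trusting the filelog's stored linkrev, which can point at an unrelated changeset that happens to reuse the same file revision (a "linkrev alias"). The sketch below only illustrates that resolution order; the class, the plain @property, and names such as filectxsketch and _knownchangeid are simplified stand-ins for Mercurial's propertycache-based implementation, not its actual API.

class filectxsketch(object):
    """Illustrative stand-in only; not Mercurial's basefilectx."""

    def __init__(self, filelog, path, filenode, filerev,
                 changeid=None, changectx=None, descendantrev=None):
        self._filelog = filelog
        self._path = path
        self._filenode = filenode
        self._filerev = filerev
        if changeid is not None:
            # in the real class this sets _changeid directly, shadowing
            # the propertycache; renamed here because a plain property
            # cannot be shadowed that way
            self._knownchangeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if descendantrev is not None:
            self._descendantrev = descendantrev

    def _adjustlinkrev(self, path, filelog, fnode, srcrev):
        # placeholder for the real ancestor walk performed by
        # basefilectx._adjustlinkrev later in this file
        raise NotImplementedError

    @property
    def _changeid(self):
        # resolution order after this change:
        if '_knownchangeid' in self.__dict__:
            # 1. an explicit changeset was supplied at creation time
            return self._knownchangeid
        elif '_changectx' in self.__dict__:
            # 2. derive it from an attached changectx
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # 3. new: walk down from the known descendant to find the
            #    changeset that really introduced this file revision,
            #    correcting for linkrev aliases
            return self._adjustlinkrev(self._path, self._filelog,
                                       self._filenode, self._descendantrev)
        else:
            # 4. fall back to the raw linkrev stored in the filelog
            return self._filelog.linkrev(self._filerev)

The real ancestor walk is the _adjustlinkrev method defined later in this file (old line 749 / new line 754).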
@@ -1,1857 +1,1862 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand-in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 class basectx(object):
26 26 """A basectx object represents the common logic for its children:
27 27 changectx: read-only context that is already present in the repo,
28 28 workingctx: a context that represents the working directory and can
29 29 be committed,
30 30 memctx: a context that represents changes in-memory and can also
31 31 be committed."""
32 32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 33 if isinstance(changeid, basectx):
34 34 return changeid
35 35
36 36 o = super(basectx, cls).__new__(cls)
37 37
38 38 o._repo = repo
39 39 o._rev = nullrev
40 40 o._node = nullid
41 41
42 42 return o
43 43
44 44 def __str__(self):
45 45 return short(self.node())
46 46
47 47 def __int__(self):
48 48 return self.rev()
49 49
50 50 def __repr__(self):
51 51 return "<%s %s>" % (type(self).__name__, str(self))
52 52
53 53 def __eq__(self, other):
54 54 try:
55 55 return type(self) == type(other) and self._rev == other._rev
56 56 except AttributeError:
57 57 return False
58 58
59 59 def __ne__(self, other):
60 60 return not (self == other)
61 61
62 62 def __contains__(self, key):
63 63 return key in self._manifest
64 64
65 65 def __getitem__(self, key):
66 66 return self.filectx(key)
67 67
68 68 def __iter__(self):
69 69 for f in sorted(self._manifest):
70 70 yield f
71 71
72 72 def _manifestmatches(self, match, s):
73 73 """generate a new manifest filtered by the match argument
74 74
75 75 This method is for internal use only and mainly exists to provide an
76 76 object oriented way for other contexts to customize the manifest
77 77 generation.
78 78 """
79 79 return self.manifest().matches(match)
80 80
81 81 def _matchstatus(self, other, match):
82 82 """return match.always if match is none
83 83
84 84 This internal method provides a way for child objects to override the
85 85 match operator.
86 86 """
87 87 return match or matchmod.always(self._repo.root, self._repo.getcwd())
88 88
89 89 def _buildstatus(self, other, s, match, listignored, listclean,
90 90 listunknown):
91 91 """build a status with respect to another context"""
92 92 # Load earliest manifest first for caching reasons. More specifically,
93 93 # if you have revisions 1000 and 1001, 1001 is probably stored as a
94 94 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
95 95 # 1000 and cache it so that when you read 1001, we just need to apply a
96 96 # delta to what's in the cache. So that's one full reconstruction + one
97 97 # delta application.
98 98 if self.rev() is not None and self.rev() < other.rev():
99 99 self.manifest()
100 100 mf1 = other._manifestmatches(match, s)
101 101 mf2 = self._manifestmatches(match, s)
102 102
103 103 modified, added = [], []
104 104 removed = []
105 105 clean = []
106 106 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
107 107 deletedset = set(deleted)
108 108 d = mf1.diff(mf2, clean=listclean)
109 109 for fn, value in d.iteritems():
110 110 if fn in deletedset:
111 111 continue
112 112 if value is None:
113 113 clean.append(fn)
114 114 continue
115 115 (node1, flag1), (node2, flag2) = value
116 116 if node1 is None:
117 117 added.append(fn)
118 118 elif node2 is None:
119 119 removed.append(fn)
120 120 elif node2 != _newnode:
121 121 # The file was not a new file in mf2, so an entry
122 122 # from diff is really a difference.
123 123 modified.append(fn)
124 124 elif self[fn].cmp(other[fn]):
125 125 # node2 was newnode, but the working file doesn't
126 126 # match the one in mf1.
127 127 modified.append(fn)
128 128 else:
129 129 clean.append(fn)
130 130
131 131 if removed:
132 132 # need to filter files if they are already reported as removed
133 133 unknown = [fn for fn in unknown if fn not in mf1]
134 134 ignored = [fn for fn in ignored if fn not in mf1]
135 135 # if they're deleted, don't report them as removed
136 136 removed = [fn for fn in removed if fn not in deletedset]
137 137
138 138 return scmutil.status(modified, added, removed, deleted, unknown,
139 139 ignored, clean)
140 140
141 141 @propertycache
142 142 def substate(self):
143 143 return subrepo.state(self, self._repo.ui)
144 144
145 145 def subrev(self, subpath):
146 146 return self.substate[subpath][1]
147 147
148 148 def rev(self):
149 149 return self._rev
150 150 def node(self):
151 151 return self._node
152 152 def hex(self):
153 153 return hex(self.node())
154 154 def manifest(self):
155 155 return self._manifest
156 156 def phasestr(self):
157 157 return phases.phasenames[self.phase()]
158 158 def mutable(self):
159 159 return self.phase() > phases.public
160 160
161 161 def getfileset(self, expr):
162 162 return fileset.getfileset(self, expr)
163 163
164 164 def obsolete(self):
165 165 """True if the changeset is obsolete"""
166 166 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
167 167
168 168 def extinct(self):
169 169 """True if the changeset is extinct"""
170 170 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
171 171
172 172 def unstable(self):
173 173 """True if the changeset is not obsolete but it's ancestor are"""
174 174 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
175 175
176 176 def bumped(self):
177 177 """True if the changeset try to be a successor of a public changeset
178 178
179 179 Only non-public and non-obsolete changesets may be bumped.
180 180 """
181 181 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
182 182
183 183 def divergent(self):
184 184 """Is a successors of a changeset with multiple possible successors set
185 185
186 186 Only non-public and non-obsolete changesets may be divergent.
187 187 """
188 188 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
189 189
190 190 def troubled(self):
191 191 """True if the changeset is either unstable, bumped or divergent"""
192 192 return self.unstable() or self.bumped() or self.divergent()
193 193
194 194 def troubles(self):
195 195 """return the list of troubles affecting this changesets.
196 196
197 197 Troubles are returned as strings. possible values are:
198 198 - unstable,
199 199 - bumped,
200 200 - divergent.
201 201 """
202 202 troubles = []
203 203 if self.unstable():
204 204 troubles.append('unstable')
205 205 if self.bumped():
206 206 troubles.append('bumped')
207 207 if self.divergent():
208 208 troubles.append('divergent')
209 209 return troubles
210 210
211 211 def parents(self):
212 212 """return contexts for each parent changeset"""
213 213 return self._parents
214 214
215 215 def p1(self):
216 216 return self._parents[0]
217 217
218 218 def p2(self):
219 219 if len(self._parents) == 2:
220 220 return self._parents[1]
221 221 return changectx(self._repo, -1)
222 222
223 223 def _fileinfo(self, path):
224 224 if '_manifest' in self.__dict__:
225 225 try:
226 226 return self._manifest[path], self._manifest.flags(path)
227 227 except KeyError:
228 228 raise error.ManifestLookupError(self._node, path,
229 229 _('not found in manifest'))
230 230 if '_manifestdelta' in self.__dict__ or path in self.files():
231 231 if path in self._manifestdelta:
232 232 return (self._manifestdelta[path],
233 233 self._manifestdelta.flags(path))
234 234 node, flag = self._repo.manifest.find(self._changeset[0], path)
235 235 if not node:
236 236 raise error.ManifestLookupError(self._node, path,
237 237 _('not found in manifest'))
238 238
239 239 return node, flag
240 240
241 241 def filenode(self, path):
242 242 return self._fileinfo(path)[0]
243 243
244 244 def flags(self, path):
245 245 try:
246 246 return self._fileinfo(path)[1]
247 247 except error.LookupError:
248 248 return ''
249 249
250 250 def sub(self, path):
251 251 return subrepo.subrepo(self, path)
252 252
253 253 def match(self, pats=[], include=None, exclude=None, default='glob'):
254 254 r = self._repo
255 255 return matchmod.match(r.root, r.getcwd(), pats,
256 256 include, exclude, default,
257 257 auditor=r.auditor, ctx=self)
258 258
259 259 def diff(self, ctx2=None, match=None, **opts):
260 260 """Returns a diff generator for the given contexts and matcher"""
261 261 if ctx2 is None:
262 262 ctx2 = self.p1()
263 263 if ctx2 is not None:
264 264 ctx2 = self._repo[ctx2]
265 265 diffopts = patch.diffopts(self._repo.ui, opts)
266 266 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
267 267
268 268 @propertycache
269 269 def _dirs(self):
270 270 return scmutil.dirs(self._manifest)
271 271
272 272 def dirs(self):
273 273 return self._dirs
274 274
275 275 def dirty(self, missing=False, merge=True, branch=True):
276 276 return False
277 277
278 278 def status(self, other=None, match=None, listignored=False,
279 279 listclean=False, listunknown=False, listsubrepos=False):
280 280 """return status of files between two nodes or node and working
281 281 directory.
282 282
283 283 If other is None, compare this node with working directory.
284 284
285 285 returns (modified, added, removed, deleted, unknown, ignored, clean)
286 286 """
287 287
288 288 ctx1 = self
289 289 ctx2 = self._repo[other]
290 290
291 291 # This next code block is, admittedly, fragile logic that tests for
292 292 # reversing the contexts and wouldn't need to exist if it weren't for
293 293 # the fast (and common) code path of comparing the working directory
294 294 # with its first parent.
295 295 #
296 296 # What we're aiming for here is the ability to call:
297 297 #
298 298 # workingctx.status(parentctx)
299 299 #
300 300 # If we always built the manifest for each context and compared those,
301 301 # then we'd be done. But the special case of the above call means we
302 302 # just copy the manifest of the parent.
303 303 reversed = False
304 304 if (not isinstance(ctx1, changectx)
305 305 and isinstance(ctx2, changectx)):
306 306 reversed = True
307 307 ctx1, ctx2 = ctx2, ctx1
308 308
309 309 match = ctx2._matchstatus(ctx1, match)
310 310 r = scmutil.status([], [], [], [], [], [], [])
311 311 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
312 312 listunknown)
313 313
314 314 if reversed:
315 315 # Reverse added and removed. Clear deleted, unknown and ignored as
316 316 # these make no sense to reverse.
317 317 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
318 318 r.clean)
319 319
320 320 if listsubrepos:
321 321 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
322 322 rev2 = ctx2.subrev(subpath)
323 323 try:
324 324 submatch = matchmod.narrowmatcher(subpath, match)
325 325 s = sub.status(rev2, match=submatch, ignored=listignored,
326 326 clean=listclean, unknown=listunknown,
327 327 listsubrepos=True)
328 328 for rfiles, sfiles in zip(r, s):
329 329 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
330 330 except error.LookupError:
331 331 self._repo.ui.status(_("skipping missing "
332 332 "subrepository: %s\n") % subpath)
333 333
334 334 for l in r:
335 335 l.sort()
336 336
337 337 return r
338 338
339 339
340 340 def makememctx(repo, parents, text, user, date, branch, files, store,
341 341 editor=None):
342 342 def getfilectx(repo, memctx, path):
343 343 data, mode, copied = store.getfile(path)
344 344 if data is None:
345 345 return None
346 346 islink, isexec = mode
347 347 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
348 348 copied=copied, memctx=memctx)
349 349 extra = {}
350 350 if branch:
351 351 extra['branch'] = encoding.fromlocal(branch)
352 352 ctx = memctx(repo, parents, text, files, getfilectx, user,
353 353 date, extra, editor)
354 354 return ctx
355 355
356 356 class changectx(basectx):
357 357 """A changecontext object makes access to data related to a particular
358 358 changeset convenient. It represents a read-only context already present in
359 359 the repo."""
360 360 def __init__(self, repo, changeid=''):
361 361 """changeid is a revision number, node, or tag"""
362 362
363 363 # since basectx.__new__ already took care of copying the object, we
364 364 # don't need to do anything in __init__, so we just exit here
365 365 if isinstance(changeid, basectx):
366 366 return
367 367
368 368 if changeid == '':
369 369 changeid = '.'
370 370 self._repo = repo
371 371
372 372 try:
373 373 if isinstance(changeid, int):
374 374 self._node = repo.changelog.node(changeid)
375 375 self._rev = changeid
376 376 return
377 377 if isinstance(changeid, long):
378 378 changeid = str(changeid)
379 379 if changeid == '.':
380 380 self._node = repo.dirstate.p1()
381 381 self._rev = repo.changelog.rev(self._node)
382 382 return
383 383 if changeid == 'null':
384 384 self._node = nullid
385 385 self._rev = nullrev
386 386 return
387 387 if changeid == 'tip':
388 388 self._node = repo.changelog.tip()
389 389 self._rev = repo.changelog.rev(self._node)
390 390 return
391 391 if len(changeid) == 20:
392 392 try:
393 393 self._node = changeid
394 394 self._rev = repo.changelog.rev(changeid)
395 395 return
396 396 except error.FilteredRepoLookupError:
397 397 raise
398 398 except LookupError:
399 399 pass
400 400
401 401 try:
402 402 r = int(changeid)
403 403 if str(r) != changeid:
404 404 raise ValueError
405 405 l = len(repo.changelog)
406 406 if r < 0:
407 407 r += l
408 408 if r < 0 or r >= l:
409 409 raise ValueError
410 410 self._rev = r
411 411 self._node = repo.changelog.node(r)
412 412 return
413 413 except error.FilteredIndexError:
414 414 raise
415 415 except (ValueError, OverflowError, IndexError):
416 416 pass
417 417
418 418 if len(changeid) == 40:
419 419 try:
420 420 self._node = bin(changeid)
421 421 self._rev = repo.changelog.rev(self._node)
422 422 return
423 423 except error.FilteredLookupError:
424 424 raise
425 425 except (TypeError, LookupError):
426 426 pass
427 427
428 428 # lookup bookmarks through the name interface
429 429 try:
430 430 self._node = repo.names.singlenode(repo, changeid)
431 431 self._rev = repo.changelog.rev(self._node)
432 432 return
433 433 except KeyError:
434 434 pass
435 435 except error.FilteredRepoLookupError:
436 436 raise
437 437 except error.RepoLookupError:
438 438 pass
439 439
440 440 self._node = repo.unfiltered().changelog._partialmatch(changeid)
441 441 if self._node is not None:
442 442 self._rev = repo.changelog.rev(self._node)
443 443 return
444 444
445 445 # lookup failed
446 446 # check if it might have come from damaged dirstate
447 447 #
448 448 # XXX we could avoid the unfiltered if we had a recognizable
449 449 # exception for filtered changeset access
450 450 if changeid in repo.unfiltered().dirstate.parents():
451 451 msg = _("working directory has unknown parent '%s'!")
452 452 raise error.Abort(msg % short(changeid))
453 453 try:
454 454 if len(changeid) == 20:
455 455 changeid = hex(changeid)
456 456 except TypeError:
457 457 pass
458 458 except (error.FilteredIndexError, error.FilteredLookupError,
459 459 error.FilteredRepoLookupError):
460 460 if repo.filtername == 'visible':
461 461 msg = _("hidden revision '%s'") % changeid
462 462 hint = _('use --hidden to access hidden revisions')
463 463 raise error.FilteredRepoLookupError(msg, hint=hint)
464 464 msg = _("filtered revision '%s' (not in '%s' subset)")
465 465 msg %= (changeid, repo.filtername)
466 466 raise error.FilteredRepoLookupError(msg)
467 467 except IndexError:
468 468 pass
469 469 raise error.RepoLookupError(
470 470 _("unknown revision '%s'") % changeid)
471 471
472 472 def __hash__(self):
473 473 try:
474 474 return hash(self._rev)
475 475 except AttributeError:
476 476 return id(self)
477 477
478 478 def __nonzero__(self):
479 479 return self._rev != nullrev
480 480
481 481 @propertycache
482 482 def _changeset(self):
483 483 return self._repo.changelog.read(self.rev())
484 484
485 485 @propertycache
486 486 def _manifest(self):
487 487 return self._repo.manifest.read(self._changeset[0])
488 488
489 489 @propertycache
490 490 def _manifestdelta(self):
491 491 return self._repo.manifest.readdelta(self._changeset[0])
492 492
493 493 @propertycache
494 494 def _parents(self):
495 495 p = self._repo.changelog.parentrevs(self._rev)
496 496 if p[1] == nullrev:
497 497 p = p[:-1]
498 498 return [changectx(self._repo, x) for x in p]
499 499
500 500 def changeset(self):
501 501 return self._changeset
502 502 def manifestnode(self):
503 503 return self._changeset[0]
504 504
505 505 def user(self):
506 506 return self._changeset[1]
507 507 def date(self):
508 508 return self._changeset[2]
509 509 def files(self):
510 510 return self._changeset[3]
511 511 def description(self):
512 512 return self._changeset[4]
513 513 def branch(self):
514 514 return encoding.tolocal(self._changeset[5].get("branch"))
515 515 def closesbranch(self):
516 516 return 'close' in self._changeset[5]
517 517 def extra(self):
518 518 return self._changeset[5]
519 519 def tags(self):
520 520 return self._repo.nodetags(self._node)
521 521 def bookmarks(self):
522 522 return self._repo.nodebookmarks(self._node)
523 523 def phase(self):
524 524 return self._repo._phasecache.phase(self._repo, self._rev)
525 525 def hidden(self):
526 526 return self._rev in repoview.filterrevs(self._repo, 'visible')
527 527
528 528 def children(self):
529 529 """return contexts for each child changeset"""
530 530 c = self._repo.changelog.children(self._node)
531 531 return [changectx(self._repo, x) for x in c]
532 532
533 533 def ancestors(self):
534 534 for a in self._repo.changelog.ancestors([self._rev]):
535 535 yield changectx(self._repo, a)
536 536
537 537 def descendants(self):
538 538 for d in self._repo.changelog.descendants([self._rev]):
539 539 yield changectx(self._repo, d)
540 540
541 541 def filectx(self, path, fileid=None, filelog=None):
542 542 """get a file context from this changeset"""
543 543 if fileid is None:
544 544 fileid = self.filenode(path)
545 545 return filectx(self._repo, path, fileid=fileid,
546 546 changectx=self, filelog=filelog)
547 547
548 548 def ancestor(self, c2, warn=False):
549 549 """return the "best" ancestor context of self and c2
550 550
551 551 If there are multiple candidates, it will show a message and check
552 552 merge.preferancestor configuration before falling back to the
553 553 revlog ancestor."""
554 554 # deal with workingctxs
555 555 n2 = c2._node
556 556 if n2 is None:
557 557 n2 = c2._parents[0]._node
558 558 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
559 559 if not cahs:
560 560 anc = nullid
561 561 elif len(cahs) == 1:
562 562 anc = cahs[0]
563 563 else:
564 564 for r in self._repo.ui.configlist('merge', 'preferancestor'):
565 565 try:
566 566 ctx = changectx(self._repo, r)
567 567 except error.RepoLookupError:
568 568 continue
569 569 anc = ctx.node()
570 570 if anc in cahs:
571 571 break
572 572 else:
573 573 anc = self._repo.changelog.ancestor(self._node, n2)
574 574 if warn:
575 575 self._repo.ui.status(
576 576 (_("note: using %s as ancestor of %s and %s\n") %
577 577 (short(anc), short(self._node), short(n2))) +
578 578 ''.join(_(" alternatively, use --config "
579 579 "merge.preferancestor=%s\n") %
580 580 short(n) for n in sorted(cahs) if n != anc))
581 581 return changectx(self._repo, anc)
582 582
583 583 def descendant(self, other):
584 584 """True if other is descendant of this changeset"""
585 585 return self._repo.changelog.descendant(self._rev, other._rev)
586 586
587 587 def walk(self, match):
588 588 fset = set(match.files())
589 589 # for dirstate.walk, files=['.'] means "walk the whole tree".
590 590 # follow that here, too
591 591 fset.discard('.')
592 592
593 593 # avoid the entire walk if we're only looking for specific files
594 594 if fset and not match.anypats():
595 595 if util.all([fn in self for fn in fset]):
596 596 for fn in sorted(fset):
597 597 if match(fn):
598 598 yield fn
599 599 raise StopIteration
600 600
601 601 for fn in self:
602 602 if fn in fset:
603 603 # specified pattern is the exact name
604 604 fset.remove(fn)
605 605 if match(fn):
606 606 yield fn
607 607 for fn in sorted(fset):
608 608 if fn in self._dirs:
609 609 # specified pattern is a directory
610 610 continue
611 611 match.bad(fn, _('no such file in rev %s') % self)
612 612
613 613 def matches(self, match):
614 614 return self.walk(match)
615 615
616 616 class basefilectx(object):
617 617 """A filecontext object represents the common logic for its children:
618 618 filectx: read-only access to a filerevision that is already present
619 619 in the repo,
620 620 workingfilectx: a filecontext that represents files from the working
621 621 directory,
622 622 memfilectx: a filecontext that represents files in-memory."""
623 623 def __new__(cls, repo, path, *args, **kwargs):
624 624 return super(basefilectx, cls).__new__(cls)
625 625
626 626 @propertycache
627 627 def _filelog(self):
628 628 return self._repo.file(self._path)
629 629
630 630 @propertycache
631 631 def _changeid(self):
632 632 if '_changeid' in self.__dict__:
633 633 return self._changeid
634 634 elif '_changectx' in self.__dict__:
635 635 return self._changectx.rev()
636 elif '_descendantrev' in self.__dict__:
637 # this file context was created from a revision with a known
638 # descendant, we can (lazily) correct for linkrev aliases
639 return self._adjustlinkrev(self._path, self._filelog,
640 self._filenode, self._descendantrev)
636 641 else:
637 642 return self._filelog.linkrev(self._filerev)
638 643
639 644 @propertycache
640 645 def _filenode(self):
641 646 if '_fileid' in self.__dict__:
642 647 return self._filelog.lookup(self._fileid)
643 648 else:
644 649 return self._changectx.filenode(self._path)
645 650
646 651 @propertycache
647 652 def _filerev(self):
648 653 return self._filelog.rev(self._filenode)
649 654
650 655 @propertycache
651 656 def _repopath(self):
652 657 return self._path
653 658
654 659 def __nonzero__(self):
655 660 try:
656 661 self._filenode
657 662 return True
658 663 except error.LookupError:
659 664 # file is missing
660 665 return False
661 666
662 667 def __str__(self):
663 668 return "%s@%s" % (self.path(), self._changectx)
664 669
665 670 def __repr__(self):
666 671 return "<%s %s>" % (type(self).__name__, str(self))
667 672
668 673 def __hash__(self):
669 674 try:
670 675 return hash((self._path, self._filenode))
671 676 except AttributeError:
672 677 return id(self)
673 678
674 679 def __eq__(self, other):
675 680 try:
676 681 return (type(self) == type(other) and self._path == other._path
677 682 and self._filenode == other._filenode)
678 683 except AttributeError:
679 684 return False
680 685
681 686 def __ne__(self, other):
682 687 return not (self == other)
683 688
684 689 def filerev(self):
685 690 return self._filerev
686 691 def filenode(self):
687 692 return self._filenode
688 693 def flags(self):
689 694 return self._changectx.flags(self._path)
690 695 def filelog(self):
691 696 return self._filelog
692 697 def rev(self):
693 698 return self._changeid
694 699 def linkrev(self):
695 700 return self._filelog.linkrev(self._filerev)
696 701 def node(self):
697 702 return self._changectx.node()
698 703 def hex(self):
699 704 return self._changectx.hex()
700 705 def user(self):
701 706 return self._changectx.user()
702 707 def date(self):
703 708 return self._changectx.date()
704 709 def files(self):
705 710 return self._changectx.files()
706 711 def description(self):
707 712 return self._changectx.description()
708 713 def branch(self):
709 714 return self._changectx.branch()
710 715 def extra(self):
711 716 return self._changectx.extra()
712 717 def phase(self):
713 718 return self._changectx.phase()
714 719 def phasestr(self):
715 720 return self._changectx.phasestr()
716 721 def manifest(self):
717 722 return self._changectx.manifest()
718 723 def changectx(self):
719 724 return self._changectx
720 725
721 726 def path(self):
722 727 return self._path
723 728
724 729 def isbinary(self):
725 730 try:
726 731 return util.binary(self.data())
727 732 except IOError:
728 733 return False
729 734 def isexec(self):
730 735 return 'x' in self.flags()
731 736 def islink(self):
732 737 return 'l' in self.flags()
733 738
734 739 def cmp(self, fctx):
735 740 """compare with other file context
736 741
737 742 returns True if different than fctx.
738 743 """
739 744 if (fctx._filerev is None
740 745 and (self._repo._encodefilterpats
741 746 # if file data starts with '\1\n', empty metadata block is
742 747 # prepended, which adds 4 bytes to filelog.size().
743 748 or self.size() - 4 == fctx.size())
744 749 or self.size() == fctx.size()):
745 750 return self._filelog.cmp(self._filenode, fctx.data())
746 751
747 752 return True
748 753
749 754 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
750 755 """return the first ancestor of <srcrev> introducting <fnode>
751 756
752 757 If the linkrev of the file revision does not point to an ancestor of
753 758 srcrev, we'll walk down the ancestors until we find one introducing
754 759 this file revision.
755 760
756 761 :repo: a localrepository object (used to access changelog and manifest)
757 762 :path: the file path
758 763 :fnode: the nodeid of the file revision
759 764 :filelog: the filelog of this path
760 765 :srcrev: the changeset revision we search ancestors from
761 766 :inclusive: if true, the src revision will also be checked
762 767 """
763 768 repo = self._repo
764 769 cl = repo.unfiltered().changelog
765 770 ma = repo.manifest
766 771 # fetch the linkrev
767 772 fr = filelog.rev(fnode)
768 773 lkr = filelog.linkrev(fr)
769 774 # hack to reuse ancestor computation when searching for renames
770 775 memberanc = getattr(self, '_ancestrycontext', None)
771 776 iteranc = None
772 777 if memberanc is None:
773 778 memberanc = iteranc = cl.ancestors([srcrev], lkr,
774 779 inclusive=inclusive)
775 780 # check if this linkrev is an ancestor of srcrev
776 781 if lkr not in memberanc:
777 782 if iteranc is None:
778 783 iteranc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
779 784 for a in iteranc:
780 785 ac = cl.read(a) # get changeset data (we avoid object creation)
781 786 if path in ac[3]: # checking the 'files' field.
782 787 # The file has been touched, check if the content is
783 788 # similar to the one we search for.
784 789 if fnode == ma.readfast(ac[0]).get(path):
785 790 return a
786 791 # In theory, we should never get out of that loop without a result.
787 792 # But if manifest uses a buggy file revision (not children of the
788 793 # one it replaces) we could. Such a buggy situation will likely
789 794 # result in a crash somewhere else at some point.
790 795 return lkr
791 796
792 797 def introrev(self):
793 798 """return the rev of the changeset which introduced this file revision
794 799
795 800 This method is different from linkrev because it takes into account the
796 801 changeset the filectx was created from. It ensures the returned
797 802 revision is one of its ancestors. This prevents bugs from
798 803 'linkrev-shadowing' when a file revision is used by multiple
799 804 changesets.
800 805 """
801 806 lkr = self.linkrev()
802 807 attrs = vars(self)
803 808 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
804 809 if noctx or self.rev() == lkr:
805 810 return self.linkrev()
806 811 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
807 812 self.rev(), inclusive=True)
808 813
809 814 def parents(self):
810 815 _path = self._path
811 816 fl = self._filelog
812 817 parents = self._filelog.parents(self._filenode)
813 818 pl = [(_path, node, fl) for node in parents if node != nullid]
814 819
815 820 r = fl.renamed(self._filenode)
816 821 if r:
817 822 # - In the simple rename case, both parents are nullid, pl is empty.
818 823 # - In case of merge, only one of the parents is nullid and should
819 824 # be replaced with the rename information. This parent is -always-
820 825 # the first one.
821 826 #
822 827 # As nullid parents have always been filtered out in the previous list
823 828 # comprehension, inserting at 0 will always result in replacing the
824 829 # first nullid parent with rename information.
825 830 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
826 831
827 832 ret = []
828 833 for path, fnode, l in pl:
829 834 if '_changeid' in vars(self) or '_changectx' in vars(self):
830 835 # If self is associated with a changeset (probably explicitly
831 836 # fed), ensure the created filectx is associated with a
832 837 # changeset that is an ancestor of self.changectx.
833 838 rev = self._adjustlinkrev(path, l, fnode, self.rev())
834 839 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
835 840 changeid=rev)
836 841 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
837 842
838 843 else:
839 844 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
840 845 ret.append(fctx)
841 846 return ret
842 847
843 848 def p1(self):
844 849 return self.parents()[0]
845 850
846 851 def p2(self):
847 852 p = self.parents()
848 853 if len(p) == 2:
849 854 return p[1]
850 855 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
851 856
852 857 def annotate(self, follow=False, linenumber=None, diffopts=None):
853 858 '''returns a list of tuples of (ctx, line) for each line
854 859 in the file, where ctx is the filectx of the node where
855 860 that line was last changed.
856 861 This returns tuples of ((ctx, linenumber), line) for each line,
857 862 if "linenumber" parameter is NOT "None".
858 863 In such tuples, linenumber means one at the first appearance
859 864 in the managed file.
860 865 To reduce annotation cost,
861 866 this returns fixed value(False is used) as linenumber,
862 867 if "linenumber" parameter is "False".'''
863 868
864 869 if linenumber is None:
865 870 def decorate(text, rev):
866 871 return ([rev] * len(text.splitlines()), text)
867 872 elif linenumber:
868 873 def decorate(text, rev):
869 874 size = len(text.splitlines())
870 875 return ([(rev, i) for i in xrange(1, size + 1)], text)
871 876 else:
872 877 def decorate(text, rev):
873 878 return ([(rev, False)] * len(text.splitlines()), text)
874 879
875 880 def pair(parent, child):
876 881 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
877 882 refine=True)
878 883 for (a1, a2, b1, b2), t in blocks:
879 884 # Changed blocks ('!') or blocks made only of blank lines ('~')
880 885 # belong to the child.
881 886 if t == '=':
882 887 child[0][b1:b2] = parent[0][a1:a2]
883 888 return child
884 889
885 890 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
886 891
887 892 def parents(f):
888 893 pl = f.parents()
889 894
890 895 # Don't return renamed parents if we aren't following.
891 896 if not follow:
892 897 pl = [p for p in pl if p.path() == f.path()]
893 898
894 899 # renamed filectx won't have a filelog yet, so set it
895 900 # from the cache to save time
896 901 for p in pl:
897 902 if not '_filelog' in p.__dict__:
898 903 p._filelog = getlog(p.path())
899 904
900 905 return pl
901 906
902 907 # use linkrev to find the first changeset where self appeared
903 908 base = self
904 909 introrev = self.introrev()
905 910 if self.rev() != introrev:
906 911 base = self.filectx(self.filenode(), changeid=introrev)
907 912
908 913 # This algorithm would prefer to be recursive, but Python is a
909 914 # bit recursion-hostile. Instead we do an iterative
910 915 # depth-first search.
911 916
912 917 visit = [base]
913 918 hist = {}
914 919 pcache = {}
915 920 needed = {base: 1}
916 921 while visit:
917 922 f = visit[-1]
918 923 pcached = f in pcache
919 924 if not pcached:
920 925 pcache[f] = parents(f)
921 926
922 927 ready = True
923 928 pl = pcache[f]
924 929 for p in pl:
925 930 if p not in hist:
926 931 ready = False
927 932 visit.append(p)
928 933 if not pcached:
929 934 needed[p] = needed.get(p, 0) + 1
930 935 if ready:
931 936 visit.pop()
932 937 reusable = f in hist
933 938 if reusable:
934 939 curr = hist[f]
935 940 else:
936 941 curr = decorate(f.data(), f)
937 942 for p in pl:
938 943 if not reusable:
939 944 curr = pair(hist[p], curr)
940 945 if needed[p] == 1:
941 946 del hist[p]
942 947 del needed[p]
943 948 else:
944 949 needed[p] -= 1
945 950
946 951 hist[f] = curr
947 952 pcache[f] = []
948 953
949 954 return zip(hist[base][0], hist[base][1].splitlines(True))
950 955
951 956 def ancestors(self, followfirst=False):
952 957 visit = {}
953 958 c = self
954 959 cut = followfirst and 1 or None
955 960 while True:
956 961 for parent in c.parents()[:cut]:
957 962 visit[(parent.linkrev(), parent.filenode())] = parent
958 963 if not visit:
959 964 break
960 965 c = visit.pop(max(visit))
961 966 yield c
962 967
963 968 class filectx(basefilectx):
964 969 """A filecontext object makes access to data related to a particular
965 970 filerevision convenient."""
966 971 def __init__(self, repo, path, changeid=None, fileid=None,
967 972 filelog=None, changectx=None):
968 973 """changeid can be a changeset revision, node, or tag.
969 974 fileid can be a file revision or node."""
970 975 self._repo = repo
971 976 self._path = path
972 977
973 978 assert (changeid is not None
974 979 or fileid is not None
975 980 or changectx is not None), \
976 981 ("bad args: changeid=%r, fileid=%r, changectx=%r"
977 982 % (changeid, fileid, changectx))
978 983
979 984 if filelog is not None:
980 985 self._filelog = filelog
981 986
982 987 if changeid is not None:
983 988 self._changeid = changeid
984 989 if changectx is not None:
985 990 self._changectx = changectx
986 991 if fileid is not None:
987 992 self._fileid = fileid
988 993
989 994 @propertycache
990 995 def _changectx(self):
991 996 try:
992 997 return changectx(self._repo, self._changeid)
993 998 except error.FilteredRepoLookupError:
994 999 # Linkrev may point to any revision in the repository. When the
995 1000 # repository is filtered this may lead to `filectx` trying to build
996 1001 # `changectx` for a filtered revision. In such a case we fall back to
997 1002 # creating `changectx` on the unfiltered version of the repository.
998 1003 # This fallback should not be an issue because `changectx` from
999 1004 # `filectx` are not used in complex operations that care about
1000 1005 # filtering.
1001 1006 #
1002 1007 # This fallback is a cheap and dirty fix that prevents several
1003 1008 # crashes. It does not ensure the behavior is correct. However the
1004 1009 # behavior was not correct before filtering either, and "incorrect
1005 1010 # behavior" is seen as better than "crash".
1006 1011 #
1007 1012 # Linkrevs have several serious troubles with filtering that are
1008 1013 # complicated to solve. Proper handling of the issue here should be
1009 1014 # considered once solving the linkrev issues is on the table.
1010 1015 return changectx(self._repo.unfiltered(), self._changeid)
1011 1016
1012 1017 def filectx(self, fileid, changeid=None):
1013 1018 '''opens an arbitrary revision of the file without
1014 1019 opening a new filelog'''
1015 1020 return filectx(self._repo, self._path, fileid=fileid,
1016 1021 filelog=self._filelog, changeid=changeid)
1017 1022
1018 1023 def data(self):
1019 1024 try:
1020 1025 return self._filelog.read(self._filenode)
1021 1026 except error.CensoredNodeError:
1022 1027 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1023 1028 return ""
1024 1029 raise util.Abort(_("censored node: %s") % short(self._filenode),
1025 1030 hint=_("set censor.policy to ignore errors"))
1026 1031
1027 1032 def size(self):
1028 1033 return self._filelog.size(self._filerev)
1029 1034
1030 1035 def renamed(self):
1031 1036 """check if file was actually renamed in this changeset revision
1032 1037
1033 1038 If a rename is logged in the file revision, we report the copy only
1034 1039 if the file revision's linkrev points back to the changeset in question
1035 1040 or both changeset parents contain different file revisions.
1036 1041 """
1037 1042
1038 1043 renamed = self._filelog.renamed(self._filenode)
1039 1044 if not renamed:
1040 1045 return renamed
1041 1046
1042 1047 if self.rev() == self.linkrev():
1043 1048 return renamed
1044 1049
1045 1050 name = self.path()
1046 1051 fnode = self._filenode
1047 1052 for p in self._changectx.parents():
1048 1053 try:
1049 1054 if fnode == p.filenode(name):
1050 1055 return None
1051 1056 except error.LookupError:
1052 1057 pass
1053 1058 return renamed
1054 1059
1055 1060 def children(self):
1056 1061 # hard for renames
1057 1062 c = self._filelog.children(self._filenode)
1058 1063 return [filectx(self._repo, self._path, fileid=x,
1059 1064 filelog=self._filelog) for x in c]
1060 1065
1061 1066 class committablectx(basectx):
1062 1067 """A committablectx object provides common functionality for a context that
1063 1068 wants the ability to commit, e.g. workingctx or memctx."""
1064 1069 def __init__(self, repo, text="", user=None, date=None, extra=None,
1065 1070 changes=None):
1066 1071 self._repo = repo
1067 1072 self._rev = None
1068 1073 self._node = None
1069 1074 self._text = text
1070 1075 if date:
1071 1076 self._date = util.parsedate(date)
1072 1077 if user:
1073 1078 self._user = user
1074 1079 if changes:
1075 1080 self._status = changes
1076 1081
1077 1082 self._extra = {}
1078 1083 if extra:
1079 1084 self._extra = extra.copy()
1080 1085 if 'branch' not in self._extra:
1081 1086 try:
1082 1087 branch = encoding.fromlocal(self._repo.dirstate.branch())
1083 1088 except UnicodeDecodeError:
1084 1089 raise util.Abort(_('branch name not in UTF-8!'))
1085 1090 self._extra['branch'] = branch
1086 1091 if self._extra['branch'] == '':
1087 1092 self._extra['branch'] = 'default'
1088 1093
1089 1094 def __str__(self):
1090 1095 return str(self._parents[0]) + "+"
1091 1096
1092 1097 def __nonzero__(self):
1093 1098 return True
1094 1099
1095 1100 def _buildflagfunc(self):
1096 1101 # Create a fallback function for getting file flags when the
1097 1102 # filesystem doesn't support them
1098 1103
1099 1104 copiesget = self._repo.dirstate.copies().get
1100 1105
1101 1106 if len(self._parents) < 2:
1102 1107 # when we have one parent, it's easy: copy from parent
1103 1108 man = self._parents[0].manifest()
1104 1109 def func(f):
1105 1110 f = copiesget(f, f)
1106 1111 return man.flags(f)
1107 1112 else:
1108 1113 # merges are tricky: we try to reconstruct the unstored
1109 1114 # result from the merge (issue1802)
1110 1115 p1, p2 = self._parents
1111 1116 pa = p1.ancestor(p2)
1112 1117 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1113 1118
1114 1119 def func(f):
1115 1120 f = copiesget(f, f) # may be wrong for merges with copies
1116 1121 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1117 1122 if fl1 == fl2:
1118 1123 return fl1
1119 1124 if fl1 == fla:
1120 1125 return fl2
1121 1126 if fl2 == fla:
1122 1127 return fl1
1123 1128 return '' # punt for conflicts
1124 1129
1125 1130 return func
1126 1131
1127 1132 @propertycache
1128 1133 def _flagfunc(self):
1129 1134 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1130 1135
1131 1136 @propertycache
1132 1137 def _manifest(self):
1133 1138 """generate a manifest corresponding to the values in self._status
1134 1139
1135 1140 This reuses the file nodeid from the parent, but we append an extra letter
1136 1141 when modified. Modified files get an extra 'm' while added files get
1137 1142 an extra 'a'. This is used by manifests merge to see that files
1138 1143 are different and by update logic to avoid deleting newly added files.
1139 1144 """
1140 1145
1141 1146 man1 = self._parents[0].manifest()
1142 1147 man = man1.copy()
1143 1148 if len(self._parents) > 1:
1144 1149 man2 = self.p2().manifest()
1145 1150 def getman(f):
1146 1151 if f in man1:
1147 1152 return man1
1148 1153 return man2
1149 1154 else:
1150 1155 getman = lambda f: man1
1151 1156
1152 1157 copied = self._repo.dirstate.copies()
1153 1158 ff = self._flagfunc
1154 1159 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1155 1160 for f in l:
1156 1161 orig = copied.get(f, f)
1157 1162 man[f] = getman(orig).get(orig, nullid) + i
1158 1163 try:
1159 1164 man.setflag(f, ff(f))
1160 1165 except OSError:
1161 1166 pass
1162 1167
1163 1168 for f in self._status.deleted + self._status.removed:
1164 1169 if f in man:
1165 1170 del man[f]
1166 1171
1167 1172 return man
1168 1173
1169 1174 @propertycache
1170 1175 def _status(self):
1171 1176 return self._repo.status()
1172 1177
1173 1178 @propertycache
1174 1179 def _user(self):
1175 1180 return self._repo.ui.username()
1176 1181
1177 1182 @propertycache
1178 1183 def _date(self):
1179 1184 return util.makedate()
1180 1185
1181 1186 def subrev(self, subpath):
1182 1187 return None
1183 1188
1184 1189 def user(self):
1185 1190 return self._user or self._repo.ui.username()
1186 1191 def date(self):
1187 1192 return self._date
1188 1193 def description(self):
1189 1194 return self._text
1190 1195 def files(self):
1191 1196 return sorted(self._status.modified + self._status.added +
1192 1197 self._status.removed)
1193 1198
1194 1199 def modified(self):
1195 1200 return self._status.modified
1196 1201 def added(self):
1197 1202 return self._status.added
1198 1203 def removed(self):
1199 1204 return self._status.removed
1200 1205 def deleted(self):
1201 1206 return self._status.deleted
1202 1207 def branch(self):
1203 1208 return encoding.tolocal(self._extra['branch'])
1204 1209 def closesbranch(self):
1205 1210 return 'close' in self._extra
1206 1211 def extra(self):
1207 1212 return self._extra
1208 1213
1209 1214 def tags(self):
1210 1215 t = []
1211 1216 for p in self.parents():
1212 1217 t.extend(p.tags())
1213 1218 return t
1214 1219
1215 1220 def bookmarks(self):
1216 1221 b = []
1217 1222 for p in self.parents():
1218 1223 b.extend(p.bookmarks())
1219 1224 return b
1220 1225
1221 1226 def phase(self):
1222 1227 phase = phases.draft # default phase to draft
1223 1228 for p in self.parents():
1224 1229 phase = max(phase, p.phase())
1225 1230 return phase
1226 1231
1227 1232 def hidden(self):
1228 1233 return False
1229 1234
1230 1235 def children(self):
1231 1236 return []
1232 1237
1233 1238 def flags(self, path):
1234 1239 if '_manifest' in self.__dict__:
1235 1240 try:
1236 1241 return self._manifest.flags(path)
1237 1242 except KeyError:
1238 1243 return ''
1239 1244
1240 1245 try:
1241 1246 return self._flagfunc(path)
1242 1247 except OSError:
1243 1248 return ''
1244 1249
1245 1250 def ancestor(self, c2):
1246 1251 """return the "best" ancestor context of self and c2"""
1247 1252 return self._parents[0].ancestor(c2) # punt on two parents for now
1248 1253
1249 1254 def walk(self, match):
1250 1255 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1251 1256 True, False))
1252 1257
1253 1258 def matches(self, match):
1254 1259 return sorted(self._repo.dirstate.matches(match))
1255 1260
1256 1261 def ancestors(self):
1257 1262 for p in self._parents:
1258 1263 yield p
1259 1264 for a in self._repo.changelog.ancestors(
1260 1265 [p.rev() for p in self._parents]):
1261 1266 yield changectx(self._repo, a)
1262 1267
1263 1268 def markcommitted(self, node):
1264 1269 """Perform post-commit cleanup necessary after committing this ctx
1265 1270
1266 1271 Specifically, this updates backing stores this working context
1267 1272 wraps to reflect the fact that the changes reflected by this
1268 1273 workingctx have been committed. For example, it marks
1269 1274 modified and added files as normal in the dirstate.
1270 1275
1271 1276 """
1272 1277
1273 1278 self._repo.dirstate.beginparentchange()
1274 1279 for f in self.modified() + self.added():
1275 1280 self._repo.dirstate.normal(f)
1276 1281 for f in self.removed():
1277 1282 self._repo.dirstate.drop(f)
1278 1283 self._repo.dirstate.setparents(node)
1279 1284 self._repo.dirstate.endparentchange()
1280 1285
1281 1286 def dirs(self):
1282 1287 return self._repo.dirstate.dirs()
1283 1288
1284 1289 class workingctx(committablectx):
1285 1290 """A workingctx object makes access to data related to
1286 1291 the current working directory convenient.
1287 1292 date - any valid date string or (unixtime, offset), or None.
1288 1293 user - username string, or None.
1289 1294 extra - a dictionary of extra values, or None.
1290 1295 changes - a list of file lists as returned by localrepo.status()
1291 1296 or None to use the repository status.
1292 1297 """
1293 1298 def __init__(self, repo, text="", user=None, date=None, extra=None,
1294 1299 changes=None):
1295 1300 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1296 1301
1297 1302 def __iter__(self):
1298 1303 d = self._repo.dirstate
1299 1304 for f in d:
1300 1305 if d[f] != 'r':
1301 1306 yield f
1302 1307
1303 1308 def __contains__(self, key):
1304 1309 return self._repo.dirstate[key] not in "?r"
1305 1310
1306 1311 @propertycache
1307 1312 def _parents(self):
1308 1313 p = self._repo.dirstate.parents()
1309 1314 if p[1] == nullid:
1310 1315 p = p[:-1]
1311 1316 return [changectx(self._repo, x) for x in p]
1312 1317
1313 1318 def filectx(self, path, filelog=None):
1314 1319 """get a file context from the working directory"""
1315 1320 return workingfilectx(self._repo, path, workingctx=self,
1316 1321 filelog=filelog)
1317 1322
1318 1323 def dirty(self, missing=False, merge=True, branch=True):
1319 1324 "check whether a working directory is modified"
1320 1325 # check subrepos first
1321 1326 for s in sorted(self.substate):
1322 1327 if self.sub(s).dirty():
1323 1328 return True
1324 1329 # check current working dir
1325 1330 return ((merge and self.p2()) or
1326 1331 (branch and self.branch() != self.p1().branch()) or
1327 1332 self.modified() or self.added() or self.removed() or
1328 1333 (missing and self.deleted()))
1329 1334
1330 1335 def add(self, list, prefix=""):
1331 1336 join = lambda f: os.path.join(prefix, f)
1332 1337 wlock = self._repo.wlock()
1333 1338 ui, ds = self._repo.ui, self._repo.dirstate
1334 1339 try:
1335 1340 rejected = []
1336 1341 lstat = self._repo.wvfs.lstat
1337 1342 for f in list:
1338 1343 scmutil.checkportable(ui, join(f))
1339 1344 try:
1340 1345 st = lstat(f)
1341 1346 except OSError:
1342 1347 ui.warn(_("%s does not exist!\n") % join(f))
1343 1348 rejected.append(f)
1344 1349 continue
1345 1350 if st.st_size > 10000000:
1346 1351 ui.warn(_("%s: up to %d MB of RAM may be required "
1347 1352 "to manage this file\n"
1348 1353 "(use 'hg revert %s' to cancel the "
1349 1354 "pending addition)\n")
1350 1355 % (f, 3 * st.st_size // 1000000, join(f)))
1351 1356 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1352 1357 ui.warn(_("%s not added: only files and symlinks "
1353 1358 "supported currently\n") % join(f))
1354 1359 rejected.append(f)
1355 1360 elif ds[f] in 'amn':
1356 1361 ui.warn(_("%s already tracked!\n") % join(f))
1357 1362 elif ds[f] == 'r':
1358 1363 ds.normallookup(f)
1359 1364 else:
1360 1365 ds.add(f)
1361 1366 return rejected
1362 1367 finally:
1363 1368 wlock.release()
1364 1369
1365 1370 def forget(self, files, prefix=""):
1366 1371 join = lambda f: os.path.join(prefix, f)
1367 1372 wlock = self._repo.wlock()
1368 1373 try:
1369 1374 rejected = []
1370 1375 for f in files:
1371 1376 if f not in self._repo.dirstate:
1372 1377 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1373 1378 rejected.append(f)
1374 1379 elif self._repo.dirstate[f] != 'a':
1375 1380 self._repo.dirstate.remove(f)
1376 1381 else:
1377 1382 self._repo.dirstate.drop(f)
1378 1383 return rejected
1379 1384 finally:
1380 1385 wlock.release()
1381 1386
1382 1387 def undelete(self, list):
1383 1388 pctxs = self.parents()
1384 1389 wlock = self._repo.wlock()
1385 1390 try:
1386 1391 for f in list:
1387 1392 if self._repo.dirstate[f] != 'r':
1388 1393 self._repo.ui.warn(_("%s not removed!\n") % f)
1389 1394 else:
1390 1395 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1391 1396 t = fctx.data()
1392 1397 self._repo.wwrite(f, t, fctx.flags())
1393 1398 self._repo.dirstate.normal(f)
1394 1399 finally:
1395 1400 wlock.release()
1396 1401
1397 1402 def copy(self, source, dest):
1398 1403 try:
1399 1404 st = self._repo.wvfs.lstat(dest)
1400 1405 except OSError, err:
1401 1406 if err.errno != errno.ENOENT:
1402 1407 raise
1403 1408 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1404 1409 return
1405 1410 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1406 1411 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1407 1412 "symbolic link\n") % dest)
1408 1413 else:
1409 1414 wlock = self._repo.wlock()
1410 1415 try:
1411 1416 if self._repo.dirstate[dest] in '?':
1412 1417 self._repo.dirstate.add(dest)
1413 1418 elif self._repo.dirstate[dest] in 'r':
1414 1419 self._repo.dirstate.normallookup(dest)
1415 1420 self._repo.dirstate.copy(source, dest)
1416 1421 finally:
1417 1422 wlock.release()
1418 1423
1419 1424 def _filtersuspectsymlink(self, files):
1420 1425 if not files or self._repo.dirstate._checklink:
1421 1426 return files
1422 1427
1423 1428 # Symlink placeholders may get non-symlink-like contents
1424 1429 # via user error or dereferencing by NFS or Samba servers,
1425 1430 # so we filter out any placeholders that don't look like a
1426 1431 # symlink
1427 1432 sane = []
1428 1433 for f in files:
1429 1434 if self.flags(f) == 'l':
1430 1435 d = self[f].data()
1431 1436 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1432 1437 self._repo.ui.debug('ignoring suspect symlink placeholder'
1433 1438 ' "%s"\n' % f)
1434 1439 continue
1435 1440 sane.append(f)
1436 1441 return sane
1437 1442
1438 1443 def _checklookup(self, files):
1439 1444 # check for any possibly clean files
1440 1445 if not files:
1441 1446 return [], []
1442 1447
1443 1448 modified = []
1444 1449 fixup = []
1445 1450 pctx = self._parents[0]
1446 1451 # do a full compare of any files that might have changed
1447 1452 for f in sorted(files):
1448 1453 if (f not in pctx or self.flags(f) != pctx.flags(f)
1449 1454 or pctx[f].cmp(self[f])):
1450 1455 modified.append(f)
1451 1456 else:
1452 1457 fixup.append(f)
1453 1458
1454 1459 # update dirstate for files that are actually clean
1455 1460 if fixup:
1456 1461 try:
1457 1462 # updating the dirstate is optional
1458 1463 # so we don't wait on the lock
1459 1464 # wlock can invalidate the dirstate, so cache normal _after_
1460 1465 # taking the lock
1461 1466 wlock = self._repo.wlock(False)
1462 1467 normal = self._repo.dirstate.normal
1463 1468 try:
1464 1469 for f in fixup:
1465 1470 normal(f)
1466 1471 finally:
1467 1472 wlock.release()
1468 1473 except error.LockError:
1469 1474 pass
1470 1475 return modified, fixup
1471 1476
1472 1477 def _manifestmatches(self, match, s):
1473 1478 """Slow path for workingctx
1474 1479
1475 1480 The fast path is when we compare the working directory to its parent
1476 1481 which means this function is comparing with a non-parent; therefore we
1477 1482 need to build a manifest and return what matches.
1478 1483 """
1479 1484 mf = self._repo['.']._manifestmatches(match, s)
1480 1485 for f in s.modified + s.added:
1481 1486 mf[f] = _newnode
1482 1487 mf.setflag(f, self.flags(f))
1483 1488 for f in s.removed:
1484 1489 if f in mf:
1485 1490 del mf[f]
1486 1491 return mf
1487 1492
1488 1493 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1489 1494 unknown=False):
1490 1495 '''Gets the status from the dirstate -- internal use only.'''
1491 1496 listignored, listclean, listunknown = ignored, clean, unknown
1492 1497 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1493 1498 subrepos = []
1494 1499 if '.hgsub' in self:
1495 1500 subrepos = sorted(self.substate)
1496 1501 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1497 1502 listclean, listunknown)
1498 1503
1499 1504 # check for any possibly clean files
1500 1505 if cmp:
1501 1506 modified2, fixup = self._checklookup(cmp)
1502 1507 s.modified.extend(modified2)
1503 1508
1504 1509 # update dirstate for files that are actually clean
1505 1510 if fixup and listclean:
1506 1511 s.clean.extend(fixup)
1507 1512
1508 1513 if match.always():
1509 1514 # cache for performance
1510 1515 if s.unknown or s.ignored or s.clean:
1511 1516 # "_status" is cached with list*=False in the normal route
1512 1517 self._status = scmutil.status(s.modified, s.added, s.removed,
1513 1518 s.deleted, [], [], [])
1514 1519 else:
1515 1520 self._status = s
1516 1521
1517 1522 return s
1518 1523
1519 1524 def _buildstatus(self, other, s, match, listignored, listclean,
1520 1525 listunknown):
1521 1526 """build a status with respect to another context
1522 1527
1523 1528 This includes logic for maintaining the fast path of status when
1524 1529 comparing the working directory against its parent, which is to skip
1525 1530 building a new manifest if self (working directory) is not comparing
1526 1531 against its parent (repo['.']).
1527 1532 """
1528 1533 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1529 1534 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1530 1535 # might have accidentally ended up with the entire contents of the file
1531 1536 # they are supposed to be linking to.
1532 1537 s.modified[:] = self._filtersuspectsymlink(s.modified)
1533 1538 if other != self._repo['.']:
1534 1539 s = super(workingctx, self)._buildstatus(other, s, match,
1535 1540 listignored, listclean,
1536 1541 listunknown)
1537 1542 return s
1538 1543
1539 1544 def _matchstatus(self, other, match):
1540 1545 """override the match method with a filter for directory patterns
1541 1546
1542 1547 We use inheritance to customize the match.bad method only in cases of
1543 1548 workingctx since it belongs only to the working directory when
1544 1549 comparing against the parent changeset.
1545 1550
1546 1551 If we aren't comparing against the working directory's parent, then we
1547 1552 just use the default match object sent to us.
1548 1553 """
1549 1554 superself = super(workingctx, self)
1550 1555 match = superself._matchstatus(other, match)
1551 1556 if other != self._repo['.']:
1552 1557 def bad(f, msg):
1553 1558 # 'f' may be a directory pattern from 'match.files()',
1554 1559 # so 'f not in ctx1' is not enough
1555 1560 if f not in other and f not in other.dirs():
1556 1561 self._repo.ui.warn('%s: %s\n' %
1557 1562 (self._repo.dirstate.pathto(f), msg))
1558 1563 match.bad = bad
1559 1564 return match
1560 1565
1561 1566 class committablefilectx(basefilectx):
1562 1567 """A committablefilectx provides common functionality for a file context
1563 1568 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1564 1569 def __init__(self, repo, path, filelog=None, ctx=None):
1565 1570 self._repo = repo
1566 1571 self._path = path
1567 1572 self._changeid = None
1568 1573 self._filerev = self._filenode = None
1569 1574
1570 1575 if filelog is not None:
1571 1576 self._filelog = filelog
1572 1577 if ctx:
1573 1578 self._changectx = ctx
1574 1579
1575 1580 def __nonzero__(self):
1576 1581 return True
1577 1582
1578 1583 def parents(self):
1579 1584 '''return parent filectxs, following copies if necessary'''
1580 1585 def filenode(ctx, path):
1581 1586 return ctx._manifest.get(path, nullid)
1582 1587
1583 1588 path = self._path
1584 1589 fl = self._filelog
1585 1590 pcl = self._changectx._parents
1586 1591 renamed = self.renamed()
1587 1592
1588 1593 if renamed:
1589 1594 pl = [renamed + (None,)]
1590 1595 else:
1591 1596 pl = [(path, filenode(pcl[0], path), fl)]
1592 1597
1593 1598 for pc in pcl[1:]:
1594 1599 pl.append((path, filenode(pc, path), fl))
1595 1600
1596 1601 return [filectx(self._repo, p, fileid=n, filelog=l)
1597 1602 for p, n, l in pl if n != nullid]
1598 1603
1599 1604 def children(self):
1600 1605 return []
1601 1606
1602 1607 class workingfilectx(committablefilectx):
1603 1608 """A workingfilectx object makes access to data related to a particular
1604 1609 file in the working directory convenient."""
1605 1610 def __init__(self, repo, path, filelog=None, workingctx=None):
1606 1611 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1607 1612
1608 1613 @propertycache
1609 1614 def _changectx(self):
1610 1615 return workingctx(self._repo)
1611 1616
1612 1617 def data(self):
1613 1618 return self._repo.wread(self._path)
1614 1619 def renamed(self):
1615 1620 rp = self._repo.dirstate.copied(self._path)
1616 1621 if not rp:
1617 1622 return None
1618 1623 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1619 1624
1620 1625 def size(self):
1621 1626 return self._repo.wvfs.lstat(self._path).st_size
1622 1627 def date(self):
1623 1628 t, tz = self._changectx.date()
1624 1629 try:
1625 1630 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1626 1631 except OSError, err:
1627 1632 if err.errno != errno.ENOENT:
1628 1633 raise
1629 1634 return (t, tz)
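The date() method above is a small pattern on its own: prefer the file's mtime, but fall back to the changeset date when the file has disappeared. A plain-Python sketch with a hypothetical path and fallback value:

import errno, os

def file_date(path, fallback):
    try:
        return int(os.lstat(path).st_mtime)
    except OSError, err:
        if err.errno != errno.ENOENT:
            raise                  # permission errors etc. still propagate
        return fallback            # file is gone: use the fallback timestamp

print file_date('/tmp/does-not-exist', 0)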
1630 1635
1631 1636 def cmp(self, fctx):
1632 1637 """compare with other file context
1633 1638
1634 1639 returns True if different from fctx.
1635 1640 """
1636 1641 # fctx should be a filectx (not a workingfilectx)
1637 1642 # invert comparison to reuse the same code path
1638 1643 return fctx.cmp(self)
1639 1644
1640 1645 def remove(self, ignoremissing=False):
1641 1646 """wraps unlink for a repo's working directory"""
1642 1647 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1643 1648
1644 1649 def write(self, data, flags):
1645 1650 """wraps repo.wwrite"""
1646 1651 self._repo.wwrite(self._path, data, flags)
1647 1652
1648 1653 class workingcommitctx(workingctx):
1649 1654 """A workingcommitctx object makes access to data related to
1650 1655 the revision being committed convenient.
1651 1656
1652 1657 This hides changes in the working directory, if they aren't
1653 1658 committed in this context.
1654 1659 """
1655 1660 def __init__(self, repo, changes,
1656 1661 text="", user=None, date=None, extra=None):
1657 1662 super(workingctx, self).__init__(repo, text, user, date, extra,
1658 1663 changes)
1659 1664
1660 1665 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1661 1666 unknown=False):
1662 1667 """Return matched files only in ``self._status``
1663 1668
1664 1669 Uncommitted files appear "clean" via this context, even if
1665 1670 they aren't actually so in the working directory.
1666 1671 """
1667 1672 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1668 1673 if clean:
1669 1674 clean = [f for f in self._manifest if f not in self._changedset]
1670 1675 else:
1671 1676 clean = []
1672 1677 return scmutil.status([f for f in self._status.modified if match(f)],
1673 1678 [f for f in self._status.added if match(f)],
1674 1679 [f for f in self._status.removed if match(f)],
1675 1680 [], [], [], clean)
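A plain-Python sketch of the filtering above, with made-up data: the recorded modified/added/removed lists are narrowed by the match predicate, and every manifest file outside the changed set is reported as clean.

status = {'modified': ['a.txt', 'docs/b.rst'], 'added': ['new.txt'], 'removed': []}
manifest = ['a.txt', 'docs/b.rst', 'keep.txt', 'new.txt']
changed = set(status['modified']) | set(status['added']) | set(status['removed'])
match = lambda f: not f.startswith('docs/')       # stand-in for a match object

modified = [f for f in status['modified'] if match(f)]
clean = [f for f in manifest if f not in changed]
print modified, clean                             # ['a.txt'] ['keep.txt']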
1676 1681
1677 1682 @propertycache
1678 1683 def _changedset(self):
1679 1684 """Return the set of files changed in this context
1680 1685 """
1681 1686 changed = set(self._status.modified)
1682 1687 changed.update(self._status.added)
1683 1688 changed.update(self._status.removed)
1684 1689 return changed
1685 1690
1686 1691 class memctx(committablectx):
1687 1692 """Use memctx to perform in-memory commits via localrepo.commitctx().
1688 1693
1689 1694 Revision information is supplied at initialization time, while the
1690 1695 related file data is made available through a callback
1691 1696 mechanism. 'repo' is the current localrepo, 'parents' is a
1692 1697 sequence of two parent revisions identifiers (pass None for every
1693 1698 missing parent), 'text' is the commit message and 'files' lists
1694 1699 names of files touched by the revision (normalized and relative to
1695 1700 repository root).
1696 1701
1697 1702 filectxfn(repo, memctx, path) is a callable receiving the
1698 1703 repository, the current memctx object and the normalized path of
1699 1704 requested file, relative to repository root. It is fired by the
1700 1705 commit function for every file in 'files', but calls order is
1701 1706 undefined. If the file is available in the revision being
1702 1707 committed (updated or added), filectxfn returns a memfilectx
1703 1708 object. If the file was removed, filectxfn raises an
1704 1709 IOError. Moved files are represented by marking the source file
1705 1710 removed and the new file added with copy information (see
1706 1711 memfilectx).
1707 1712
1708 1713 user receives the committer name and defaults to current
1709 1714 repository username, date is the commit date in any format
1710 1715 supported by util.parsedate() and defaults to current date, extra
1711 1716 is a dictionary of metadata or is left empty.
1712 1717 """
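A minimal end-to-end sketch of this API, under the assumption that 'repo' is an existing localrepo object; the file names and message are hypothetical, and returning None for a removed file is the post-3.1 convention noted just below.

from mercurial import context

def filectxfn(repo, mctx, path):
    if path == 'old.txt':
        return None                        # treated as removed in this commit
    return context.memfilectx(repo, path, 'new contents\n', memctx=mctx)

mctx = context.memctx(repo, [repo['.'].node(), None],
                      'in-memory commit example',
                      ['hello.txt', 'old.txt'], filectxfn,
                      user='example <user@example.com>')
newnode = mctx.commit()                    # same as repo.commitctx(mctx)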
1713 1718
1714 1719 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1715 1720 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1716 1721 # this field to determine what to do in filectxfn.
1717 1722 _returnnoneformissingfiles = True
1718 1723
1719 1724 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1720 1725 date=None, extra=None, editor=False):
1721 1726 super(memctx, self).__init__(repo, text, user, date, extra)
1722 1727 self._rev = None
1723 1728 self._node = None
1724 1729 parents = [(p or nullid) for p in parents]
1725 1730 p1, p2 = parents
1726 1731 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1727 1732 files = sorted(set(files))
1728 1733 self._files = files
1729 1734 self.substate = {}
1730 1735
1731 1736 # if store is not callable, wrap it in a function
1732 1737 if not callable(filectxfn):
1733 1738 def getfilectx(repo, memctx, path):
1734 1739 fctx = filectxfn[path]
1735 1740 # this is weird but apparently we only keep track of one parent
1736 1741 # (why not only store that instead of a tuple?)
1737 1742 copied = fctx.renamed()
1738 1743 if copied:
1739 1744 copied = copied[0]
1740 1745 return memfilectx(repo, path, fctx.data(),
1741 1746 islink=fctx.islink(), isexec=fctx.isexec(),
1742 1747 copied=copied, memctx=memctx)
1743 1748 self._filectxfn = getfilectx
1744 1749 else:
1745 1750 # "util.cachefunc" reduces invocation of possibly expensive
1746 1751 # "filectxfn" for performance (e.g. converting from another VCS)
1747 1752 self._filectxfn = util.cachefunc(filectxfn)
1748 1753
1749 1754 self._extra = extra and extra.copy() or {}
1750 1755 if self._extra.get('branch', '') == '':
1751 1756 self._extra['branch'] = 'default'
1752 1757
1753 1758 if editor:
1754 1759 self._text = editor(self._repo, self, [])
1755 1760 self._repo.savecommitmessage(self._text)
1756 1761
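Because a non-callable filectxfn is wrapped by getfilectx above, which simply indexes it by path, a plain mapping of paths to file contexts appears to work as well. A hedged sketch, again assuming an existing 'repo' and a hypothetical file name:

from mercurial import context

fctxs = {'hello.txt': context.memfilectx(repo, 'hello.txt', 'hi\n')}
mctx = context.memctx(repo, [repo['.'].node(), None],
                      'commit built from a mapping', ['hello.txt'], fctxs)
mctx.commit()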
1757 1762 def filectx(self, path, filelog=None):
1758 1763 """get a file context from the working directory
1759 1764
1760 1765 Returns None if file doesn't exist and should be removed."""
1761 1766 return self._filectxfn(self._repo, self, path)
1762 1767
1763 1768 def commit(self):
1764 1769 """commit context to the repo"""
1765 1770 return self._repo.commitctx(self)
1766 1771
1767 1772 @propertycache
1768 1773 def _manifest(self):
1769 1774 """generate a manifest based on the return values of filectxfn"""
1770 1775
1771 1776 # keep this simple for now; just worry about p1
1772 1777 pctx = self._parents[0]
1773 1778 man = pctx.manifest().copy()
1774 1779
1775 1780 for f in self._status.modified:
1776 1781 p1node = nullid
1777 1782 p2node = nullid
1778 1783 p = pctx[f].parents() # if file isn't in pctx, check p2?
1779 1784 if len(p) > 0:
1780 1785 p1node = p[0].node()
1781 1786 if len(p) > 1:
1782 1787 p2node = p[1].node()
1783 1788 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1784 1789
1785 1790 for f in self._status.added:
1786 1791 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1787 1792
1788 1793 for f in self._status.removed:
1789 1794 if f in man:
1790 1795 del man[f]
1791 1796
1792 1797 return man
1793 1798
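For orientation, revlog.hash is, to the best of my understanding of the revlog module (not something stated in this file), a SHA-1 over the two parent nodes in sorted order followed by the text. An equivalent plain-Python sketch:

import hashlib

def filenode(text, p1node, p2node):
    lo, hi = sorted([p1node, p2node])      # smaller parent node first
    s = hashlib.sha1(lo)
    s.update(hi)
    s.update(text)
    return s.digest()

nullid20 = '\0' * 20
print filenode('new contents\n', nullid20, nullid20).encode('hex')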
1794 1799 @propertycache
1795 1800 def _status(self):
1796 1801 """Calculate exact status from ``files`` specified at construction
1797 1802 """
1798 1803 man1 = self.p1().manifest()
1799 1804 p2 = self._parents[1]
1800 1805 # "1 < len(self._parents)" can't be used for checking
1801 1806 # existence of the 2nd parent, because "memctx._parents" is
1802 1807 # explicitly initialized with a list whose length is 2.
1803 1808 if p2.node() != nullid:
1804 1809 man2 = p2.manifest()
1805 1810 managing = lambda f: f in man1 or f in man2
1806 1811 else:
1807 1812 managing = lambda f: f in man1
1808 1813
1809 1814 modified, added, removed = [], [], []
1810 1815 for f in self._files:
1811 1816 if not managing(f):
1812 1817 added.append(f)
1813 1818 elif self[f]:
1814 1819 modified.append(f)
1815 1820 else:
1816 1821 removed.append(f)
1817 1822
1818 1823 return scmutil.status(modified, added, removed, [], [], [], [])
1819 1824
1820 1825 class memfilectx(committablefilectx):
1821 1826 """memfilectx represents an in-memory file to commit.
1822 1827
1823 1828 See memctx and committablefilectx for more details.
1824 1829 """
1825 1830 def __init__(self, repo, path, data, islink=False,
1826 1831 isexec=False, copied=None, memctx=None):
1827 1832 """
1828 1833 path is the normalized file path relative to repository root.
1829 1834 data is the file content as a string.
1830 1835 islink is True if the file is a symbolic link.
1831 1836 isexec is True if the file is executable.
1832 1837 copied is the source file path if the current file was copied in the
1833 1838 revision being committed, or None."""
1834 1839 super(memfilectx, self).__init__(repo, path, None, memctx)
1835 1840 self._data = data
1836 1841 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1837 1842 self._copied = None
1838 1843 if copied:
1839 1844 self._copied = (copied, nullid)
1840 1845
1841 1846 def data(self):
1842 1847 return self._data
1843 1848 def size(self):
1844 1849 return len(self.data())
1845 1850 def flags(self):
1846 1851 return self._flags
1847 1852 def renamed(self):
1848 1853 return self._copied
1849 1854
1850 1855 def remove(self, ignoremissing=False):
1851 1856 """wraps unlink for a repo's working directory"""
1852 1857 # need to figure out what to do here
1853 1858 del self._changectx[self._path]
1854 1859
1855 1860 def write(self, data, flags):
1856 1861 """wraps repo.wwrite"""
1857 1862 self._data = data
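Tying this back to the memctx docstring: a move is expressed as the source marked removed plus the destination added with 'copied' pointing at the source. A hedged sketch assuming an existing 'repo'; the names are hypothetical.

from mercurial import context

def filectxfn(repo, mctx, path):
    if path == 'old-name.txt':
        return None                                   # source is removed
    return context.memfilectx(repo, path, 'contents\n',
                              copied='old-name.txt',  # destination carries copy info
                              memctx=mctx)

mctx = context.memctx(repo, [repo['.'].node(), None], 'rename via memctx',
                      ['old-name.txt', 'new-name.txt'], filectxfn)
mctx.commit()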