adjustlinkrev: handle 'None' value as source...
Pierre-Yves David
r24411:5a12ef61 stable
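The hunk below is the substance of this changeset: _adjustlinkrev now accepts None as srcrev (the working-directory case hit by workingfilectx during merge copy tracing) and seeds the ancestor walk from the working directory's parents instead. A condensed sketch of the new branch, with names taken from the hunk; the helper name and standalone form are illustrative only, and in the real method `inclusive` arrives as a parameter and the ancestor walk follows:

    def _sourcerevs(repo, srcrev, inclusive):
        # Pick the revisions that seed the ancestor walk.
        if srcrev is None:
            # wctx case: fall back to the working directory's parents and
            # include them, since the real (revless) source was skipped.
            revs = [p.rev() for p in repo[None].parents()]
            inclusive = True
        else:
            revs = [srcrev]
        return revs, inclusive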
@@ -1,1868 +1,1874 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 class basectx(object):
26 26 """A basectx object represents the common logic for its children:
27 27 changectx: read-only context that is already present in the repo,
28 28 workingctx: a context that represents the working directory and can
29 29 be committed,
30 30 memctx: a context that represents changes in-memory and can also
31 31 be committed."""
32 32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 33 if isinstance(changeid, basectx):
34 34 return changeid
35 35
36 36 o = super(basectx, cls).__new__(cls)
37 37
38 38 o._repo = repo
39 39 o._rev = nullrev
40 40 o._node = nullid
41 41
42 42 return o
43 43
44 44 def __str__(self):
45 45 return short(self.node())
46 46
47 47 def __int__(self):
48 48 return self.rev()
49 49
50 50 def __repr__(self):
51 51 return "<%s %s>" % (type(self).__name__, str(self))
52 52
53 53 def __eq__(self, other):
54 54 try:
55 55 return type(self) == type(other) and self._rev == other._rev
56 56 except AttributeError:
57 57 return False
58 58
59 59 def __ne__(self, other):
60 60 return not (self == other)
61 61
62 62 def __contains__(self, key):
63 63 return key in self._manifest
64 64
65 65 def __getitem__(self, key):
66 66 return self.filectx(key)
67 67
68 68 def __iter__(self):
69 69 for f in sorted(self._manifest):
70 70 yield f
71 71
72 72 def _manifestmatches(self, match, s):
73 73 """generate a new manifest filtered by the match argument
74 74
75 75 This method is for internal use only and mainly exists to provide an
76 76 object oriented way for other contexts to customize the manifest
77 77 generation.
78 78 """
79 79 return self.manifest().matches(match)
80 80
81 81 def _matchstatus(self, other, match):
82 82 """return match.always if match is None
83 83
84 84 This internal method provides a way for child objects to override the
85 85 match operator.
86 86 """
87 87 return match or matchmod.always(self._repo.root, self._repo.getcwd())
88 88
89 89 def _buildstatus(self, other, s, match, listignored, listclean,
90 90 listunknown):
91 91 """build a status with respect to another context"""
92 92 # Load earliest manifest first for caching reasons. More specifically,
93 93 # if you have revisions 1000 and 1001, 1001 is probably stored as a
94 94 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
95 95 # 1000 and cache it so that when you read 1001, we just need to apply a
96 96 # delta to what's in the cache. So that's one full reconstruction + one
97 97 # delta application.
98 98 if self.rev() is not None and self.rev() < other.rev():
99 99 self.manifest()
100 100 mf1 = other._manifestmatches(match, s)
101 101 mf2 = self._manifestmatches(match, s)
102 102
103 103 modified, added = [], []
104 104 removed = []
105 105 clean = []
106 106 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
107 107 deletedset = set(deleted)
108 108 d = mf1.diff(mf2, clean=listclean)
109 109 for fn, value in d.iteritems():
110 110 if fn in deletedset:
111 111 continue
112 112 if value is None:
113 113 clean.append(fn)
114 114 continue
115 115 (node1, flag1), (node2, flag2) = value
116 116 if node1 is None:
117 117 added.append(fn)
118 118 elif node2 is None:
119 119 removed.append(fn)
120 120 elif node2 != _newnode:
121 121 # The file was not a new file in mf2, so an entry
122 122 # from diff is really a difference.
123 123 modified.append(fn)
124 124 elif self[fn].cmp(other[fn]):
125 125 # node2 was newnode, but the working file doesn't
126 126 # match the one in mf1.
127 127 modified.append(fn)
128 128 else:
129 129 clean.append(fn)
130 130
131 131 if removed:
132 132 # need to filter files if they are already reported as removed
133 133 unknown = [fn for fn in unknown if fn not in mf1]
134 134 ignored = [fn for fn in ignored if fn not in mf1]
135 135 # if they're deleted, don't report them as removed
136 136 removed = [fn for fn in removed if fn not in deletedset]
137 137
138 138 return scmutil.status(modified, added, removed, deleted, unknown,
139 139 ignored, clean)
140 140
141 141 @propertycache
142 142 def substate(self):
143 143 return subrepo.state(self, self._repo.ui)
144 144
145 145 def subrev(self, subpath):
146 146 return self.substate[subpath][1]
147 147
148 148 def rev(self):
149 149 return self._rev
150 150 def node(self):
151 151 return self._node
152 152 def hex(self):
153 153 return hex(self.node())
154 154 def manifest(self):
155 155 return self._manifest
156 156 def phasestr(self):
157 157 return phases.phasenames[self.phase()]
158 158 def mutable(self):
159 159 return self.phase() > phases.public
160 160
161 161 def getfileset(self, expr):
162 162 return fileset.getfileset(self, expr)
163 163
164 164 def obsolete(self):
165 165 """True if the changeset is obsolete"""
166 166 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
167 167
168 168 def extinct(self):
169 169 """True if the changeset is extinct"""
170 170 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
171 171
172 172 def unstable(self):
173 173 """True if the changeset is not obsolete but its ancestors are"""
174 174 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
175 175
176 176 def bumped(self):
177 177 """True if the changeset tries to be a successor of a public changeset
178 178
179 179 Only non-public and non-obsolete changesets may be bumped.
180 180 """
181 181 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
182 182
183 183 def divergent(self):
184 184 """Is a successor of a changeset with multiple possible successors set
185 185
186 186 Only non-public and non-obsolete changesets may be divergent.
187 187 """
188 188 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
189 189
190 190 def troubled(self):
191 191 """True if the changeset is either unstable, bumped or divergent"""
192 192 return self.unstable() or self.bumped() or self.divergent()
193 193
194 194 def troubles(self):
195 195 """return the list of troubles affecting this changeset.
196 196
197 197 Troubles are returned as strings. Possible values are:
198 198 - unstable,
199 199 - bumped,
200 200 - divergent.
201 201 """
202 202 troubles = []
203 203 if self.unstable():
204 204 troubles.append('unstable')
205 205 if self.bumped():
206 206 troubles.append('bumped')
207 207 if self.divergent():
208 208 troubles.append('divergent')
209 209 return troubles
210 210
211 211 def parents(self):
212 212 """return contexts for each parent changeset"""
213 213 return self._parents
214 214
215 215 def p1(self):
216 216 return self._parents[0]
217 217
218 218 def p2(self):
219 219 if len(self._parents) == 2:
220 220 return self._parents[1]
221 221 return changectx(self._repo, -1)
222 222
223 223 def _fileinfo(self, path):
224 224 if '_manifest' in self.__dict__:
225 225 try:
226 226 return self._manifest[path], self._manifest.flags(path)
227 227 except KeyError:
228 228 raise error.ManifestLookupError(self._node, path,
229 229 _('not found in manifest'))
230 230 if '_manifestdelta' in self.__dict__ or path in self.files():
231 231 if path in self._manifestdelta:
232 232 return (self._manifestdelta[path],
233 233 self._manifestdelta.flags(path))
234 234 node, flag = self._repo.manifest.find(self._changeset[0], path)
235 235 if not node:
236 236 raise error.ManifestLookupError(self._node, path,
237 237 _('not found in manifest'))
238 238
239 239 return node, flag
240 240
241 241 def filenode(self, path):
242 242 return self._fileinfo(path)[0]
243 243
244 244 def flags(self, path):
245 245 try:
246 246 return self._fileinfo(path)[1]
247 247 except error.LookupError:
248 248 return ''
249 249
250 250 def sub(self, path):
251 251 return subrepo.subrepo(self, path)
252 252
253 253 def match(self, pats=[], include=None, exclude=None, default='glob'):
254 254 r = self._repo
255 255 return matchmod.match(r.root, r.getcwd(), pats,
256 256 include, exclude, default,
257 257 auditor=r.auditor, ctx=self)
258 258
259 259 def diff(self, ctx2=None, match=None, **opts):
260 260 """Returns a diff generator for the given contexts and matcher"""
261 261 if ctx2 is None:
262 262 ctx2 = self.p1()
263 263 if ctx2 is not None:
264 264 ctx2 = self._repo[ctx2]
265 265 diffopts = patch.diffopts(self._repo.ui, opts)
266 266 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
267 267
268 268 @propertycache
269 269 def _dirs(self):
270 270 return scmutil.dirs(self._manifest)
271 271
272 272 def dirs(self):
273 273 return self._dirs
274 274
275 275 def dirty(self, missing=False, merge=True, branch=True):
276 276 return False
277 277
278 278 def status(self, other=None, match=None, listignored=False,
279 279 listclean=False, listunknown=False, listsubrepos=False):
280 280 """return status of files between two nodes or node and working
281 281 directory.
282 282
283 283 If other is None, compare this node with working directory.
284 284
285 285 returns (modified, added, removed, deleted, unknown, ignored, clean)
286 286 """
287 287
288 288 ctx1 = self
289 289 ctx2 = self._repo[other]
290 290
291 291 # This next code block is, admittedly, fragile logic that tests for
292 292 # reversing the contexts and wouldn't need to exist if it weren't for
293 293 # the fast (and common) code path of comparing the working directory
294 294 # with its first parent.
295 295 #
296 296 # What we're aiming for here is the ability to call:
297 297 #
298 298 # workingctx.status(parentctx)
299 299 #
300 300 # If we always built the manifest for each context and compared those,
301 301 # then we'd be done. But the special case of the above call means we
302 302 # just copy the manifest of the parent.
303 303 reversed = False
304 304 if (not isinstance(ctx1, changectx)
305 305 and isinstance(ctx2, changectx)):
306 306 reversed = True
307 307 ctx1, ctx2 = ctx2, ctx1
308 308
309 309 match = ctx2._matchstatus(ctx1, match)
310 310 r = scmutil.status([], [], [], [], [], [], [])
311 311 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
312 312 listunknown)
313 313
314 314 if reversed:
315 315 # Reverse added and removed. Clear deleted, unknown and ignored as
316 316 # these make no sense to reverse.
317 317 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
318 318 r.clean)
319 319
320 320 if listsubrepos:
321 321 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
322 322 rev2 = ctx2.subrev(subpath)
323 323 try:
324 324 submatch = matchmod.narrowmatcher(subpath, match)
325 325 s = sub.status(rev2, match=submatch, ignored=listignored,
326 326 clean=listclean, unknown=listunknown,
327 327 listsubrepos=True)
328 328 for rfiles, sfiles in zip(r, s):
329 329 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
330 330 except error.LookupError:
331 331 self._repo.ui.status(_("skipping missing "
332 332 "subrepository: %s\n") % subpath)
333 333
334 334 for l in r:
335 335 l.sort()
336 336
337 337 return r
338 338
339 339
340 340 def makememctx(repo, parents, text, user, date, branch, files, store,
341 341 editor=None):
342 342 def getfilectx(repo, memctx, path):
343 343 data, mode, copied = store.getfile(path)
344 344 if data is None:
345 345 return None
346 346 islink, isexec = mode
347 347 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
348 348 copied=copied, memctx=memctx)
349 349 extra = {}
350 350 if branch:
351 351 extra['branch'] = encoding.fromlocal(branch)
352 352 ctx = memctx(repo, parents, text, files, getfilectx, user,
353 353 date, extra, editor)
354 354 return ctx
355 355
356 356 class changectx(basectx):
357 357 """A changecontext object makes access to data related to a particular
358 358 changeset convenient. It represents a read-only context already present in
359 359 the repo."""
360 360 def __init__(self, repo, changeid=''):
361 361 """changeid is a revision number, node, or tag"""
362 362
363 363 # since basectx.__new__ already took care of copying the object, we
364 364 # don't need to do anything in __init__, so we just exit here
365 365 if isinstance(changeid, basectx):
366 366 return
367 367
368 368 if changeid == '':
369 369 changeid = '.'
370 370 self._repo = repo
371 371
372 372 try:
373 373 if isinstance(changeid, int):
374 374 self._node = repo.changelog.node(changeid)
375 375 self._rev = changeid
376 376 return
377 377 if isinstance(changeid, long):
378 378 changeid = str(changeid)
379 379 if changeid == '.':
380 380 self._node = repo.dirstate.p1()
381 381 self._rev = repo.changelog.rev(self._node)
382 382 return
383 383 if changeid == 'null':
384 384 self._node = nullid
385 385 self._rev = nullrev
386 386 return
387 387 if changeid == 'tip':
388 388 self._node = repo.changelog.tip()
389 389 self._rev = repo.changelog.rev(self._node)
390 390 return
391 391 if len(changeid) == 20:
392 392 try:
393 393 self._node = changeid
394 394 self._rev = repo.changelog.rev(changeid)
395 395 return
396 396 except error.FilteredRepoLookupError:
397 397 raise
398 398 except LookupError:
399 399 pass
400 400
401 401 try:
402 402 r = int(changeid)
403 403 if str(r) != changeid:
404 404 raise ValueError
405 405 l = len(repo.changelog)
406 406 if r < 0:
407 407 r += l
408 408 if r < 0 or r >= l:
409 409 raise ValueError
410 410 self._rev = r
411 411 self._node = repo.changelog.node(r)
412 412 return
413 413 except error.FilteredIndexError:
414 414 raise
415 415 except (ValueError, OverflowError, IndexError):
416 416 pass
417 417
418 418 if len(changeid) == 40:
419 419 try:
420 420 self._node = bin(changeid)
421 421 self._rev = repo.changelog.rev(self._node)
422 422 return
423 423 except error.FilteredLookupError:
424 424 raise
425 425 except (TypeError, LookupError):
426 426 pass
427 427
428 428 # lookup bookmarks through the name interface
429 429 try:
430 430 self._node = repo.names.singlenode(repo, changeid)
431 431 self._rev = repo.changelog.rev(self._node)
432 432 return
433 433 except KeyError:
434 434 pass
435 435 except error.FilteredRepoLookupError:
436 436 raise
437 437 except error.RepoLookupError:
438 438 pass
439 439
440 440 self._node = repo.unfiltered().changelog._partialmatch(changeid)
441 441 if self._node is not None:
442 442 self._rev = repo.changelog.rev(self._node)
443 443 return
444 444
445 445 # lookup failed
446 446 # check if it might have come from damaged dirstate
447 447 #
448 448 # XXX we could avoid the unfiltered if we had a recognizable
449 449 # exception for filtered changeset access
450 450 if changeid in repo.unfiltered().dirstate.parents():
451 451 msg = _("working directory has unknown parent '%s'!")
452 452 raise error.Abort(msg % short(changeid))
453 453 try:
454 454 if len(changeid) == 20:
455 455 changeid = hex(changeid)
456 456 except TypeError:
457 457 pass
458 458 except (error.FilteredIndexError, error.FilteredLookupError,
459 459 error.FilteredRepoLookupError):
460 460 if repo.filtername == 'visible':
461 461 msg = _("hidden revision '%s'") % changeid
462 462 hint = _('use --hidden to access hidden revisions')
463 463 raise error.FilteredRepoLookupError(msg, hint=hint)
464 464 msg = _("filtered revision '%s' (not in '%s' subset)")
465 465 msg %= (changeid, repo.filtername)
466 466 raise error.FilteredRepoLookupError(msg)
467 467 except IndexError:
468 468 pass
469 469 raise error.RepoLookupError(
470 470 _("unknown revision '%s'") % changeid)
471 471
472 472 def __hash__(self):
473 473 try:
474 474 return hash(self._rev)
475 475 except AttributeError:
476 476 return id(self)
477 477
478 478 def __nonzero__(self):
479 479 return self._rev != nullrev
480 480
481 481 @propertycache
482 482 def _changeset(self):
483 483 return self._repo.changelog.read(self.rev())
484 484
485 485 @propertycache
486 486 def _manifest(self):
487 487 return self._repo.manifest.read(self._changeset[0])
488 488
489 489 @propertycache
490 490 def _manifestdelta(self):
491 491 return self._repo.manifest.readdelta(self._changeset[0])
492 492
493 493 @propertycache
494 494 def _parents(self):
495 495 p = self._repo.changelog.parentrevs(self._rev)
496 496 if p[1] == nullrev:
497 497 p = p[:-1]
498 498 return [changectx(self._repo, x) for x in p]
499 499
500 500 def changeset(self):
501 501 return self._changeset
502 502 def manifestnode(self):
503 503 return self._changeset[0]
504 504
505 505 def user(self):
506 506 return self._changeset[1]
507 507 def date(self):
508 508 return self._changeset[2]
509 509 def files(self):
510 510 return self._changeset[3]
511 511 def description(self):
512 512 return self._changeset[4]
513 513 def branch(self):
514 514 return encoding.tolocal(self._changeset[5].get("branch"))
515 515 def closesbranch(self):
516 516 return 'close' in self._changeset[5]
517 517 def extra(self):
518 518 return self._changeset[5]
519 519 def tags(self):
520 520 return self._repo.nodetags(self._node)
521 521 def bookmarks(self):
522 522 return self._repo.nodebookmarks(self._node)
523 523 def phase(self):
524 524 return self._repo._phasecache.phase(self._repo, self._rev)
525 525 def hidden(self):
526 526 return self._rev in repoview.filterrevs(self._repo, 'visible')
527 527
528 528 def children(self):
529 529 """return contexts for each child changeset"""
530 530 c = self._repo.changelog.children(self._node)
531 531 return [changectx(self._repo, x) for x in c]
532 532
533 533 def ancestors(self):
534 534 for a in self._repo.changelog.ancestors([self._rev]):
535 535 yield changectx(self._repo, a)
536 536
537 537 def descendants(self):
538 538 for d in self._repo.changelog.descendants([self._rev]):
539 539 yield changectx(self._repo, d)
540 540
541 541 def filectx(self, path, fileid=None, filelog=None):
542 542 """get a file context from this changeset"""
543 543 if fileid is None:
544 544 fileid = self.filenode(path)
545 545 return filectx(self._repo, path, fileid=fileid,
546 546 changectx=self, filelog=filelog)
547 547
548 548 def ancestor(self, c2, warn=False):
549 549 """return the "best" ancestor context of self and c2
550 550
551 551 If there are multiple candidates, it will show a message and check
552 552 merge.preferancestor configuration before falling back to the
553 553 revlog ancestor."""
554 554 # deal with workingctxs
555 555 n2 = c2._node
556 556 if n2 is None:
557 557 n2 = c2._parents[0]._node
558 558 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
559 559 if not cahs:
560 560 anc = nullid
561 561 elif len(cahs) == 1:
562 562 anc = cahs[0]
563 563 else:
564 564 for r in self._repo.ui.configlist('merge', 'preferancestor'):
565 565 try:
566 566 ctx = changectx(self._repo, r)
567 567 except error.RepoLookupError:
568 568 continue
569 569 anc = ctx.node()
570 570 if anc in cahs:
571 571 break
572 572 else:
573 573 anc = self._repo.changelog.ancestor(self._node, n2)
574 574 if warn:
575 575 self._repo.ui.status(
576 576 (_("note: using %s as ancestor of %s and %s\n") %
577 577 (short(anc), short(self._node), short(n2))) +
578 578 ''.join(_(" alternatively, use --config "
579 579 "merge.preferancestor=%s\n") %
580 580 short(n) for n in sorted(cahs) if n != anc))
581 581 return changectx(self._repo, anc)
582 582
583 583 def descendant(self, other):
584 584 """True if other is descendant of this changeset"""
585 585 return self._repo.changelog.descendant(self._rev, other._rev)
586 586
587 587 def walk(self, match):
588 588 fset = set(match.files())
589 589 # for dirstate.walk, files=['.'] means "walk the whole tree".
590 590 # follow that here, too
591 591 fset.discard('.')
592 592
593 593 # avoid the entire walk if we're only looking for specific files
594 594 if fset and not match.anypats():
595 595 if util.all([fn in self for fn in fset]):
596 596 for fn in sorted(fset):
597 597 if match(fn):
598 598 yield fn
599 599 raise StopIteration
600 600
601 601 for fn in self:
602 602 if fn in fset:
603 603 # specified pattern is the exact name
604 604 fset.remove(fn)
605 605 if match(fn):
606 606 yield fn
607 607 for fn in sorted(fset):
608 608 if fn in self._dirs:
609 609 # specified pattern is a directory
610 610 continue
611 611 match.bad(fn, _('no such file in rev %s') % self)
612 612
613 613 def matches(self, match):
614 614 return self.walk(match)
615 615
616 616 class basefilectx(object):
617 617 """A filecontext object represents the common logic for its children:
618 618 filectx: read-only access to a filerevision that is already present
619 619 in the repo,
620 620 workingfilectx: a filecontext that represents files from the working
621 621 directory,
622 622 memfilectx: a filecontext that represents files in-memory."""
623 623 def __new__(cls, repo, path, *args, **kwargs):
624 624 return super(basefilectx, cls).__new__(cls)
625 625
626 626 @propertycache
627 627 def _filelog(self):
628 628 return self._repo.file(self._path)
629 629
630 630 @propertycache
631 631 def _changeid(self):
632 632 if '_changeid' in self.__dict__:
633 633 return self._changeid
634 634 elif '_changectx' in self.__dict__:
635 635 return self._changectx.rev()
636 636 elif '_descendantrev' in self.__dict__:
637 637 # this file context was created from a revision with a known
638 638 # descendant, we can (lazily) correct for linkrev aliases
639 639 return self._adjustlinkrev(self._path, self._filelog,
640 640 self._filenode, self._descendantrev)
641 641 else:
642 642 return self._filelog.linkrev(self._filerev)
643 643
644 644 @propertycache
645 645 def _filenode(self):
646 646 if '_fileid' in self.__dict__:
647 647 return self._filelog.lookup(self._fileid)
648 648 else:
649 649 return self._changectx.filenode(self._path)
650 650
651 651 @propertycache
652 652 def _filerev(self):
653 653 return self._filelog.rev(self._filenode)
654 654
655 655 @propertycache
656 656 def _repopath(self):
657 657 return self._path
658 658
659 659 def __nonzero__(self):
660 660 try:
661 661 self._filenode
662 662 return True
663 663 except error.LookupError:
664 664 # file is missing
665 665 return False
666 666
667 667 def __str__(self):
668 668 return "%s@%s" % (self.path(), self._changectx)
669 669
670 670 def __repr__(self):
671 671 return "<%s %s>" % (type(self).__name__, str(self))
672 672
673 673 def __hash__(self):
674 674 try:
675 675 return hash((self._path, self._filenode))
676 676 except AttributeError:
677 677 return id(self)
678 678
679 679 def __eq__(self, other):
680 680 try:
681 681 return (type(self) == type(other) and self._path == other._path
682 682 and self._filenode == other._filenode)
683 683 except AttributeError:
684 684 return False
685 685
686 686 def __ne__(self, other):
687 687 return not (self == other)
688 688
689 689 def filerev(self):
690 690 return self._filerev
691 691 def filenode(self):
692 692 return self._filenode
693 693 def flags(self):
694 694 return self._changectx.flags(self._path)
695 695 def filelog(self):
696 696 return self._filelog
697 697 def rev(self):
698 698 return self._changeid
699 699 def linkrev(self):
700 700 return self._filelog.linkrev(self._filerev)
701 701 def node(self):
702 702 return self._changectx.node()
703 703 def hex(self):
704 704 return self._changectx.hex()
705 705 def user(self):
706 706 return self._changectx.user()
707 707 def date(self):
708 708 return self._changectx.date()
709 709 def files(self):
710 710 return self._changectx.files()
711 711 def description(self):
712 712 return self._changectx.description()
713 713 def branch(self):
714 714 return self._changectx.branch()
715 715 def extra(self):
716 716 return self._changectx.extra()
717 717 def phase(self):
718 718 return self._changectx.phase()
719 719 def phasestr(self):
720 720 return self._changectx.phasestr()
721 721 def manifest(self):
722 722 return self._changectx.manifest()
723 723 def changectx(self):
724 724 return self._changectx
725 725
726 726 def path(self):
727 727 return self._path
728 728
729 729 def isbinary(self):
730 730 try:
731 731 return util.binary(self.data())
732 732 except IOError:
733 733 return False
734 734 def isexec(self):
735 735 return 'x' in self.flags()
736 736 def islink(self):
737 737 return 'l' in self.flags()
738 738
739 739 def cmp(self, fctx):
740 740 """compare with other file context
741 741
742 742 returns True if different than fctx.
743 743 """
744 744 if (fctx._filerev is None
745 745 and (self._repo._encodefilterpats
746 746 # if file data starts with '\1\n', empty metadata block is
747 747 # prepended, which adds 4 bytes to filelog.size().
748 748 or self.size() - 4 == fctx.size())
749 749 or self.size() == fctx.size()):
750 750 return self._filelog.cmp(self._filenode, fctx.data())
751 751
752 752 return True
753 753
754 754 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
755 755 """return the first ancestor of <srcrev> introducing <fnode>
756 756
757 757 If the linkrev of the file revision does not point to an ancestor of
758 758 srcrev, we'll walk down the ancestors until we find one introducing
759 759 this file revision.
760 760
761 761 :repo: a localrepository object (used to access changelog and manifest)
762 762 :path: the file path
763 763 :fnode: the nodeid of the file revision
764 764 :filelog: the filelog of this path
765 765 :srcrev: the changeset revision we search ancestors from
766 766 :inclusive: if true, the src revision will also be checked
767 767 """
768 768 repo = self._repo
769 769 cl = repo.unfiltered().changelog
770 770 ma = repo.manifest
771 771 # fetch the linkrev
772 772 fr = filelog.rev(fnode)
773 773 lkr = filelog.linkrev(fr)
774 774 # hack to reuse ancestor computation when searching for renames
775 775 memberanc = getattr(self, '_ancestrycontext', None)
776 776 iteranc = None
777 revs = [srcrev]
777 if srcrev is None:
778 # wctx case, used by workingfilectx during mergecopy
779 revs = [p.rev() for p in self._repo[None].parents()]
780 inclusive = True # we skipped the real (revless) source
781 else:
782 revs = [srcrev]
778 783 if memberanc is None:
779 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
784 memberanc = iteranc = cl.ancestors(revs, lkr,
785 inclusive=inclusive)
780 786 # check if this linkrev is an ancestor of srcrev
781 787 if lkr not in memberanc:
782 788 if iteranc is None:
783 789 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
784 790 for a in iteranc:
785 791 ac = cl.read(a) # get changeset data (we avoid object creation)
786 792 if path in ac[3]: # checking the 'files' field.
787 793 # The file has been touched, check if the content is
788 794 # similar to the one we search for.
789 795 if fnode == ma.readfast(ac[0]).get(path):
790 796 return a
791 797 # In theory, we should never get out of that loop without a result.
792 798 # But if the manifest uses a buggy file revision (not a child of the
793 799 # one it replaces) we could. Such a buggy situation will likely
794 800 # result in a crash somewhere else at some point.
795 801 return lkr
796 802
797 803 def introrev(self):
798 804 """return the rev of the changeset which introduced this file revision
799 805
800 806 This method is different from linkrev because it takes into account the
801 807 changeset the filectx was created from. It ensures the returned
802 808 revision is one of its ancestors. This prevents bugs from
803 809 'linkrev-shadowing' when a file revision is used by multiple
804 810 changesets.
805 811 """
806 812 lkr = self.linkrev()
807 813 attrs = vars(self)
808 814 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
809 815 if noctx or self.rev() == lkr:
810 816 return self.linkrev()
811 817 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
812 818 self.rev(), inclusive=True)
813 819
814 820 def parents(self):
815 821 _path = self._path
816 822 fl = self._filelog
817 823 parents = self._filelog.parents(self._filenode)
818 824 pl = [(_path, node, fl) for node in parents if node != nullid]
819 825
820 826 r = fl.renamed(self._filenode)
821 827 if r:
822 828 # - In the simple rename case, both parents are nullid, pl is empty.
823 829 # - In case of merge, only one of the parents is nullid and should
824 830 # be replaced with the rename information. This parent is -always-
825 831 # the first one.
826 832 #
827 833 # As nullid parents have always been filtered out in the previous
828 834 # list comprehension, inserting at 0 will always replace the
829 835 # first nullid parent with the rename information.
830 836 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
831 837
832 838 ret = []
833 839 for path, fnode, l in pl:
834 840 if '_changeid' in vars(self) or '_changectx' in vars(self):
835 841 # If self is associated with a changeset (probably explicitly
836 842 # fed), ensure the created filectx is associated with a
837 843 # changeset that is an ancestor of self.changectx.
838 844 # This lets us later use _adjustlinkrev to get a correct link.
839 845 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
840 846 fctx._descendantrev = self.rev()
841 847 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
842 848 elif '_descendantrev' in vars(self):
843 849 # Otherwise propagate _descendantrev if we have one associated.
844 850 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
845 851 fctx._descendantrev = self._descendantrev
846 852 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
847 853 else:
848 854 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
849 855 ret.append(fctx)
850 856 return ret
851 857
852 858 def p1(self):
853 859 return self.parents()[0]
854 860
855 861 def p2(self):
856 862 p = self.parents()
857 863 if len(p) == 2:
858 864 return p[1]
859 865 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
860 866
861 867 def annotate(self, follow=False, linenumber=None, diffopts=None):
862 868 '''returns a list of tuples of (ctx, line) for each line
863 869 in the file, where ctx is the filectx of the node where
864 870 that line was last changed.
865 871 This returns tuples of ((ctx, linenumber), line) for each line
866 872 if the "linenumber" parameter is not None.
867 873 In such tuples, linenumber is the line number at its first
868 874 appearance in the managed file.
869 875 To reduce annotation cost,
870 876 this returns a fixed value (False) as linenumber
871 877 if the "linenumber" parameter is False.'''
872 878
873 879 if linenumber is None:
874 880 def decorate(text, rev):
875 881 return ([rev] * len(text.splitlines()), text)
876 882 elif linenumber:
877 883 def decorate(text, rev):
878 884 size = len(text.splitlines())
879 885 return ([(rev, i) for i in xrange(1, size + 1)], text)
880 886 else:
881 887 def decorate(text, rev):
882 888 return ([(rev, False)] * len(text.splitlines()), text)
883 889
884 890 def pair(parent, child):
885 891 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
886 892 refine=True)
887 893 for (a1, a2, b1, b2), t in blocks:
888 894 # Changed blocks ('!') or blocks made only of blank lines ('~')
889 895 # belong to the child.
890 896 if t == '=':
891 897 child[0][b1:b2] = parent[0][a1:a2]
892 898 return child
893 899
894 900 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
895 901
896 902 def parents(f):
897 903 pl = f.parents()
898 904
899 905 # Don't return renamed parents if we aren't following.
900 906 if not follow:
901 907 pl = [p for p in pl if p.path() == f.path()]
902 908
903 909 # renamed filectx won't have a filelog yet, so set it
904 910 # from the cache to save time
905 911 for p in pl:
906 912 if not '_filelog' in p.__dict__:
907 913 p._filelog = getlog(p.path())
908 914
909 915 return pl
910 916
911 917 # use linkrev to find the first changeset where self appeared
912 918 base = self
913 919 introrev = self.introrev()
914 920 if self.rev() != introrev:
915 921 base = self.filectx(self.filenode(), changeid=introrev)
916 922 ac = self._repo.changelog.ancestors([introrev], inclusive=True)
917 923 base._ancestrycontext = ac
918 924
919 925 # This algorithm would prefer to be recursive, but Python is a
920 926 # bit recursion-hostile. Instead we do an iterative
921 927 # depth-first search.
922 928
923 929 visit = [base]
924 930 hist = {}
925 931 pcache = {}
926 932 needed = {base: 1}
927 933 while visit:
928 934 f = visit[-1]
929 935 pcached = f in pcache
930 936 if not pcached:
931 937 pcache[f] = parents(f)
932 938
933 939 ready = True
934 940 pl = pcache[f]
935 941 for p in pl:
936 942 if p not in hist:
937 943 ready = False
938 944 visit.append(p)
939 945 if not pcached:
940 946 needed[p] = needed.get(p, 0) + 1
941 947 if ready:
942 948 visit.pop()
943 949 reusable = f in hist
944 950 if reusable:
945 951 curr = hist[f]
946 952 else:
947 953 curr = decorate(f.data(), f)
948 954 for p in pl:
949 955 if not reusable:
950 956 curr = pair(hist[p], curr)
951 957 if needed[p] == 1:
952 958 del hist[p]
953 959 del needed[p]
954 960 else:
955 961 needed[p] -= 1
956 962
957 963 hist[f] = curr
958 964 pcache[f] = []
959 965
960 966 return zip(hist[base][0], hist[base][1].splitlines(True))
961 967
962 968 def ancestors(self, followfirst=False):
963 969 visit = {}
964 970 c = self
965 971 cut = followfirst and 1 or None
966 972 while True:
967 973 for parent in c.parents()[:cut]:
968 974 visit[(parent.linkrev(), parent.filenode())] = parent
969 975 if not visit:
970 976 break
971 977 c = visit.pop(max(visit))
972 978 yield c
973 979
974 980 class filectx(basefilectx):
975 981 """A filecontext object makes access to data related to a particular
976 982 filerevision convenient."""
977 983 def __init__(self, repo, path, changeid=None, fileid=None,
978 984 filelog=None, changectx=None):
979 985 """changeid can be a changeset revision, node, or tag.
980 986 fileid can be a file revision or node."""
981 987 self._repo = repo
982 988 self._path = path
983 989
984 990 assert (changeid is not None
985 991 or fileid is not None
986 992 or changectx is not None), \
987 993 ("bad args: changeid=%r, fileid=%r, changectx=%r"
988 994 % (changeid, fileid, changectx))
989 995
990 996 if filelog is not None:
991 997 self._filelog = filelog
992 998
993 999 if changeid is not None:
994 1000 self._changeid = changeid
995 1001 if changectx is not None:
996 1002 self._changectx = changectx
997 1003 if fileid is not None:
998 1004 self._fileid = fileid
999 1005
1000 1006 @propertycache
1001 1007 def _changectx(self):
1002 1008 try:
1003 1009 return changectx(self._repo, self._changeid)
1004 1010 except error.FilteredRepoLookupError:
1005 1011 # Linkrev may point to any revision in the repository. When the
1006 1012 # repository is filtered this may lead to `filectx` trying to build
1007 1013 # `changectx` for a filtered revision. In such a case we fall back to
1008 1014 # creating `changectx` on the unfiltered version of the repository.
1009 1015 # This fallback should not be an issue because `changectx` from
1010 1016 # `filectx` are not used in complex operations that care about
1011 1017 # filtering.
1012 1018 #
1013 1019 # This fallback is a cheap and dirty fix that prevents several
1014 1020 # crashes. It does not ensure the behavior is correct. However the
1015 1021 # behavior was not correct before filtering either, and "incorrect
1016 1022 # behavior" is seen as better than "crash".
1017 1023 #
1018 1024 # Linkrevs have several serious troubles with filtering that are
1019 1025 # complicated to solve. Proper handling of the issue here should be
1020 1026 # considered when solving the linkrev issues is on the table.
1021 1027 return changectx(self._repo.unfiltered(), self._changeid)
1022 1028
1023 1029 def filectx(self, fileid, changeid=None):
1024 1030 '''opens an arbitrary revision of the file without
1025 1031 opening a new filelog'''
1026 1032 return filectx(self._repo, self._path, fileid=fileid,
1027 1033 filelog=self._filelog, changeid=changeid)
1028 1034
1029 1035 def data(self):
1030 1036 try:
1031 1037 return self._filelog.read(self._filenode)
1032 1038 except error.CensoredNodeError:
1033 1039 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1034 1040 return ""
1035 1041 raise util.Abort(_("censored node: %s") % short(self._filenode),
1036 1042 hint=_("set censor.policy to ignore errors"))
1037 1043
1038 1044 def size(self):
1039 1045 return self._filelog.size(self._filerev)
1040 1046
1041 1047 def renamed(self):
1042 1048 """check if file was actually renamed in this changeset revision
1043 1049
1044 1050 If a rename is logged in the file revision, we report the copy for the
1045 1051 changeset only if the file revision's linkrev points back to the changeset
1046 1052 in question or both changeset parents contain different file revisions.
1047 1053 """
1048 1054
1049 1055 renamed = self._filelog.renamed(self._filenode)
1050 1056 if not renamed:
1051 1057 return renamed
1052 1058
1053 1059 if self.rev() == self.linkrev():
1054 1060 return renamed
1055 1061
1056 1062 name = self.path()
1057 1063 fnode = self._filenode
1058 1064 for p in self._changectx.parents():
1059 1065 try:
1060 1066 if fnode == p.filenode(name):
1061 1067 return None
1062 1068 except error.LookupError:
1063 1069 pass
1064 1070 return renamed
1065 1071
1066 1072 def children(self):
1067 1073 # hard for renames
1068 1074 c = self._filelog.children(self._filenode)
1069 1075 return [filectx(self._repo, self._path, fileid=x,
1070 1076 filelog=self._filelog) for x in c]
1071 1077
1072 1078 class committablectx(basectx):
1073 1079 """A committablectx object provides common functionality for a context that
1074 1080 wants the ability to commit, e.g. workingctx or memctx."""
1075 1081 def __init__(self, repo, text="", user=None, date=None, extra=None,
1076 1082 changes=None):
1077 1083 self._repo = repo
1078 1084 self._rev = None
1079 1085 self._node = None
1080 1086 self._text = text
1081 1087 if date:
1082 1088 self._date = util.parsedate(date)
1083 1089 if user:
1084 1090 self._user = user
1085 1091 if changes:
1086 1092 self._status = changes
1087 1093
1088 1094 self._extra = {}
1089 1095 if extra:
1090 1096 self._extra = extra.copy()
1091 1097 if 'branch' not in self._extra:
1092 1098 try:
1093 1099 branch = encoding.fromlocal(self._repo.dirstate.branch())
1094 1100 except UnicodeDecodeError:
1095 1101 raise util.Abort(_('branch name not in UTF-8!'))
1096 1102 self._extra['branch'] = branch
1097 1103 if self._extra['branch'] == '':
1098 1104 self._extra['branch'] = 'default'
1099 1105
1100 1106 def __str__(self):
1101 1107 return str(self._parents[0]) + "+"
1102 1108
1103 1109 def __nonzero__(self):
1104 1110 return True
1105 1111
1106 1112 def _buildflagfunc(self):
1107 1113 # Create a fallback function for getting file flags when the
1108 1114 # filesystem doesn't support them
1109 1115
1110 1116 copiesget = self._repo.dirstate.copies().get
1111 1117
1112 1118 if len(self._parents) < 2:
1113 1119 # when we have one parent, it's easy: copy from parent
1114 1120 man = self._parents[0].manifest()
1115 1121 def func(f):
1116 1122 f = copiesget(f, f)
1117 1123 return man.flags(f)
1118 1124 else:
1119 1125 # merges are tricky: we try to reconstruct the unstored
1120 1126 # result from the merge (issue1802)
1121 1127 p1, p2 = self._parents
1122 1128 pa = p1.ancestor(p2)
1123 1129 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1124 1130
1125 1131 def func(f):
1126 1132 f = copiesget(f, f) # may be wrong for merges with copies
1127 1133 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1128 1134 if fl1 == fl2:
1129 1135 return fl1
1130 1136 if fl1 == fla:
1131 1137 return fl2
1132 1138 if fl2 == fla:
1133 1139 return fl1
1134 1140 return '' # punt for conflicts
1135 1141
1136 1142 return func
1137 1143
1138 1144 @propertycache
1139 1145 def _flagfunc(self):
1140 1146 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1141 1147
1142 1148 @propertycache
1143 1149 def _manifest(self):
1144 1150 """generate a manifest corresponding to the values in self._status
1145 1151
1146 1152 This reuses the file nodeid from the parent, but we append an extra
1147 1153 letter when modified. Modified files get an extra 'm' while added files
1148 1154 get an extra 'a'. This is used by manifest merge to see that files are
1149 1155 different and by the update logic to avoid deleting newly added files.
1150 1156 """
1151 1157
1152 1158 man1 = self._parents[0].manifest()
1153 1159 man = man1.copy()
1154 1160 if len(self._parents) > 1:
1155 1161 man2 = self.p2().manifest()
1156 1162 def getman(f):
1157 1163 if f in man1:
1158 1164 return man1
1159 1165 return man2
1160 1166 else:
1161 1167 getman = lambda f: man1
1162 1168
1163 1169 copied = self._repo.dirstate.copies()
1164 1170 ff = self._flagfunc
1165 1171 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1166 1172 for f in l:
1167 1173 orig = copied.get(f, f)
1168 1174 man[f] = getman(orig).get(orig, nullid) + i
1169 1175 try:
1170 1176 man.setflag(f, ff(f))
1171 1177 except OSError:
1172 1178 pass
1173 1179
1174 1180 for f in self._status.deleted + self._status.removed:
1175 1181 if f in man:
1176 1182 del man[f]
1177 1183
1178 1184 return man
1179 1185
1180 1186 @propertycache
1181 1187 def _status(self):
1182 1188 return self._repo.status()
1183 1189
1184 1190 @propertycache
1185 1191 def _user(self):
1186 1192 return self._repo.ui.username()
1187 1193
1188 1194 @propertycache
1189 1195 def _date(self):
1190 1196 return util.makedate()
1191 1197
1192 1198 def subrev(self, subpath):
1193 1199 return None
1194 1200
1195 1201 def user(self):
1196 1202 return self._user or self._repo.ui.username()
1197 1203 def date(self):
1198 1204 return self._date
1199 1205 def description(self):
1200 1206 return self._text
1201 1207 def files(self):
1202 1208 return sorted(self._status.modified + self._status.added +
1203 1209 self._status.removed)
1204 1210
1205 1211 def modified(self):
1206 1212 return self._status.modified
1207 1213 def added(self):
1208 1214 return self._status.added
1209 1215 def removed(self):
1210 1216 return self._status.removed
1211 1217 def deleted(self):
1212 1218 return self._status.deleted
1213 1219 def branch(self):
1214 1220 return encoding.tolocal(self._extra['branch'])
1215 1221 def closesbranch(self):
1216 1222 return 'close' in self._extra
1217 1223 def extra(self):
1218 1224 return self._extra
1219 1225
1220 1226 def tags(self):
1221 1227 t = []
1222 1228 for p in self.parents():
1223 1229 t.extend(p.tags())
1224 1230 return t
1225 1231
1226 1232 def bookmarks(self):
1227 1233 b = []
1228 1234 for p in self.parents():
1229 1235 b.extend(p.bookmarks())
1230 1236 return b
1231 1237
1232 1238 def phase(self):
1233 1239 phase = phases.draft # default phase to draft
1234 1240 for p in self.parents():
1235 1241 phase = max(phase, p.phase())
1236 1242 return phase
1237 1243
1238 1244 def hidden(self):
1239 1245 return False
1240 1246
1241 1247 def children(self):
1242 1248 return []
1243 1249
1244 1250 def flags(self, path):
1245 1251 if '_manifest' in self.__dict__:
1246 1252 try:
1247 1253 return self._manifest.flags(path)
1248 1254 except KeyError:
1249 1255 return ''
1250 1256
1251 1257 try:
1252 1258 return self._flagfunc(path)
1253 1259 except OSError:
1254 1260 return ''
1255 1261
1256 1262 def ancestor(self, c2):
1257 1263 """return the "best" ancestor context of self and c2"""
1258 1264 return self._parents[0].ancestor(c2) # punt on two parents for now
1259 1265
1260 1266 def walk(self, match):
1261 1267 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1262 1268 True, False))
1263 1269
1264 1270 def matches(self, match):
1265 1271 return sorted(self._repo.dirstate.matches(match))
1266 1272
1267 1273 def ancestors(self):
1268 1274 for p in self._parents:
1269 1275 yield p
1270 1276 for a in self._repo.changelog.ancestors(
1271 1277 [p.rev() for p in self._parents]):
1272 1278 yield changectx(self._repo, a)
1273 1279
1274 1280 def markcommitted(self, node):
1275 1281 """Perform post-commit cleanup necessary after committing this ctx
1276 1282
1277 1283 Specifically, this updates backing stores this working context
1278 1284 wraps to reflect the fact that the changes reflected by this
1279 1285 workingctx have been committed. For example, it marks
1280 1286 modified and added files as normal in the dirstate.
1281 1287
1282 1288 """
1283 1289
1284 1290 self._repo.dirstate.beginparentchange()
1285 1291 for f in self.modified() + self.added():
1286 1292 self._repo.dirstate.normal(f)
1287 1293 for f in self.removed():
1288 1294 self._repo.dirstate.drop(f)
1289 1295 self._repo.dirstate.setparents(node)
1290 1296 self._repo.dirstate.endparentchange()
1291 1297
1292 1298 def dirs(self):
1293 1299 return self._repo.dirstate.dirs()
1294 1300
1295 1301 class workingctx(committablectx):
1296 1302 """A workingctx object makes access to data related to
1297 1303 the current working directory convenient.
1298 1304 date - any valid date string or (unixtime, offset), or None.
1299 1305 user - username string, or None.
1300 1306 extra - a dictionary of extra values, or None.
1301 1307 changes - a list of file lists as returned by localrepo.status()
1302 1308 or None to use the repository status.
1303 1309 """
1304 1310 def __init__(self, repo, text="", user=None, date=None, extra=None,
1305 1311 changes=None):
1306 1312 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1307 1313
1308 1314 def __iter__(self):
1309 1315 d = self._repo.dirstate
1310 1316 for f in d:
1311 1317 if d[f] != 'r':
1312 1318 yield f
1313 1319
1314 1320 def __contains__(self, key):
1315 1321 return self._repo.dirstate[key] not in "?r"
1316 1322
1317 1323 @propertycache
1318 1324 def _parents(self):
1319 1325 p = self._repo.dirstate.parents()
1320 1326 if p[1] == nullid:
1321 1327 p = p[:-1]
1322 1328 return [changectx(self._repo, x) for x in p]
1323 1329
1324 1330 def filectx(self, path, filelog=None):
1325 1331 """get a file context from the working directory"""
1326 1332 return workingfilectx(self._repo, path, workingctx=self,
1327 1333 filelog=filelog)
1328 1334
1329 1335 def dirty(self, missing=False, merge=True, branch=True):
1330 1336 "check whether a working directory is modified"
1331 1337 # check subrepos first
1332 1338 for s in sorted(self.substate):
1333 1339 if self.sub(s).dirty():
1334 1340 return True
1335 1341 # check current working dir
1336 1342 return ((merge and self.p2()) or
1337 1343 (branch and self.branch() != self.p1().branch()) or
1338 1344 self.modified() or self.added() or self.removed() or
1339 1345 (missing and self.deleted()))
1340 1346
1341 1347 def add(self, list, prefix=""):
1342 1348 join = lambda f: os.path.join(prefix, f)
1343 1349 wlock = self._repo.wlock()
1344 1350 ui, ds = self._repo.ui, self._repo.dirstate
1345 1351 try:
1346 1352 rejected = []
1347 1353 lstat = self._repo.wvfs.lstat
1348 1354 for f in list:
1349 1355 scmutil.checkportable(ui, join(f))
1350 1356 try:
1351 1357 st = lstat(f)
1352 1358 except OSError:
1353 1359 ui.warn(_("%s does not exist!\n") % join(f))
1354 1360 rejected.append(f)
1355 1361 continue
1356 1362 if st.st_size > 10000000:
1357 1363 ui.warn(_("%s: up to %d MB of RAM may be required "
1358 1364 "to manage this file\n"
1359 1365 "(use 'hg revert %s' to cancel the "
1360 1366 "pending addition)\n")
1361 1367 % (f, 3 * st.st_size // 1000000, join(f)))
1362 1368 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1363 1369 ui.warn(_("%s not added: only files and symlinks "
1364 1370 "supported currently\n") % join(f))
1365 1371 rejected.append(f)
1366 1372 elif ds[f] in 'amn':
1367 1373 ui.warn(_("%s already tracked!\n") % join(f))
1368 1374 elif ds[f] == 'r':
1369 1375 ds.normallookup(f)
1370 1376 else:
1371 1377 ds.add(f)
1372 1378 return rejected
1373 1379 finally:
1374 1380 wlock.release()
1375 1381
1376 1382 def forget(self, files, prefix=""):
1377 1383 join = lambda f: os.path.join(prefix, f)
1378 1384 wlock = self._repo.wlock()
1379 1385 try:
1380 1386 rejected = []
1381 1387 for f in files:
1382 1388 if f not in self._repo.dirstate:
1383 1389 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1384 1390 rejected.append(f)
1385 1391 elif self._repo.dirstate[f] != 'a':
1386 1392 self._repo.dirstate.remove(f)
1387 1393 else:
1388 1394 self._repo.dirstate.drop(f)
1389 1395 return rejected
1390 1396 finally:
1391 1397 wlock.release()
1392 1398
1393 1399 def undelete(self, list):
1394 1400 pctxs = self.parents()
1395 1401 wlock = self._repo.wlock()
1396 1402 try:
1397 1403 for f in list:
1398 1404 if self._repo.dirstate[f] != 'r':
1399 1405 self._repo.ui.warn(_("%s not removed!\n") % f)
1400 1406 else:
1401 1407 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1402 1408 t = fctx.data()
1403 1409 self._repo.wwrite(f, t, fctx.flags())
1404 1410 self._repo.dirstate.normal(f)
1405 1411 finally:
1406 1412 wlock.release()
1407 1413
1408 1414 def copy(self, source, dest):
1409 1415 try:
1410 1416 st = self._repo.wvfs.lstat(dest)
1411 1417 except OSError, err:
1412 1418 if err.errno != errno.ENOENT:
1413 1419 raise
1414 1420 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1415 1421 return
1416 1422 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1417 1423 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1418 1424 "symbolic link\n") % dest)
1419 1425 else:
1420 1426 wlock = self._repo.wlock()
1421 1427 try:
1422 1428 if self._repo.dirstate[dest] in '?':
1423 1429 self._repo.dirstate.add(dest)
1424 1430 elif self._repo.dirstate[dest] in 'r':
1425 1431 self._repo.dirstate.normallookup(dest)
1426 1432 self._repo.dirstate.copy(source, dest)
1427 1433 finally:
1428 1434 wlock.release()
1429 1435
1430 1436 def _filtersuspectsymlink(self, files):
1431 1437 if not files or self._repo.dirstate._checklink:
1432 1438 return files
1433 1439
1434 1440 # Symlink placeholders may get non-symlink-like contents
1435 1441 # via user error or dereferencing by NFS or Samba servers,
1436 1442 # so we filter out any placeholders that don't look like a
1437 1443 # symlink
1438 1444 sane = []
1439 1445 for f in files:
1440 1446 if self.flags(f) == 'l':
1441 1447 d = self[f].data()
1442 1448 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1443 1449 self._repo.ui.debug('ignoring suspect symlink placeholder'
1444 1450 ' "%s"\n' % f)
1445 1451 continue
1446 1452 sane.append(f)
1447 1453 return sane
1448 1454
1449 1455 def _checklookup(self, files):
1450 1456 # check for any possibly clean files
1451 1457 if not files:
1452 1458 return [], []
1453 1459
1454 1460 modified = []
1455 1461 fixup = []
1456 1462 pctx = self._parents[0]
1457 1463 # do a full compare of any files that might have changed
1458 1464 for f in sorted(files):
1459 1465 if (f not in pctx or self.flags(f) != pctx.flags(f)
1460 1466 or pctx[f].cmp(self[f])):
1461 1467 modified.append(f)
1462 1468 else:
1463 1469 fixup.append(f)
1464 1470
1465 1471 # update dirstate for files that are actually clean
1466 1472 if fixup:
1467 1473 try:
1468 1474 # updating the dirstate is optional
1469 1475 # so we don't wait on the lock
1470 1476 # wlock can invalidate the dirstate, so cache normal _after_
1471 1477 # taking the lock
1472 1478 wlock = self._repo.wlock(False)
1473 1479 normal = self._repo.dirstate.normal
1474 1480 try:
1475 1481 for f in fixup:
1476 1482 normal(f)
1477 1483 finally:
1478 1484 wlock.release()
1479 1485 except error.LockError:
1480 1486 pass
1481 1487 return modified, fixup
1482 1488
1483 1489 def _manifestmatches(self, match, s):
1484 1490 """Slow path for workingctx
1485 1491
1486 1492 The fast path is when we compare the working directory to its parent
1487 1493 which means this function is comparing with a non-parent; therefore we
1488 1494 need to build a manifest and return what matches.
1489 1495 """
1490 1496 mf = self._repo['.']._manifestmatches(match, s)
1491 1497 for f in s.modified + s.added:
1492 1498 mf[f] = _newnode
1493 1499 mf.setflag(f, self.flags(f))
1494 1500 for f in s.removed:
1495 1501 if f in mf:
1496 1502 del mf[f]
1497 1503 return mf
1498 1504
1499 1505 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1500 1506 unknown=False):
1501 1507 '''Gets the status from the dirstate -- internal use only.'''
1502 1508 listignored, listclean, listunknown = ignored, clean, unknown
1503 1509 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1504 1510 subrepos = []
1505 1511 if '.hgsub' in self:
1506 1512 subrepos = sorted(self.substate)
1507 1513 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1508 1514 listclean, listunknown)
1509 1515
1510 1516 # check for any possibly clean files
1511 1517 if cmp:
1512 1518 modified2, fixup = self._checklookup(cmp)
1513 1519 s.modified.extend(modified2)
1514 1520
1515 1521 # update dirstate for files that are actually clean
1516 1522 if fixup and listclean:
1517 1523 s.clean.extend(fixup)
1518 1524
1519 1525 if match.always():
1520 1526 # cache for performance
1521 1527 if s.unknown or s.ignored or s.clean:
1522 1528 # "_status" is cached with list*=False in the normal route
1523 1529 self._status = scmutil.status(s.modified, s.added, s.removed,
1524 1530 s.deleted, [], [], [])
1525 1531 else:
1526 1532 self._status = s
1527 1533
1528 1534 return s
1529 1535
1530 1536 def _buildstatus(self, other, s, match, listignored, listclean,
1531 1537 listunknown):
1532 1538 """build a status with respect to another context
1533 1539
1534 1540 This includes logic for maintaining the fast path of status when
1535 1541 comparing the working directory against its parent, which is to skip
1536 1542 building a new manifest if self (working directory) is not comparing
1537 1543 against its parent (repo['.']).
1538 1544 """
1539 1545 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1540 1546 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1541 1547 # might have accidentally ended up with the entire contents of the file
1542 1548 # they are supposed to be linking to.
1543 1549 s.modified[:] = self._filtersuspectsymlink(s.modified)
1544 1550 if other != self._repo['.']:
1545 1551 s = super(workingctx, self)._buildstatus(other, s, match,
1546 1552 listignored, listclean,
1547 1553 listunknown)
1548 1554 return s
1549 1555
1550 1556 def _matchstatus(self, other, match):
1551 1557 """override the match method with a filter for directory patterns
1552 1558
1553 1559 We use inheritance to customize the match.bad method only in cases of
1554 1560 workingctx since it belongs only to the working directory when
1555 1561 comparing against the parent changeset.
1556 1562
1557 1563 If we aren't comparing against the working directory's parent, then we
1558 1564 just use the default match object sent to us.
1559 1565 """
1560 1566 superself = super(workingctx, self)
1561 1567 match = superself._matchstatus(other, match)
1562 1568 if other != self._repo['.']:
1563 1569 def bad(f, msg):
1564 1570 # 'f' may be a directory pattern from 'match.files()',
1565 1571 # so 'f not in ctx1' is not enough
1566 1572 if f not in other and f not in other.dirs():
1567 1573 self._repo.ui.warn('%s: %s\n' %
1568 1574 (self._repo.dirstate.pathto(f), msg))
1569 1575 match.bad = bad
1570 1576 return match
1571 1577
1572 1578 class committablefilectx(basefilectx):
1573 1579 """A committablefilectx provides common functionality for a file context
1574 1580 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1575 1581 def __init__(self, repo, path, filelog=None, ctx=None):
1576 1582 self._repo = repo
1577 1583 self._path = path
1578 1584 self._changeid = None
1579 1585 self._filerev = self._filenode = None
1580 1586
1581 1587 if filelog is not None:
1582 1588 self._filelog = filelog
1583 1589 if ctx:
1584 1590 self._changectx = ctx
1585 1591
1586 1592 def __nonzero__(self):
1587 1593 return True
1588 1594
1589 1595 def parents(self):
1590 1596 '''return parent filectxs, following copies if necessary'''
1591 1597 def filenode(ctx, path):
1592 1598 return ctx._manifest.get(path, nullid)
1593 1599
1594 1600 path = self._path
1595 1601 fl = self._filelog
1596 1602 pcl = self._changectx._parents
1597 1603 renamed = self.renamed()
1598 1604
1599 1605 if renamed:
1600 1606 pl = [renamed + (None,)]
1601 1607 else:
1602 1608 pl = [(path, filenode(pcl[0], path), fl)]
1603 1609
1604 1610 for pc in pcl[1:]:
1605 1611 pl.append((path, filenode(pc, path), fl))
1606 1612
1607 1613 return [filectx(self._repo, p, fileid=n, filelog=l)
1608 1614 for p, n, l in pl if n != nullid]
1609 1615
1610 1616 def children(self):
1611 1617 return []
1612 1618
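# Illustrative sketch, not part of context.py: because parents() follows
# copies, repeatedly taking the first parent of a file context walks back
# through the file's renames.  '_copychain' and the use of repo[None]
# (the working directory context) are assumptions made for this example.
def _copychain(repo, path):
    fctx = repo[None][path]          # workingfilectx for 'path'
    names = [fctx.path()]
    parents = fctx.parents()
    while parents:
        fctx = parents[0]            # first parent, following any rename
        if fctx.path() != names[-1]:
            names.append(fctx.path())
        parents = fctx.parents()
    return names
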
1613 1619 class workingfilectx(committablefilectx):
1614 1620 """A workingfilectx object makes access to data related to a particular
1615 1621 file in the working directory convenient."""
1616 1622 def __init__(self, repo, path, filelog=None, workingctx=None):
1617 1623 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1618 1624
1619 1625 @propertycache
1620 1626 def _changectx(self):
1621 1627 return workingctx(self._repo)
1622 1628
1623 1629 def data(self):
1624 1630 return self._repo.wread(self._path)
1625 1631 def renamed(self):
1626 1632 rp = self._repo.dirstate.copied(self._path)
1627 1633 if not rp:
1628 1634 return None
1629 1635 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1630 1636
1631 1637 def size(self):
1632 1638 return self._repo.wvfs.lstat(self._path).st_size
1633 1639 def date(self):
1634 1640 t, tz = self._changectx.date()
1635 1641 try:
1636 1642 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1637 1643 except OSError, err:
1638 1644 if err.errno != errno.ENOENT:
1639 1645 raise
1640 1646 return (t, tz)
1641 1647
1642 1648 def cmp(self, fctx):
1643 1649 """compare with other file context
1644 1650
1645 1651 returns True if different from fctx.
1646 1652 """
1647 1653 # fctx should be a filectx (not a workingfilectx)
1648 1654 # invert comparison to reuse the same code path
1649 1655 return fctx.cmp(self)
1650 1656
1651 1657 def remove(self, ignoremissing=False):
1652 1658 """wraps unlink for a repo's working directory"""
1653 1659 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1654 1660
1655 1661 def write(self, data, flags):
1656 1662 """wraps repo.wwrite"""
1657 1663 self._repo.wwrite(self._path, data, flags)
1658 1664
1659 1665 class workingcommitctx(workingctx):
1660 1666 """A workingcommitctx object makes access to data related to
1661 1667 the revision being committed convenient.
1662 1668
1663 1669 This hides changes in the working directory, if they aren't
1664 1670 committed in this context.
1665 1671 """
1666 1672 def __init__(self, repo, changes,
1667 1673 text="", user=None, date=None, extra=None):
1668 1674 super(workingctx, self).__init__(repo, text, user, date, extra,
1669 1675 changes)
1670 1676
1671 1677 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1672 1678 unknown=False):
1673 1679 """Return matched files only in ``self._status``
1674 1680
1675 1681 Uncommitted files appear "clean" in this context, even if
1676 1682 they are not actually clean in the working directory.
1677 1683 """
1678 1684 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1679 1685 if clean:
1680 1686 clean = [f for f in self._manifest if f not in self._changedset]
1681 1687 else:
1682 1688 clean = []
1683 1689 return scmutil.status([f for f in self._status.modified if match(f)],
1684 1690 [f for f in self._status.added if match(f)],
1685 1691 [f for f in self._status.removed if match(f)],
1686 1692 [], [], [], clean)
1687 1693
1688 1694 @propertycache
1689 1695 def _changedset(self):
1690 1696 """Return the set of files changed in this context
1691 1697 """
1692 1698 changed = set(self._status.modified)
1693 1699 changed.update(self._status.added)
1694 1700 changed.update(self._status.removed)
1695 1701 return changed
1696 1702
1697 1703 class memctx(committablectx):
1698 1704 """Use memctx to perform in-memory commits via localrepo.commitctx().
1699 1705
1700 1706 Revision information is supplied at initialization time, while
1701 1707 related file data is made available through a callback
1702 1708 mechanism. 'repo' is the current localrepo, 'parents' is a
1703 1709 sequence of two parent revision identifiers (pass None for every
1704 1710 missing parent), 'text' is the commit message and 'files' lists
1705 1711 names of files touched by the revision (normalized and relative to
1706 1712 repository root).
1707 1713
1708 1714 filectxfn(repo, memctx, path) is a callable receiving the
1709 1715 repository, the current memctx object and the normalized path of
1710 1716 requested file, relative to repository root. It is fired by the
1711 1717 commit function for every file in 'files', but the call order is
1712 1718 undefined. If the file is available in the revision being
1713 1719 committed (updated or added), filectxfn returns a memfilectx
1714 1720 object. If the file was removed, filectxfn raises an
1715 1721 IOError. Moved files are represented by marking the source file
1716 1722 removed and the new file added with copy information (see
1717 1723 memfilectx).
1718 1724
1719 1725 'user' is the committer name and defaults to the current
1720 1726 repository username, 'date' is the commit date in any format
1721 1727 supported by util.parsedate() and defaults to the current date,
1722 1728 and 'extra' is a dictionary of metadata or is left empty.
1723 1729 """
1724 1730
1725 1731 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1726 1732 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1727 1733 # this field to determine what to do in filectxfn.
1728 1734 _returnnoneformissingfiles = True
1729 1735
1730 1736 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1731 1737 date=None, extra=None, editor=False):
1732 1738 super(memctx, self).__init__(repo, text, user, date, extra)
1733 1739 self._rev = None
1734 1740 self._node = None
1735 1741 parents = [(p or nullid) for p in parents]
1736 1742 p1, p2 = parents
1737 1743 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1738 1744 files = sorted(set(files))
1739 1745 self._files = files
1740 1746 self.substate = {}
1741 1747
1742 1748 # if store is not callable, wrap it in a function
1743 1749 if not callable(filectxfn):
1744 1750 def getfilectx(repo, memctx, path):
1745 1751 fctx = filectxfn[path]
1746 1752 # this is weird but apparently we only keep track of one parent
1747 1753 # (why not only store that instead of a tuple?)
1748 1754 copied = fctx.renamed()
1749 1755 if copied:
1750 1756 copied = copied[0]
1751 1757 return memfilectx(repo, path, fctx.data(),
1752 1758 islink=fctx.islink(), isexec=fctx.isexec(),
1753 1759 copied=copied, memctx=memctx)
1754 1760 self._filectxfn = getfilectx
1755 1761 else:
1756 1762 # "util.cachefunc" reduces invocation of possibly expensive
1757 1763 # "filectxfn" for performance (e.g. converting from another VCS)
1758 1764 self._filectxfn = util.cachefunc(filectxfn)
1759 1765
1760 1766 self._extra = extra and extra.copy() or {}
1761 1767 if self._extra.get('branch', '') == '':
1762 1768 self._extra['branch'] = 'default'
1763 1769
1764 1770 if editor:
1765 1771 self._text = editor(self._repo, self, [])
1766 1772 self._repo.savecommitmessage(self._text)
1767 1773
1768 1774 def filectx(self, path, filelog=None):
1769 1775 """get a file context from the working directory
1770 1776
1771 1777 Returns None if file doesn't exist and should be removed."""
1772 1778 return self._filectxfn(self._repo, self, path)
1773 1779
1774 1780 def commit(self):
1775 1781 """commit context to the repo"""
1776 1782 return self._repo.commitctx(self)
1777 1783
1778 1784 @propertycache
1779 1785 def _manifest(self):
1780 1786 """generate a manifest based on the return values of filectxfn"""
1781 1787
1782 1788 # keep this simple for now; just worry about p1
1783 1789 pctx = self._parents[0]
1784 1790 man = pctx.manifest().copy()
1785 1791
1786 1792 for f in self._status.modified:
1787 1793 p1node = nullid
1788 1794 p2node = nullid
1789 1795 p = pctx[f].parents() # if file isn't in pctx, check p2?
1790 1796 if len(p) > 0:
1791 1797 p1node = p[0].node()
1792 1798 if len(p) > 1:
1793 1799 p2node = p[1].node()
1794 1800 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1795 1801
1796 1802 for f in self._status.added:
1797 1803 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1798 1804
1799 1805 for f in self._status.removed:
1800 1806 if f in man:
1801 1807 del man[f]
1802 1808
1803 1809 return man
1804 1810
1805 1811 @propertycache
1806 1812 def _status(self):
1807 1813 """Calculate exact status from ``files`` specified at construction
1808 1814 """
1809 1815 man1 = self.p1().manifest()
1810 1816 p2 = self._parents[1]
1811 1817 # "1 < len(self._parents)" can't be used for checking
1812 1818 # existence of the 2nd parent, because "memctx._parents" is
1813 1819 # explicitly initialized by the list, of which length is 2.
1814 1820 if p2.node() != nullid:
1815 1821 man2 = p2.manifest()
1816 1822 managing = lambda f: f in man1 or f in man2
1817 1823 else:
1818 1824 managing = lambda f: f in man1
1819 1825
1820 1826 modified, added, removed = [], [], []
1821 1827 for f in self._files:
1822 1828 if not managing(f):
1823 1829 added.append(f)
1824 1830 elif self[f]:
1825 1831 modified.append(f)
1826 1832 else:
1827 1833 removed.append(f)
1828 1834
1829 1835 return scmutil.status(modified, added, removed, [], [], [], [])
1830 1836
1831 1837 class memfilectx(committablefilectx):
1832 1838 """memfilectx represents an in-memory file to commit.
1833 1839
1834 1840 See memctx and committablefilectx for more details.
1835 1841 """
1836 1842 def __init__(self, repo, path, data, islink=False,
1837 1843 isexec=False, copied=None, memctx=None):
1838 1844 """
1839 1845 path is the normalized file path relative to repository root.
1840 1846 data is the file content as a string.
1841 1847 islink is True if the file is a symbolic link.
1842 1848 isexec is True if the file is executable.
1843 1849 copied is the source file path if the current file was copied in the
1844 1850 revision being committed, or None."""
1845 1851 super(memfilectx, self).__init__(repo, path, None, memctx)
1846 1852 self._data = data
1847 1853 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1848 1854 self._copied = None
1849 1855 if copied:
1850 1856 self._copied = (copied, nullid)
1851 1857
1852 1858 def data(self):
1853 1859 return self._data
1854 1860 def size(self):
1855 1861 return len(self.data())
1856 1862 def flags(self):
1857 1863 return self._flags
1858 1864 def renamed(self):
1859 1865 return self._copied
1860 1866
1861 1867 def remove(self, ignoremissing=False):
1862 1868 """wraps unlink for a repo's working directory"""
1863 1869 # need to figure out what to do here
1864 1870 del self._changectx[self._path]
1865 1871
1866 1872 def write(self, data, flags):
1867 1873 """wraps repo.wwrite"""
1868 1874 self._data = data
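# Illustrative sketch, not part of context.py: how a script or extension
# might use memctx and memfilectx (documented above) to create a commit
# without touching the working directory.  '_commitinmemory' and its
# 'files' mapping are assumptions for this example; the memctx,
# memfilectx and repo.commitctx() calls follow the signatures above.
def _commitinmemory(repo, files, text, user=None, date=None):
    """'files' maps repository-relative paths to new file content,
    or to None for paths that should be removed."""
    def filectxfn(repo, mctx, path):
        data = files[path]
        if data is None:
            # removed file: newer Mercurial expects None, <= 3.1 expects
            # IOError (see _returnnoneformissingfiles above)
            if getattr(mctx, '_returnnoneformissingfiles', False):
                return None
            raise IOError(errno.ENOENT, '%s is deleted' % path)
        return memfilectx(repo, path, data, islink=False, isexec=False,
                          copied=None, memctx=mctx)
    p1 = repo['.']
    ctx = memctx(repo, parents=(p1.node(), None), text=text,
                 files=sorted(files), filectxfn=filectxfn,
                 user=user, date=date)
    return repo.commitctx(ctx)       # equivalent to ctx.commit()
# Note that filectxfn may also be a plain mapping of path to filectx; the
# memctx constructor wraps non-callable values in a lookup function, as
# shown above.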