changectx: use names api to simplify and extend node lookup...
Sean Farley
r23560:aead6370 default
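The hunk below is against mercurial/context.py. Its substantive change is in changectx.__init__: instead of consulting repo._bookmarks directly, node lookup now goes through the names API. As a quick orientation, this is the core of the change lifted out of the hunk (a restatement for readability, not additional code; repo.names.singlenode() is the registry call the patch relies on):

    # before: only bookmarks were resolved at this point in __init__
    if changeid in repo._bookmarks:
        self._node = repo._bookmarks[changeid]
        self._rev = repo.changelog.rev(self._node)
        return

    # after: any name known to the repo.names registry resolves here;
    # KeyError means "not a registered name", so lookup falls through
    # to tags, branch tips and partial hashes exactly as before
    try:
        self._node = repo.names.singlenode(changeid)
        self._rev = repo.changelog.rev(self._node)
        return
    except KeyError:
        pass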
@@ -1,1685 +1,1689 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 class basectx(object):
21 21 """A basectx object represents the common logic for its children:
22 22 changectx: read-only context that is already present in the repo,
23 23 workingctx: a context that represents the working directory and can
24 24 be committed,
25 25 memctx: a context that represents changes in-memory and can also
26 26 be committed."""
27 27 def __new__(cls, repo, changeid='', *args, **kwargs):
28 28 if isinstance(changeid, basectx):
29 29 return changeid
30 30
31 31 o = super(basectx, cls).__new__(cls)
32 32
33 33 o._repo = repo
34 34 o._rev = nullrev
35 35 o._node = nullid
36 36
37 37 return o
38 38
39 39 def __str__(self):
40 40 return short(self.node())
41 41
42 42 def __int__(self):
43 43 return self.rev()
44 44
45 45 def __repr__(self):
46 46 return "<%s %s>" % (type(self).__name__, str(self))
47 47
48 48 def __eq__(self, other):
49 49 try:
50 50 return type(self) == type(other) and self._rev == other._rev
51 51 except AttributeError:
52 52 return False
53 53
54 54 def __ne__(self, other):
55 55 return not (self == other)
56 56
57 57 def __contains__(self, key):
58 58 return key in self._manifest
59 59
60 60 def __getitem__(self, key):
61 61 return self.filectx(key)
62 62
63 63 def __iter__(self):
64 64 for f in sorted(self._manifest):
65 65 yield f
66 66
67 67 def _manifestmatches(self, match, s):
68 68 """generate a new manifest filtered by the match argument
69 69
70 70 This method is for internal use only and mainly exists to provide an
71 71 object oriented way for other contexts to customize the manifest
72 72 generation.
73 73 """
74 74 return self.manifest().matches(match)
75 75
76 76 def _matchstatus(self, other, match):
77 77 """return match.always if match is none
78 78
79 79 This internal method provides a way for child objects to override the
80 80 match operator.
81 81 """
82 82 return match or matchmod.always(self._repo.root, self._repo.getcwd())
83 83
84 84 def _buildstatus(self, other, s, match, listignored, listclean,
85 85 listunknown):
86 86 """build a status with respect to another context"""
87 87 # Load earliest manifest first for caching reasons. More specifically,
88 88 # if you have revisions 1000 and 1001, 1001 is probably stored as a
89 89 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
90 90 # 1000 and cache it so that when you read 1001, we just need to apply a
91 91 # delta to what's in the cache. So that's one full reconstruction + one
92 92 # delta application.
93 93 if self.rev() is not None and self.rev() < other.rev():
94 94 self.manifest()
95 95 mf1 = other._manifestmatches(match, s)
96 96 mf2 = self._manifestmatches(match, s)
97 97
98 98 modified, added, clean = [], [], []
99 99 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
100 100 deletedset = set(deleted)
101 101 withflags = mf1.withflags() | mf2.withflags()
102 102 for fn, mf2node in mf2.iteritems():
103 103 if fn in mf1:
104 104 if (fn not in deletedset and
105 105 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
106 106 (mf1[fn] != mf2node and
107 107 (mf2node or self[fn].cmp(other[fn]))))):
108 108 modified.append(fn)
109 109 elif listclean:
110 110 clean.append(fn)
111 111 del mf1[fn]
112 112 elif fn not in deletedset:
113 113 added.append(fn)
114 114 removed = mf1.keys()
115 115 if removed:
116 116 # need to filter files if they are already reported as removed
117 117 unknown = [fn for fn in unknown if fn not in mf1]
118 118 ignored = [fn for fn in ignored if fn not in mf1]
119 119
120 120 return scmutil.status(modified, added, removed, deleted, unknown,
121 121 ignored, clean)
122 122
123 123 @propertycache
124 124 def substate(self):
125 125 return subrepo.state(self, self._repo.ui)
126 126
127 127 def subrev(self, subpath):
128 128 return self.substate[subpath][1]
129 129
130 130 def rev(self):
131 131 return self._rev
132 132 def node(self):
133 133 return self._node
134 134 def hex(self):
135 135 return hex(self.node())
136 136 def manifest(self):
137 137 return self._manifest
138 138 def phasestr(self):
139 139 return phases.phasenames[self.phase()]
140 140 def mutable(self):
141 141 return self.phase() > phases.public
142 142
143 143 def getfileset(self, expr):
144 144 return fileset.getfileset(self, expr)
145 145
146 146 def obsolete(self):
147 147 """True if the changeset is obsolete"""
148 148 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
149 149
150 150 def extinct(self):
151 151 """True if the changeset is extinct"""
152 152 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
153 153
154 154 def unstable(self):
155 155 """True if the changeset is not obsolete but it's ancestor are"""
156 156 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
157 157
158 158 def bumped(self):
159 159 """True if the changeset try to be a successor of a public changeset
160 160
161 161 Only non-public and non-obsolete changesets may be bumped.
162 162 """
163 163 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
164 164
165 165 def divergent(self):
166 166 """Is a successors of a changeset with multiple possible successors set
167 167
168 168 Only non-public and non-obsolete changesets may be divergent.
169 169 """
170 170 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
171 171
172 172 def troubled(self):
173 173 """True if the changeset is either unstable, bumped or divergent"""
174 174 return self.unstable() or self.bumped() or self.divergent()
175 175
176 176 def troubles(self):
177 177 """return the list of troubles affecting this changesets.
178 178
179 179 Troubles are returned as strings. Possible values are:
180 180 - unstable,
181 181 - bumped,
182 182 - divergent.
183 183 """
184 184 troubles = []
185 185 if self.unstable():
186 186 troubles.append('unstable')
187 187 if self.bumped():
188 188 troubles.append('bumped')
189 189 if self.divergent():
190 190 troubles.append('divergent')
191 191 return troubles
192 192
193 193 def parents(self):
194 194 """return contexts for each parent changeset"""
195 195 return self._parents
196 196
197 197 def p1(self):
198 198 return self._parents[0]
199 199
200 200 def p2(self):
201 201 if len(self._parents) == 2:
202 202 return self._parents[1]
203 203 return changectx(self._repo, -1)
204 204
205 205 def _fileinfo(self, path):
206 206 if '_manifest' in self.__dict__:
207 207 try:
208 208 return self._manifest[path], self._manifest.flags(path)
209 209 except KeyError:
210 210 raise error.ManifestLookupError(self._node, path,
211 211 _('not found in manifest'))
212 212 if '_manifestdelta' in self.__dict__ or path in self.files():
213 213 if path in self._manifestdelta:
214 214 return (self._manifestdelta[path],
215 215 self._manifestdelta.flags(path))
216 216 node, flag = self._repo.manifest.find(self._changeset[0], path)
217 217 if not node:
218 218 raise error.ManifestLookupError(self._node, path,
219 219 _('not found in manifest'))
220 220
221 221 return node, flag
222 222
223 223 def filenode(self, path):
224 224 return self._fileinfo(path)[0]
225 225
226 226 def flags(self, path):
227 227 try:
228 228 return self._fileinfo(path)[1]
229 229 except error.LookupError:
230 230 return ''
231 231
232 232 def sub(self, path):
233 233 return subrepo.subrepo(self, path)
234 234
235 235 def match(self, pats=[], include=None, exclude=None, default='glob'):
236 236 r = self._repo
237 237 return matchmod.match(r.root, r.getcwd(), pats,
238 238 include, exclude, default,
239 239 auditor=r.auditor, ctx=self)
240 240
241 241 def diff(self, ctx2=None, match=None, **opts):
242 242 """Returns a diff generator for the given contexts and matcher"""
243 243 if ctx2 is None:
244 244 ctx2 = self.p1()
245 245 if ctx2 is not None:
246 246 ctx2 = self._repo[ctx2]
247 247 diffopts = patch.diffopts(self._repo.ui, opts)
248 248 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
249 249
250 250 @propertycache
251 251 def _dirs(self):
252 252 return scmutil.dirs(self._manifest)
253 253
254 254 def dirs(self):
255 255 return self._dirs
256 256
257 257 def dirty(self, missing=False, merge=True, branch=True):
258 258 return False
259 259
260 260 def status(self, other=None, match=None, listignored=False,
261 261 listclean=False, listunknown=False, listsubrepos=False):
262 262 """return status of files between two nodes or node and working
263 263 directory.
264 264
265 265 If other is None, compare this node with working directory.
266 266
267 267 returns (modified, added, removed, deleted, unknown, ignored, clean)
268 268 """
269 269
270 270 ctx1 = self
271 271 ctx2 = self._repo[other]
272 272
273 273 # This next code block is, admittedly, fragile logic that tests for
274 274 # reversing the contexts and wouldn't need to exist if it weren't for
275 275 # the fast (and common) code path of comparing the working directory
276 276 # with its first parent.
277 277 #
278 278 # What we're aiming for here is the ability to call:
279 279 #
280 280 # workingctx.status(parentctx)
281 281 #
282 282 # If we always built the manifest for each context and compared those,
283 283 # then we'd be done. But the special case of the above call means we
284 284 # just copy the manifest of the parent.
285 285 reversed = False
286 286 if (not isinstance(ctx1, changectx)
287 287 and isinstance(ctx2, changectx)):
288 288 reversed = True
289 289 ctx1, ctx2 = ctx2, ctx1
290 290
291 291 match = ctx2._matchstatus(ctx1, match)
292 292 r = scmutil.status([], [], [], [], [], [], [])
293 293 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
294 294 listunknown)
295 295
296 296 if reversed:
297 297 # Reverse added and removed. Clear deleted, unknown and ignored as
298 298 # these make no sense to reverse.
299 299 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
300 300 r.clean)
301 301
302 302 if listsubrepos:
303 303 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
304 304 rev2 = ctx2.subrev(subpath)
305 305 try:
306 306 submatch = matchmod.narrowmatcher(subpath, match)
307 307 s = sub.status(rev2, match=submatch, ignored=listignored,
308 308 clean=listclean, unknown=listunknown,
309 309 listsubrepos=True)
310 310 for rfiles, sfiles in zip(r, s):
311 311 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
312 312 except error.LookupError:
313 313 self._repo.ui.status(_("skipping missing "
314 314 "subrepository: %s\n") % subpath)
315 315
316 316 for l in r:
317 317 l.sort()
318 318
319 319 return r
320 320
321 321
322 322 def makememctx(repo, parents, text, user, date, branch, files, store,
323 323 editor=None):
324 324 def getfilectx(repo, memctx, path):
325 325 data, mode, copied = store.getfile(path)
326 326 if data is None:
327 327 return None
328 328 islink, isexec = mode
329 329 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
330 330 copied=copied, memctx=memctx)
331 331 extra = {}
332 332 if branch:
333 333 extra['branch'] = encoding.fromlocal(branch)
334 334 ctx = memctx(repo, parents, text, files, getfilectx, user,
335 335 date, extra, editor)
336 336 return ctx
337 337
338 338 class changectx(basectx):
339 339 """A changecontext object makes access to data related to a particular
340 340 changeset convenient. It represents a read-only context already present in
341 341 the repo."""
342 342 def __init__(self, repo, changeid=''):
343 343 """changeid is a revision number, node, or tag"""
344 344
345 345 # since basectx.__new__ already took care of copying the object, we
346 346 # don't need to do anything in __init__, so we just exit here
347 347 if isinstance(changeid, basectx):
348 348 return
349 349
350 350 if changeid == '':
351 351 changeid = '.'
352 352 self._repo = repo
353 353
354 354 try:
355 355 if isinstance(changeid, int):
356 356 self._node = repo.changelog.node(changeid)
357 357 self._rev = changeid
358 358 return
359 359 if isinstance(changeid, long):
360 360 changeid = str(changeid)
361 361 if changeid == '.':
362 362 self._node = repo.dirstate.p1()
363 363 self._rev = repo.changelog.rev(self._node)
364 364 return
365 365 if changeid == 'null':
366 366 self._node = nullid
367 367 self._rev = nullrev
368 368 return
369 369 if changeid == 'tip':
370 370 self._node = repo.changelog.tip()
371 371 self._rev = repo.changelog.rev(self._node)
372 372 return
373 373 if len(changeid) == 20:
374 374 try:
375 375 self._node = changeid
376 376 self._rev = repo.changelog.rev(changeid)
377 377 return
378 378 except error.FilteredRepoLookupError:
379 379 raise
380 380 except LookupError:
381 381 pass
382 382
383 383 try:
384 384 r = int(changeid)
385 385 if str(r) != changeid:
386 386 raise ValueError
387 387 l = len(repo.changelog)
388 388 if r < 0:
389 389 r += l
390 390 if r < 0 or r >= l:
391 391 raise ValueError
392 392 self._rev = r
393 393 self._node = repo.changelog.node(r)
394 394 return
395 395 except error.FilteredIndexError:
396 396 raise
397 397 except (ValueError, OverflowError, IndexError):
398 398 pass
399 399
400 400 if len(changeid) == 40:
401 401 try:
402 402 self._node = bin(changeid)
403 403 self._rev = repo.changelog.rev(self._node)
404 404 return
405 405 except error.FilteredLookupError:
406 406 raise
407 407 except (TypeError, LookupError):
408 408 pass
409 409
410 if changeid in repo._bookmarks:
411 self._node = repo._bookmarks[changeid]
410 # lookup bookmarks through the name interface
411 try:
412 self._node = repo.names.singlenode(changeid)
412 413 self._rev = repo.changelog.rev(self._node)
413 414 return
415 except KeyError:
416 pass
417
414 418 if changeid in repo._tagscache.tags:
415 419 self._node = repo._tagscache.tags[changeid]
416 420 self._rev = repo.changelog.rev(self._node)
417 421 return
418 422 try:
419 423 self._node = repo.branchtip(changeid)
420 424 self._rev = repo.changelog.rev(self._node)
421 425 return
422 426 except error.FilteredRepoLookupError:
423 427 raise
424 428 except error.RepoLookupError:
425 429 pass
426 430
427 431 self._node = repo.unfiltered().changelog._partialmatch(changeid)
428 432 if self._node is not None:
429 433 self._rev = repo.changelog.rev(self._node)
430 434 return
431 435
432 436 # lookup failed
433 437 # check if it might have come from damaged dirstate
434 438 #
435 439 # XXX we could avoid the unfiltered if we had a recognizable
436 440 # exception for filtered changeset access
437 441 if changeid in repo.unfiltered().dirstate.parents():
438 442 msg = _("working directory has unknown parent '%s'!")
439 443 raise error.Abort(msg % short(changeid))
440 444 try:
441 445 if len(changeid) == 20:
442 446 changeid = hex(changeid)
443 447 except TypeError:
444 448 pass
445 449 except (error.FilteredIndexError, error.FilteredLookupError,
446 450 error.FilteredRepoLookupError):
447 451 if repo.filtername == 'visible':
448 452 msg = _("hidden revision '%s'") % changeid
449 453 hint = _('use --hidden to access hidden revisions')
450 454 raise error.FilteredRepoLookupError(msg, hint=hint)
451 455 msg = _("filtered revision '%s' (not in '%s' subset)")
452 456 msg %= (changeid, repo.filtername)
453 457 raise error.FilteredRepoLookupError(msg)
454 458 except IndexError:
455 459 pass
456 460 raise error.RepoLookupError(
457 461 _("unknown revision '%s'") % changeid)
458 462
459 463 def __hash__(self):
460 464 try:
461 465 return hash(self._rev)
462 466 except AttributeError:
463 467 return id(self)
464 468
465 469 def __nonzero__(self):
466 470 return self._rev != nullrev
467 471
468 472 @propertycache
469 473 def _changeset(self):
470 474 return self._repo.changelog.read(self.rev())
471 475
472 476 @propertycache
473 477 def _manifest(self):
474 478 return self._repo.manifest.read(self._changeset[0])
475 479
476 480 @propertycache
477 481 def _manifestdelta(self):
478 482 return self._repo.manifest.readdelta(self._changeset[0])
479 483
480 484 @propertycache
481 485 def _parents(self):
482 486 p = self._repo.changelog.parentrevs(self._rev)
483 487 if p[1] == nullrev:
484 488 p = p[:-1]
485 489 return [changectx(self._repo, x) for x in p]
486 490
487 491 def changeset(self):
488 492 return self._changeset
489 493 def manifestnode(self):
490 494 return self._changeset[0]
491 495
492 496 def user(self):
493 497 return self._changeset[1]
494 498 def date(self):
495 499 return self._changeset[2]
496 500 def files(self):
497 501 return self._changeset[3]
498 502 def description(self):
499 503 return self._changeset[4]
500 504 def branch(self):
501 505 return encoding.tolocal(self._changeset[5].get("branch"))
502 506 def closesbranch(self):
503 507 return 'close' in self._changeset[5]
504 508 def extra(self):
505 509 return self._changeset[5]
506 510 def tags(self):
507 511 return self._repo.nodetags(self._node)
508 512 def bookmarks(self):
509 513 return self._repo.nodebookmarks(self._node)
510 514 def phase(self):
511 515 return self._repo._phasecache.phase(self._repo, self._rev)
512 516 def hidden(self):
513 517 return self._rev in repoview.filterrevs(self._repo, 'visible')
514 518
515 519 def children(self):
516 520 """return contexts for each child changeset"""
517 521 c = self._repo.changelog.children(self._node)
518 522 return [changectx(self._repo, x) for x in c]
519 523
520 524 def ancestors(self):
521 525 for a in self._repo.changelog.ancestors([self._rev]):
522 526 yield changectx(self._repo, a)
523 527
524 528 def descendants(self):
525 529 for d in self._repo.changelog.descendants([self._rev]):
526 530 yield changectx(self._repo, d)
527 531
528 532 def filectx(self, path, fileid=None, filelog=None):
529 533 """get a file context from this changeset"""
530 534 if fileid is None:
531 535 fileid = self.filenode(path)
532 536 return filectx(self._repo, path, fileid=fileid,
533 537 changectx=self, filelog=filelog)
534 538
535 539 def ancestor(self, c2, warn=False):
536 540 """return the "best" ancestor context of self and c2
537 541
538 542 If there are multiple candidates, it will show a message and check
539 543 merge.preferancestor configuration before falling back to the
540 544 revlog ancestor."""
541 545 # deal with workingctxs
542 546 n2 = c2._node
543 547 if n2 is None:
544 548 n2 = c2._parents[0]._node
545 549 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
546 550 if not cahs:
547 551 anc = nullid
548 552 elif len(cahs) == 1:
549 553 anc = cahs[0]
550 554 else:
551 555 for r in self._repo.ui.configlist('merge', 'preferancestor'):
552 556 try:
553 557 ctx = changectx(self._repo, r)
554 558 except error.RepoLookupError:
555 559 continue
556 560 anc = ctx.node()
557 561 if anc in cahs:
558 562 break
559 563 else:
560 564 anc = self._repo.changelog.ancestor(self._node, n2)
561 565 if warn:
562 566 self._repo.ui.status(
563 567 (_("note: using %s as ancestor of %s and %s\n") %
564 568 (short(anc), short(self._node), short(n2))) +
565 569 ''.join(_(" alternatively, use --config "
566 570 "merge.preferancestor=%s\n") %
567 571 short(n) for n in sorted(cahs) if n != anc))
568 572 return changectx(self._repo, anc)
569 573
570 574 def descendant(self, other):
571 575 """True if other is descendant of this changeset"""
572 576 return self._repo.changelog.descendant(self._rev, other._rev)
573 577
574 578 def walk(self, match):
575 579 fset = set(match.files())
576 580 # for dirstate.walk, files=['.'] means "walk the whole tree".
577 581 # follow that here, too
578 582 fset.discard('.')
579 583
580 584 # avoid the entire walk if we're only looking for specific files
581 585 if fset and not match.anypats():
582 586 if util.all([fn in self for fn in fset]):
583 587 for fn in sorted(fset):
584 588 if match(fn):
585 589 yield fn
586 590 raise StopIteration
587 591
588 592 for fn in self:
589 593 if fn in fset:
590 594 # specified pattern is the exact name
591 595 fset.remove(fn)
592 596 if match(fn):
593 597 yield fn
594 598 for fn in sorted(fset):
595 599 if fn in self._dirs:
596 600 # specified pattern is a directory
597 601 continue
598 602 match.bad(fn, _('no such file in rev %s') % self)
599 603
600 604 def matches(self, match):
601 605 return self.walk(match)
602 606
603 607 class basefilectx(object):
604 608 """A filecontext object represents the common logic for its children:
605 609 filectx: read-only access to a filerevision that is already present
606 610 in the repo,
607 611 workingfilectx: a filecontext that represents files from the working
608 612 directory,
609 613 memfilectx: a filecontext that represents files in-memory."""
610 614 def __new__(cls, repo, path, *args, **kwargs):
611 615 return super(basefilectx, cls).__new__(cls)
612 616
613 617 @propertycache
614 618 def _filelog(self):
615 619 return self._repo.file(self._path)
616 620
617 621 @propertycache
618 622 def _changeid(self):
619 623 if '_changeid' in self.__dict__:
620 624 return self._changeid
621 625 elif '_changectx' in self.__dict__:
622 626 return self._changectx.rev()
623 627 else:
624 628 return self._filelog.linkrev(self._filerev)
625 629
626 630 @propertycache
627 631 def _filenode(self):
628 632 if '_fileid' in self.__dict__:
629 633 return self._filelog.lookup(self._fileid)
630 634 else:
631 635 return self._changectx.filenode(self._path)
632 636
633 637 @propertycache
634 638 def _filerev(self):
635 639 return self._filelog.rev(self._filenode)
636 640
637 641 @propertycache
638 642 def _repopath(self):
639 643 return self._path
640 644
641 645 def __nonzero__(self):
642 646 try:
643 647 self._filenode
644 648 return True
645 649 except error.LookupError:
646 650 # file is missing
647 651 return False
648 652
649 653 def __str__(self):
650 654 return "%s@%s" % (self.path(), self._changectx)
651 655
652 656 def __repr__(self):
653 657 return "<%s %s>" % (type(self).__name__, str(self))
654 658
655 659 def __hash__(self):
656 660 try:
657 661 return hash((self._path, self._filenode))
658 662 except AttributeError:
659 663 return id(self)
660 664
661 665 def __eq__(self, other):
662 666 try:
663 667 return (type(self) == type(other) and self._path == other._path
664 668 and self._filenode == other._filenode)
665 669 except AttributeError:
666 670 return False
667 671
668 672 def __ne__(self, other):
669 673 return not (self == other)
670 674
671 675 def filerev(self):
672 676 return self._filerev
673 677 def filenode(self):
674 678 return self._filenode
675 679 def flags(self):
676 680 return self._changectx.flags(self._path)
677 681 def filelog(self):
678 682 return self._filelog
679 683 def rev(self):
680 684 return self._changeid
681 685 def linkrev(self):
682 686 return self._filelog.linkrev(self._filerev)
683 687 def node(self):
684 688 return self._changectx.node()
685 689 def hex(self):
686 690 return self._changectx.hex()
687 691 def user(self):
688 692 return self._changectx.user()
689 693 def date(self):
690 694 return self._changectx.date()
691 695 def files(self):
692 696 return self._changectx.files()
693 697 def description(self):
694 698 return self._changectx.description()
695 699 def branch(self):
696 700 return self._changectx.branch()
697 701 def extra(self):
698 702 return self._changectx.extra()
699 703 def phase(self):
700 704 return self._changectx.phase()
701 705 def phasestr(self):
702 706 return self._changectx.phasestr()
703 707 def manifest(self):
704 708 return self._changectx.manifest()
705 709 def changectx(self):
706 710 return self._changectx
707 711
708 712 def path(self):
709 713 return self._path
710 714
711 715 def isbinary(self):
712 716 try:
713 717 return util.binary(self.data())
714 718 except IOError:
715 719 return False
716 720 def isexec(self):
717 721 return 'x' in self.flags()
718 722 def islink(self):
719 723 return 'l' in self.flags()
720 724
721 725 def cmp(self, fctx):
722 726 """compare with other file context
723 727
724 728 returns True if different than fctx.
725 729 """
726 730 if (fctx._filerev is None
727 731 and (self._repo._encodefilterpats
728 732 # if file data starts with '\1\n', empty metadata block is
729 733 # prepended, which adds 4 bytes to filelog.size().
730 734 or self.size() - 4 == fctx.size())
731 735 or self.size() == fctx.size()):
732 736 return self._filelog.cmp(self._filenode, fctx.data())
733 737
734 738 return True
735 739
736 740 def parents(self):
737 741 _path = self._path
738 742 fl = self._filelog
739 743 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
740 744
741 745 r = self._filelog.renamed(self._filenode)
742 746 if r:
743 747 pl[0] = (r[0], r[1], None)
744 748
745 749 return [filectx(self._repo, p, fileid=n, filelog=l)
746 750 for p, n, l in pl if n != nullid]
747 751
748 752 def p1(self):
749 753 return self.parents()[0]
750 754
751 755 def p2(self):
752 756 p = self.parents()
753 757 if len(p) == 2:
754 758 return p[1]
755 759 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
756 760
757 761 def annotate(self, follow=False, linenumber=None, diffopts=None):
758 762 '''returns a list of tuples of (ctx, line) for each line
759 763 in the file, where ctx is the filectx of the node where
760 764 that line was last changed.
761 765 This returns tuples of ((ctx, linenumber), line) for each line,
762 766 if "linenumber" parameter is NOT "None".
763 767 In such tuples, linenumber means one at the first appearance
764 768 in the managed file.
765 769 To reduce annotation cost,
766 770 this returns a fixed value (False) as the linenumber,
767 771 if "linenumber" parameter is "False".'''
768 772
769 773 if linenumber is None:
770 774 def decorate(text, rev):
771 775 return ([rev] * len(text.splitlines()), text)
772 776 elif linenumber:
773 777 def decorate(text, rev):
774 778 size = len(text.splitlines())
775 779 return ([(rev, i) for i in xrange(1, size + 1)], text)
776 780 else:
777 781 def decorate(text, rev):
778 782 return ([(rev, False)] * len(text.splitlines()), text)
779 783
780 784 def pair(parent, child):
781 785 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
782 786 refine=True)
783 787 for (a1, a2, b1, b2), t in blocks:
784 788 # Changed blocks ('!') or blocks made only of blank lines ('~')
785 789 # belong to the child.
786 790 if t == '=':
787 791 child[0][b1:b2] = parent[0][a1:a2]
788 792 return child
789 793
790 794 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
791 795
792 796 def parents(f):
793 797 pl = f.parents()
794 798
795 799 # Don't return renamed parents if we aren't following.
796 800 if not follow:
797 801 pl = [p for p in pl if p.path() == f.path()]
798 802
799 803 # renamed filectx won't have a filelog yet, so set it
800 804 # from the cache to save time
801 805 for p in pl:
802 806 if not '_filelog' in p.__dict__:
803 807 p._filelog = getlog(p.path())
804 808
805 809 return pl
806 810
807 811 # use linkrev to find the first changeset where self appeared
808 812 if self.rev() != self.linkrev():
809 813 base = self.filectx(self.filenode())
810 814 else:
811 815 base = self
812 816
813 817 # This algorithm would prefer to be recursive, but Python is a
814 818 # bit recursion-hostile. Instead we do an iterative
815 819 # depth-first search.
816 820
817 821 visit = [base]
818 822 hist = {}
819 823 pcache = {}
820 824 needed = {base: 1}
821 825 while visit:
822 826 f = visit[-1]
823 827 pcached = f in pcache
824 828 if not pcached:
825 829 pcache[f] = parents(f)
826 830
827 831 ready = True
828 832 pl = pcache[f]
829 833 for p in pl:
830 834 if p not in hist:
831 835 ready = False
832 836 visit.append(p)
833 837 if not pcached:
834 838 needed[p] = needed.get(p, 0) + 1
835 839 if ready:
836 840 visit.pop()
837 841 reusable = f in hist
838 842 if reusable:
839 843 curr = hist[f]
840 844 else:
841 845 curr = decorate(f.data(), f)
842 846 for p in pl:
843 847 if not reusable:
844 848 curr = pair(hist[p], curr)
845 849 if needed[p] == 1:
846 850 del hist[p]
847 851 del needed[p]
848 852 else:
849 853 needed[p] -= 1
850 854
851 855 hist[f] = curr
852 856 pcache[f] = []
853 857
854 858 return zip(hist[base][0], hist[base][1].splitlines(True))
855 859
856 860 def ancestors(self, followfirst=False):
857 861 visit = {}
858 862 c = self
859 863 cut = followfirst and 1 or None
860 864 while True:
861 865 for parent in c.parents()[:cut]:
862 866 visit[(parent.rev(), parent.node())] = parent
863 867 if not visit:
864 868 break
865 869 c = visit.pop(max(visit))
866 870 yield c
867 871
868 872 class filectx(basefilectx):
869 873 """A filecontext object makes access to data related to a particular
870 874 filerevision convenient."""
871 875 def __init__(self, repo, path, changeid=None, fileid=None,
872 876 filelog=None, changectx=None):
873 877 """changeid can be a changeset revision, node, or tag.
874 878 fileid can be a file revision or node."""
875 879 self._repo = repo
876 880 self._path = path
877 881
878 882 assert (changeid is not None
879 883 or fileid is not None
880 884 or changectx is not None), \
881 885 ("bad args: changeid=%r, fileid=%r, changectx=%r"
882 886 % (changeid, fileid, changectx))
883 887
884 888 if filelog is not None:
885 889 self._filelog = filelog
886 890
887 891 if changeid is not None:
888 892 self._changeid = changeid
889 893 if changectx is not None:
890 894 self._changectx = changectx
891 895 if fileid is not None:
892 896 self._fileid = fileid
893 897
894 898 @propertycache
895 899 def _changectx(self):
896 900 try:
897 901 return changectx(self._repo, self._changeid)
898 902 except error.RepoLookupError:
899 903 # Linkrev may point to any revision in the repository. When the
900 904 # repository is filtered this may lead to `filectx` trying to build
901 905 # `changectx` for a filtered revision. In such a case we fall back to
902 906 # creating `changectx` on the unfiltered version of the repository.
903 907 # This fallback should not be an issue because `changectx` from
904 908 # `filectx` are not used in complex operations that care about
905 909 # filtering.
906 910 #
907 911 # This fallback is a cheap and dirty fix that prevents several
908 912 # crashes. It does not ensure the behavior is correct. However the
909 913 # behavior was not correct before filtering either and "incorrect
910 914 # behavior" is seen as better than a "crash".
911 915 #
912 916 # Linkrevs have several serious troubles with filtering that are
913 917 # complicated to solve. Proper handling of the issue here should be
914 918 # considered once solving the linkrev issues is on the table.
915 919 return changectx(self._repo.unfiltered(), self._changeid)
916 920
917 921 def filectx(self, fileid):
918 922 '''opens an arbitrary revision of the file without
919 923 opening a new filelog'''
920 924 return filectx(self._repo, self._path, fileid=fileid,
921 925 filelog=self._filelog)
922 926
923 927 def data(self):
924 928 try:
925 929 return self._filelog.read(self._filenode)
926 930 except error.CensoredNodeError:
927 931 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
928 932 return ""
929 933 raise util.Abort(_("censored node: %s") % short(self._filenode),
930 934 hint=_("set censor.policy to ignore errors"))
931 935
932 936 def size(self):
933 937 return self._filelog.size(self._filerev)
934 938
935 939 def renamed(self):
936 940 """check if file was actually renamed in this changeset revision
937 941
938 942 If a rename is logged in the file revision, we report the copy for the changeset only
939 943 if the file revision's linkrev points back to the changeset in question
940 944 or both changeset parents contain different file revisions.
941 945 """
942 946
943 947 renamed = self._filelog.renamed(self._filenode)
944 948 if not renamed:
945 949 return renamed
946 950
947 951 if self.rev() == self.linkrev():
948 952 return renamed
949 953
950 954 name = self.path()
951 955 fnode = self._filenode
952 956 for p in self._changectx.parents():
953 957 try:
954 958 if fnode == p.filenode(name):
955 959 return None
956 960 except error.LookupError:
957 961 pass
958 962 return renamed
959 963
960 964 def children(self):
961 965 # hard for renames
962 966 c = self._filelog.children(self._filenode)
963 967 return [filectx(self._repo, self._path, fileid=x,
964 968 filelog=self._filelog) for x in c]
965 969
966 970 class committablectx(basectx):
967 971 """A committablectx object provides common functionality for a context that
968 972 wants the ability to commit, e.g. workingctx or memctx."""
969 973 def __init__(self, repo, text="", user=None, date=None, extra=None,
970 974 changes=None):
971 975 self._repo = repo
972 976 self._rev = None
973 977 self._node = None
974 978 self._text = text
975 979 if date:
976 980 self._date = util.parsedate(date)
977 981 if user:
978 982 self._user = user
979 983 if changes:
980 984 self._status = changes
981 985
982 986 self._extra = {}
983 987 if extra:
984 988 self._extra = extra.copy()
985 989 if 'branch' not in self._extra:
986 990 try:
987 991 branch = encoding.fromlocal(self._repo.dirstate.branch())
988 992 except UnicodeDecodeError:
989 993 raise util.Abort(_('branch name not in UTF-8!'))
990 994 self._extra['branch'] = branch
991 995 if self._extra['branch'] == '':
992 996 self._extra['branch'] = 'default'
993 997
994 998 def __str__(self):
995 999 return str(self._parents[0]) + "+"
996 1000
997 1001 def __nonzero__(self):
998 1002 return True
999 1003
1000 1004 def _buildflagfunc(self):
1001 1005 # Create a fallback function for getting file flags when the
1002 1006 # filesystem doesn't support them
1003 1007
1004 1008 copiesget = self._repo.dirstate.copies().get
1005 1009
1006 1010 if len(self._parents) < 2:
1007 1011 # when we have one parent, it's easy: copy from parent
1008 1012 man = self._parents[0].manifest()
1009 1013 def func(f):
1010 1014 f = copiesget(f, f)
1011 1015 return man.flags(f)
1012 1016 else:
1013 1017 # merges are tricky: we try to reconstruct the unstored
1014 1018 # result from the merge (issue1802)
1015 1019 p1, p2 = self._parents
1016 1020 pa = p1.ancestor(p2)
1017 1021 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1018 1022
1019 1023 def func(f):
1020 1024 f = copiesget(f, f) # may be wrong for merges with copies
1021 1025 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1022 1026 if fl1 == fl2:
1023 1027 return fl1
1024 1028 if fl1 == fla:
1025 1029 return fl2
1026 1030 if fl2 == fla:
1027 1031 return fl1
1028 1032 return '' # punt for conflicts
1029 1033
1030 1034 return func
1031 1035
1032 1036 @propertycache
1033 1037 def _flagfunc(self):
1034 1038 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1035 1039
1036 1040 @propertycache
1037 1041 def _manifest(self):
1038 1042 """generate a manifest corresponding to the values in self._status
1039 1043
1040 1044 This reuses the file nodeid from the parent, but we append an extra letter
1041 1045 when modified. Modified files get an extra 'm' while added files get
1042 1046 an extra 'a'. This is used by the manifest merge to see that files
1043 1047 are different and by update logic to avoid deleting newly added files.
1044 1048 """
1045 1049
1046 1050 man1 = self._parents[0].manifest()
1047 1051 man = man1.copy()
1048 1052 if len(self._parents) > 1:
1049 1053 man2 = self.p2().manifest()
1050 1054 def getman(f):
1051 1055 if f in man1:
1052 1056 return man1
1053 1057 return man2
1054 1058 else:
1055 1059 getman = lambda f: man1
1056 1060
1057 1061 copied = self._repo.dirstate.copies()
1058 1062 ff = self._flagfunc
1059 1063 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1060 1064 for f in l:
1061 1065 orig = copied.get(f, f)
1062 1066 man[f] = getman(orig).get(orig, nullid) + i
1063 1067 try:
1064 1068 man.setflag(f, ff(f))
1065 1069 except OSError:
1066 1070 pass
1067 1071
1068 1072 for f in self._status.deleted + self._status.removed:
1069 1073 if f in man:
1070 1074 del man[f]
1071 1075
1072 1076 return man
1073 1077
1074 1078 @propertycache
1075 1079 def _status(self):
1076 1080 return self._repo.status()
1077 1081
1078 1082 @propertycache
1079 1083 def _user(self):
1080 1084 return self._repo.ui.username()
1081 1085
1082 1086 @propertycache
1083 1087 def _date(self):
1084 1088 return util.makedate()
1085 1089
1086 1090 def subrev(self, subpath):
1087 1091 return None
1088 1092
1089 1093 def user(self):
1090 1094 return self._user or self._repo.ui.username()
1091 1095 def date(self):
1092 1096 return self._date
1093 1097 def description(self):
1094 1098 return self._text
1095 1099 def files(self):
1096 1100 return sorted(self._status.modified + self._status.added +
1097 1101 self._status.removed)
1098 1102
1099 1103 def modified(self):
1100 1104 return self._status.modified
1101 1105 def added(self):
1102 1106 return self._status.added
1103 1107 def removed(self):
1104 1108 return self._status.removed
1105 1109 def deleted(self):
1106 1110 return self._status.deleted
1107 1111 def unknown(self):
1108 1112 return self._status.unknown
1109 1113 def ignored(self):
1110 1114 return self._status.ignored
1111 1115 def clean(self):
1112 1116 return self._status.clean
1113 1117 def branch(self):
1114 1118 return encoding.tolocal(self._extra['branch'])
1115 1119 def closesbranch(self):
1116 1120 return 'close' in self._extra
1117 1121 def extra(self):
1118 1122 return self._extra
1119 1123
1120 1124 def tags(self):
1121 1125 t = []
1122 1126 for p in self.parents():
1123 1127 t.extend(p.tags())
1124 1128 return t
1125 1129
1126 1130 def bookmarks(self):
1127 1131 b = []
1128 1132 for p in self.parents():
1129 1133 b.extend(p.bookmarks())
1130 1134 return b
1131 1135
1132 1136 def phase(self):
1133 1137 phase = phases.draft # default phase to draft
1134 1138 for p in self.parents():
1135 1139 phase = max(phase, p.phase())
1136 1140 return phase
1137 1141
1138 1142 def hidden(self):
1139 1143 return False
1140 1144
1141 1145 def children(self):
1142 1146 return []
1143 1147
1144 1148 def flags(self, path):
1145 1149 if '_manifest' in self.__dict__:
1146 1150 try:
1147 1151 return self._manifest.flags(path)
1148 1152 except KeyError:
1149 1153 return ''
1150 1154
1151 1155 try:
1152 1156 return self._flagfunc(path)
1153 1157 except OSError:
1154 1158 return ''
1155 1159
1156 1160 def ancestor(self, c2):
1157 1161 """return the "best" ancestor context of self and c2"""
1158 1162 return self._parents[0].ancestor(c2) # punt on two parents for now
1159 1163
1160 1164 def walk(self, match):
1161 1165 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1162 1166 True, False))
1163 1167
1164 1168 def matches(self, match):
1165 1169 return sorted(self._repo.dirstate.matches(match))
1166 1170
1167 1171 def ancestors(self):
1168 1172 for a in self._repo.changelog.ancestors(
1169 1173 [p.rev() for p in self._parents]):
1170 1174 yield changectx(self._repo, a)
1171 1175
1172 1176 def markcommitted(self, node):
1173 1177 """Perform post-commit cleanup necessary after committing this ctx
1174 1178
1175 1179 Specifically, this updates backing stores this working context
1176 1180 wraps to reflect the fact that the changes reflected by this
1177 1181 workingctx have been committed. For example, it marks
1178 1182 modified and added files as normal in the dirstate.
1179 1183
1180 1184 """
1181 1185
1182 1186 self._repo.dirstate.beginparentchange()
1183 1187 for f in self.modified() + self.added():
1184 1188 self._repo.dirstate.normal(f)
1185 1189 for f in self.removed():
1186 1190 self._repo.dirstate.drop(f)
1187 1191 self._repo.dirstate.setparents(node)
1188 1192 self._repo.dirstate.endparentchange()
1189 1193
1190 1194 def dirs(self):
1191 1195 return self._repo.dirstate.dirs()
1192 1196
1193 1197 class workingctx(committablectx):
1194 1198 """A workingctx object makes access to data related to
1195 1199 the current working directory convenient.
1196 1200 date - any valid date string or (unixtime, offset), or None.
1197 1201 user - username string, or None.
1198 1202 extra - a dictionary of extra values, or None.
1199 1203 changes - a list of file lists as returned by localrepo.status()
1200 1204 or None to use the repository status.
1201 1205 """
1202 1206 def __init__(self, repo, text="", user=None, date=None, extra=None,
1203 1207 changes=None):
1204 1208 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1205 1209
1206 1210 def __iter__(self):
1207 1211 d = self._repo.dirstate
1208 1212 for f in d:
1209 1213 if d[f] != 'r':
1210 1214 yield f
1211 1215
1212 1216 def __contains__(self, key):
1213 1217 return self._repo.dirstate[key] not in "?r"
1214 1218
1215 1219 @propertycache
1216 1220 def _parents(self):
1217 1221 p = self._repo.dirstate.parents()
1218 1222 if p[1] == nullid:
1219 1223 p = p[:-1]
1220 1224 return [changectx(self._repo, x) for x in p]
1221 1225
1222 1226 def filectx(self, path, filelog=None):
1223 1227 """get a file context from the working directory"""
1224 1228 return workingfilectx(self._repo, path, workingctx=self,
1225 1229 filelog=filelog)
1226 1230
1227 1231 def dirty(self, missing=False, merge=True, branch=True):
1228 1232 "check whether a working directory is modified"
1229 1233 # check subrepos first
1230 1234 for s in sorted(self.substate):
1231 1235 if self.sub(s).dirty():
1232 1236 return True
1233 1237 # check current working dir
1234 1238 return ((merge and self.p2()) or
1235 1239 (branch and self.branch() != self.p1().branch()) or
1236 1240 self.modified() or self.added() or self.removed() or
1237 1241 (missing and self.deleted()))
1238 1242
1239 1243 def add(self, list, prefix=""):
1240 1244 join = lambda f: os.path.join(prefix, f)
1241 1245 wlock = self._repo.wlock()
1242 1246 ui, ds = self._repo.ui, self._repo.dirstate
1243 1247 try:
1244 1248 rejected = []
1245 1249 lstat = self._repo.wvfs.lstat
1246 1250 for f in list:
1247 1251 scmutil.checkportable(ui, join(f))
1248 1252 try:
1249 1253 st = lstat(f)
1250 1254 except OSError:
1251 1255 ui.warn(_("%s does not exist!\n") % join(f))
1252 1256 rejected.append(f)
1253 1257 continue
1254 1258 if st.st_size > 10000000:
1255 1259 ui.warn(_("%s: up to %d MB of RAM may be required "
1256 1260 "to manage this file\n"
1257 1261 "(use 'hg revert %s' to cancel the "
1258 1262 "pending addition)\n")
1259 1263 % (f, 3 * st.st_size // 1000000, join(f)))
1260 1264 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1261 1265 ui.warn(_("%s not added: only files and symlinks "
1262 1266 "supported currently\n") % join(f))
1263 1267 rejected.append(f)
1264 1268 elif ds[f] in 'amn':
1265 1269 ui.warn(_("%s already tracked!\n") % join(f))
1266 1270 elif ds[f] == 'r':
1267 1271 ds.normallookup(f)
1268 1272 else:
1269 1273 ds.add(f)
1270 1274 return rejected
1271 1275 finally:
1272 1276 wlock.release()
1273 1277
1274 1278 def forget(self, files, prefix=""):
1275 1279 join = lambda f: os.path.join(prefix, f)
1276 1280 wlock = self._repo.wlock()
1277 1281 try:
1278 1282 rejected = []
1279 1283 for f in files:
1280 1284 if f not in self._repo.dirstate:
1281 1285 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1282 1286 rejected.append(f)
1283 1287 elif self._repo.dirstate[f] != 'a':
1284 1288 self._repo.dirstate.remove(f)
1285 1289 else:
1286 1290 self._repo.dirstate.drop(f)
1287 1291 return rejected
1288 1292 finally:
1289 1293 wlock.release()
1290 1294
1291 1295 def undelete(self, list):
1292 1296 pctxs = self.parents()
1293 1297 wlock = self._repo.wlock()
1294 1298 try:
1295 1299 for f in list:
1296 1300 if self._repo.dirstate[f] != 'r':
1297 1301 self._repo.ui.warn(_("%s not removed!\n") % f)
1298 1302 else:
1299 1303 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1300 1304 t = fctx.data()
1301 1305 self._repo.wwrite(f, t, fctx.flags())
1302 1306 self._repo.dirstate.normal(f)
1303 1307 finally:
1304 1308 wlock.release()
1305 1309
1306 1310 def copy(self, source, dest):
1307 1311 try:
1308 1312 st = self._repo.wvfs.lstat(dest)
1309 1313 except OSError, err:
1310 1314 if err.errno != errno.ENOENT:
1311 1315 raise
1312 1316 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1313 1317 return
1314 1318 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1315 1319 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1316 1320 "symbolic link\n") % dest)
1317 1321 else:
1318 1322 wlock = self._repo.wlock()
1319 1323 try:
1320 1324 if self._repo.dirstate[dest] in '?':
1321 1325 self._repo.dirstate.add(dest)
1322 1326 elif self._repo.dirstate[dest] in 'r':
1323 1327 self._repo.dirstate.normallookup(dest)
1324 1328 self._repo.dirstate.copy(source, dest)
1325 1329 finally:
1326 1330 wlock.release()
1327 1331
1328 1332 def _filtersuspectsymlink(self, files):
1329 1333 if not files or self._repo.dirstate._checklink:
1330 1334 return files
1331 1335
1332 1336 # Symlink placeholders may get non-symlink-like contents
1333 1337 # via user error or dereferencing by NFS or Samba servers,
1334 1338 # so we filter out any placeholders that don't look like a
1335 1339 # symlink
1336 1340 sane = []
1337 1341 for f in files:
1338 1342 if self.flags(f) == 'l':
1339 1343 d = self[f].data()
1340 1344 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1341 1345 self._repo.ui.debug('ignoring suspect symlink placeholder'
1342 1346 ' "%s"\n' % f)
1343 1347 continue
1344 1348 sane.append(f)
1345 1349 return sane
1346 1350
1347 1351 def _checklookup(self, files):
1348 1352 # check for any possibly clean files
1349 1353 if not files:
1350 1354 return [], []
1351 1355
1352 1356 modified = []
1353 1357 fixup = []
1354 1358 pctx = self._parents[0]
1355 1359 # do a full compare of any files that might have changed
1356 1360 for f in sorted(files):
1357 1361 if (f not in pctx or self.flags(f) != pctx.flags(f)
1358 1362 or pctx[f].cmp(self[f])):
1359 1363 modified.append(f)
1360 1364 else:
1361 1365 fixup.append(f)
1362 1366
1363 1367 # update dirstate for files that are actually clean
1364 1368 if fixup:
1365 1369 try:
1366 1370 # updating the dirstate is optional
1367 1371 # so we don't wait on the lock
1368 1372 # wlock can invalidate the dirstate, so cache normal _after_
1369 1373 # taking the lock
1370 1374 wlock = self._repo.wlock(False)
1371 1375 normal = self._repo.dirstate.normal
1372 1376 try:
1373 1377 for f in fixup:
1374 1378 normal(f)
1375 1379 finally:
1376 1380 wlock.release()
1377 1381 except error.LockError:
1378 1382 pass
1379 1383 return modified, fixup
1380 1384
1381 1385 def _manifestmatches(self, match, s):
1382 1386 """Slow path for workingctx
1383 1387
1384 1388 The fast path is when we compare the working directory to its parent
1385 1389 which means this function is comparing with a non-parent; therefore we
1386 1390 need to build a manifest and return what matches.
1387 1391 """
1388 1392 mf = self._repo['.']._manifestmatches(match, s)
1389 1393 for f in s.modified + s.added:
1390 1394 mf[f] = None
1391 1395 mf.setflag(f, self.flags(f))
1392 1396 for f in s.removed:
1393 1397 if f in mf:
1394 1398 del mf[f]
1395 1399 return mf
1396 1400
1397 1401 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1398 1402 unknown=False):
1399 1403 '''Gets the status from the dirstate -- internal use only.'''
1400 1404 listignored, listclean, listunknown = ignored, clean, unknown
1401 1405 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1402 1406 subrepos = []
1403 1407 if '.hgsub' in self:
1404 1408 subrepos = sorted(self.substate)
1405 1409 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1406 1410 listclean, listunknown)
1407 1411
1408 1412 # check for any possibly clean files
1409 1413 if cmp:
1410 1414 modified2, fixup = self._checklookup(cmp)
1411 1415 s.modified.extend(modified2)
1412 1416
1413 1417 # update dirstate for files that are actually clean
1414 1418 if fixup and listclean:
1415 1419 s.clean.extend(fixup)
1416 1420
1417 1421 return s
1418 1422
1419 1423 def _buildstatus(self, other, s, match, listignored, listclean,
1420 1424 listunknown):
1421 1425 """build a status with respect to another context
1422 1426
1423 1427 This includes logic for maintaining the fast path of status when
1424 1428 comparing the working directory against its parent, which is to skip
1425 1429 building a new manifest if self (working directory) is not comparing
1426 1430 against its parent (repo['.']).
1427 1431 """
1428 1432 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1429 1433 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1430 1434 # might have accidentally ended up with the entire contents of the file
1431 1435 # they are supposed to be linking to.
1432 1436 s.modified[:] = self._filtersuspectsymlink(s.modified)
1433 1437 if other != self._repo['.']:
1434 1438 s = super(workingctx, self)._buildstatus(other, s, match,
1435 1439 listignored, listclean,
1436 1440 listunknown)
1437 1441 self._status = s
1438 1442 return s
1439 1443
1440 1444 def _matchstatus(self, other, match):
1441 1445 """override the match method with a filter for directory patterns
1442 1446
1443 1447 We use inheritance to customize the match.bad method only in cases of
1444 1448 workingctx since it belongs only to the working directory when
1445 1449 comparing against the parent changeset.
1446 1450
1447 1451 If we aren't comparing against the working directory's parent, then we
1448 1452 just use the default match object sent to us.
1449 1453 """
1450 1454 superself = super(workingctx, self)
1451 1455 match = superself._matchstatus(other, match)
1452 1456 if other != self._repo['.']:
1453 1457 def bad(f, msg):
1454 1458 # 'f' may be a directory pattern from 'match.files()',
1455 1459 # so 'f not in ctx1' is not enough
1456 1460 if f not in other and f not in other.dirs():
1457 1461 self._repo.ui.warn('%s: %s\n' %
1458 1462 (self._repo.dirstate.pathto(f), msg))
1459 1463 match.bad = bad
1460 1464 return match
1461 1465
1462 1466 class committablefilectx(basefilectx):
1463 1467 """A committablefilectx provides common functionality for a file context
1464 1468 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1465 1469 def __init__(self, repo, path, filelog=None, ctx=None):
1466 1470 self._repo = repo
1467 1471 self._path = path
1468 1472 self._changeid = None
1469 1473 self._filerev = self._filenode = None
1470 1474
1471 1475 if filelog is not None:
1472 1476 self._filelog = filelog
1473 1477 if ctx:
1474 1478 self._changectx = ctx
1475 1479
1476 1480 def __nonzero__(self):
1477 1481 return True
1478 1482
1479 1483 def parents(self):
1480 1484 '''return parent filectxs, following copies if necessary'''
1481 1485 def filenode(ctx, path):
1482 1486 return ctx._manifest.get(path, nullid)
1483 1487
1484 1488 path = self._path
1485 1489 fl = self._filelog
1486 1490 pcl = self._changectx._parents
1487 1491 renamed = self.renamed()
1488 1492
1489 1493 if renamed:
1490 1494 pl = [renamed + (None,)]
1491 1495 else:
1492 1496 pl = [(path, filenode(pcl[0], path), fl)]
1493 1497
1494 1498 for pc in pcl[1:]:
1495 1499 pl.append((path, filenode(pc, path), fl))
1496 1500
1497 1501 return [filectx(self._repo, p, fileid=n, filelog=l)
1498 1502 for p, n, l in pl if n != nullid]
1499 1503
1500 1504 def children(self):
1501 1505 return []
1502 1506
1503 1507 class workingfilectx(committablefilectx):
1504 1508 """A workingfilectx object makes access to data related to a particular
1505 1509 file in the working directory convenient."""
1506 1510 def __init__(self, repo, path, filelog=None, workingctx=None):
1507 1511 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1508 1512
1509 1513 @propertycache
1510 1514 def _changectx(self):
1511 1515 return workingctx(self._repo)
1512 1516
1513 1517 def data(self):
1514 1518 return self._repo.wread(self._path)
1515 1519 def renamed(self):
1516 1520 rp = self._repo.dirstate.copied(self._path)
1517 1521 if not rp:
1518 1522 return None
1519 1523 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1520 1524
1521 1525 def size(self):
1522 1526 return self._repo.wvfs.lstat(self._path).st_size
1523 1527 def date(self):
1524 1528 t, tz = self._changectx.date()
1525 1529 try:
1526 1530 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1527 1531 except OSError, err:
1528 1532 if err.errno != errno.ENOENT:
1529 1533 raise
1530 1534 return (t, tz)
1531 1535
1532 1536 def cmp(self, fctx):
1533 1537 """compare with other file context
1534 1538
1535 1539 returns True if different than fctx.
1536 1540 """
1537 1541 # fctx should be a filectx (not a workingfilectx)
1538 1542 # invert comparison to reuse the same code path
1539 1543 return fctx.cmp(self)
1540 1544
1541 1545 def remove(self, ignoremissing=False):
1542 1546 """wraps unlink for a repo's working directory"""
1543 1547 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1544 1548
1545 1549 def write(self, data, flags):
1546 1550 """wraps repo.wwrite"""
1547 1551 self._repo.wwrite(self._path, data, flags)
1548 1552
1549 1553 class memctx(committablectx):
1550 1554 """Use memctx to perform in-memory commits via localrepo.commitctx().
1551 1555
1552 1556 Revision information is supplied at initialization time, while
1553 1557 related file data is made available through a callback
1554 1558 mechanism. 'repo' is the current localrepo, 'parents' is a
1555 1559 sequence of two parent revisions identifiers (pass None for every
1556 1560 missing parent), 'text' is the commit message and 'files' lists
1557 1561 names of files touched by the revision (normalized and relative to
1558 1562 repository root).
1559 1563
1560 1564 filectxfn(repo, memctx, path) is a callable receiving the
1561 1565 repository, the current memctx object and the normalized path of
1562 1566 requested file, relative to repository root. It is fired by the
1563 1567 commit function for every file in 'files', but calls order is
1564 1568 undefined. If the file is available in the revision being
1565 1569 committed (updated or added), filectxfn returns a memfilectx
1566 1570 object. If the file was removed, filectxfn raises an
1567 1571 IOError. Moved files are represented by marking the source file
1568 1572 removed and the new file added with copy information (see
1569 1573 memfilectx).
1570 1574
1571 1575 user receives the committer name and defaults to current
1572 1576 repository username, date is the commit date in any format
1573 1577 supported by util.parsedate() and defaults to current date, extra
1574 1578 is a dictionary of metadata or is left empty.
1575 1579 """
1576 1580
1577 1581 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1578 1582 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1579 1583 # this field to determine what to do in filectxfn.
1580 1584 _returnnoneformissingfiles = True
1581 1585
1582 1586 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1583 1587 date=None, extra=None, editor=False):
1584 1588 super(memctx, self).__init__(repo, text, user, date, extra)
1585 1589 self._rev = None
1586 1590 self._node = None
1587 1591 parents = [(p or nullid) for p in parents]
1588 1592 p1, p2 = parents
1589 1593 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1590 1594 files = sorted(set(files))
1591 1595 self._status = scmutil.status(files, [], [], [], [], [], [])
1592 1596 self._filectxfn = filectxfn
1593 1597 self.substate = {}
1594 1598
1595 1599 # if store is not callable, wrap it in a function
1596 1600 if not callable(filectxfn):
1597 1601 def getfilectx(repo, memctx, path):
1598 1602 fctx = filectxfn[path]
1599 1603 # this is weird but apparently we only keep track of one parent
1600 1604 # (why not only store that instead of a tuple?)
1601 1605 copied = fctx.renamed()
1602 1606 if copied:
1603 1607 copied = copied[0]
1604 1608 return memfilectx(repo, path, fctx.data(),
1605 1609 islink=fctx.islink(), isexec=fctx.isexec(),
1606 1610 copied=copied, memctx=memctx)
1607 1611 self._filectxfn = getfilectx
1608 1612
1609 1613 self._extra = extra and extra.copy() or {}
1610 1614 if self._extra.get('branch', '') == '':
1611 1615 self._extra['branch'] = 'default'
1612 1616
1613 1617 if editor:
1614 1618 self._text = editor(self._repo, self, [])
1615 1619 self._repo.savecommitmessage(self._text)
1616 1620
1617 1621 def filectx(self, path, filelog=None):
1618 1622 """get a file context from the working directory
1619 1623
1620 1624 Returns None if file doesn't exist and should be removed."""
1621 1625 return self._filectxfn(self._repo, self, path)
1622 1626
1623 1627 def commit(self):
1624 1628 """commit context to the repo"""
1625 1629 return self._repo.commitctx(self)
1626 1630
1627 1631 @propertycache
1628 1632 def _manifest(self):
1629 1633 """generate a manifest based on the return values of filectxfn"""
1630 1634
1631 1635 # keep this simple for now; just worry about p1
1632 1636 pctx = self._parents[0]
1633 1637 man = pctx.manifest().copy()
1634 1638
1635 1639 for f, fnode in man.iteritems():
1636 1640 p1node = nullid
1637 1641 p2node = nullid
1638 1642 p = pctx[f].parents() # if file isn't in pctx, check p2?
1639 1643 if len(p) > 0:
1640 1644 p1node = p[0].node()
1641 1645 if len(p) > 1:
1642 1646 p2node = p[1].node()
1643 1647 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1644 1648
1645 1649 return man
1646 1650
1647 1651
1648 1652 class memfilectx(committablefilectx):
1649 1653 """memfilectx represents an in-memory file to commit.
1650 1654
1651 1655 See memctx and committablefilectx for more details.
1652 1656 """
1653 1657 def __init__(self, repo, path, data, islink=False,
1654 1658 isexec=False, copied=None, memctx=None):
1655 1659 """
1656 1660 path is the normalized file path relative to repository root.
1657 1661 data is the file content as a string.
1658 1662 islink is True if the file is a symbolic link.
1659 1663 isexec is True if the file is executable.
1660 1664 copied is the source file path if current file was copied in the
1661 1665 revision being committed, or None."""
1662 1666 super(memfilectx, self).__init__(repo, path, None, memctx)
1663 1667 self._data = data
1664 1668 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1665 1669 self._copied = None
1666 1670 if copied:
1667 1671 self._copied = (copied, nullid)
1668 1672
1669 1673 def data(self):
1670 1674 return self._data
1671 1675 def size(self):
1672 1676 return len(self.data())
1673 1677 def flags(self):
1674 1678 return self._flags
1675 1679 def renamed(self):
1676 1680 return self._copied
1677 1681
1678 1682 def remove(self, ignoremissing=False):
1679 1683 """wraps unlink for a repo's working directory"""
1680 1684 # need to figure out what to do here
1681 1685 del self._changectx[self._path]
1682 1686
1683 1687 def write(self, data, flags):
1684 1688 """wraps repo.wwrite"""
1685 1689 self._data = data
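The practical effect of the lookup change (assuming the names registry introduced elsewhere in this series is in place) is that repo[name], which constructs a changectx, can resolve anything registered with repo.names rather than only bookmarks. A minimal, hypothetical usage sketch; the bookmark name 'stable-work' is made up and assumed to exist in the local repository:

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '.')
    ctx = repo['stable-work']          # resolved via repo.names.singlenode()
    print ctx.hex(), ctx.branch()      # full node hash and branch name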