context.status: explain "caching reasons" more fully...
Martin von Zweigbergk
r23257:37c57a7c default
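
The comment expanded in _buildstatus below spells out the caching rationale: revision 1001 of a revlog is typically stored as a delta against revision 1000, so reading 1000 first leaves its reconstructed full text in the cache and reading 1001 afterwards costs only one delta application on top of it. The toy model below illustrates that access pattern; ToyRevlog and its one-entry cache are simplified stand-ins for illustration only, not the real revlog API.

# Toy model of a revlog-style store: one full snapshot plus per-revision
# deltas, with a one-entry cache holding the last reconstructed full text.
class ToyRevlog(object):
    def __init__(self, snapshots, deltas):
        self.snapshots = snapshots   # {rev: full text}
        self.deltas = deltas         # {rev: function(previous text) -> text}
        self.cache = None            # (rev, text) of the last reconstruction
        self.fullreconstructions = 0
        self.deltaapplications = 0

    def revision(self, rev):
        if self.cache and self.cache[0] == rev - 1 and rev in self.deltas:
            # Cheap path: apply a single delta to the cached base text.
            self.deltaapplications += 1
            text = self.deltas[rev](self.cache[1])
        else:
            # Expensive path: rebuild from the nearest snapshot at or below rev.
            self.fullreconstructions += 1
            base = max(r for r in self.snapshots if r <= rev)
            text = self.snapshots[base]
            for r in range(base + 1, rev + 1):
                text = self.deltas[r](text)
        self.cache = (rev, text)
        return text

def makelog():
    return ToyRevlog(snapshots={1000: "manifest@1000"},
                     deltas={1001: lambda prev: prev + "+delta1001"})

# Earliest revision first: one full reconstruction, then one delta application.
log = makelog()
log.revision(1000)
log.revision(1001)
assert (log.fullreconstructions, log.deltaapplications) == (1, 1)

# Latest revision first: the cache never helps, so both reads rebuild from scratch.
log = makelog()
log.revision(1001)
log.revision(1000)
assert (log.fullreconstructions, log.deltaapplications) == (2, 0)
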
@@ -1,1683 +1,1688
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 class basectx(object):
21 21 """A basectx object represents the common logic for its children:
22 22 changectx: read-only context that is already present in the repo,
23 23 workingctx: a context that represents the working directory and can
24 24 be committed,
25 25 memctx: a context that represents changes in-memory and can also
26 26 be committed."""
27 27 def __new__(cls, repo, changeid='', *args, **kwargs):
28 28 if isinstance(changeid, basectx):
29 29 return changeid
30 30
31 31 o = super(basectx, cls).__new__(cls)
32 32
33 33 o._repo = repo
34 34 o._rev = nullrev
35 35 o._node = nullid
36 36
37 37 return o
38 38
39 39 def __str__(self):
40 40 return short(self.node())
41 41
42 42 def __int__(self):
43 43 return self.rev()
44 44
45 45 def __repr__(self):
46 46 return "<%s %s>" % (type(self).__name__, str(self))
47 47
48 48 def __eq__(self, other):
49 49 try:
50 50 return type(self) == type(other) and self._rev == other._rev
51 51 except AttributeError:
52 52 return False
53 53
54 54 def __ne__(self, other):
55 55 return not (self == other)
56 56
57 57 def __contains__(self, key):
58 58 return key in self._manifest
59 59
60 60 def __getitem__(self, key):
61 61 return self.filectx(key)
62 62
63 63 def __iter__(self):
64 64 for f in sorted(self._manifest):
65 65 yield f
66 66
67 67 def _manifestmatches(self, match, s):
68 68 """generate a new manifest filtered by the match argument
69 69
70 70 This method is for internal use only and mainly exists to provide an
71 71 object oriented way for other contexts to customize the manifest
72 72 generation.
73 73 """
74 74 if match.always():
75 75 return self.manifest().copy()
76 76
77 77 files = match.files()
78 78 if (match.matchfn == match.exact or
79 79 (not match.anypats() and util.all(fn in self for fn in files))):
80 80 return self.manifest().intersectfiles(files)
81 81
82 82 mf = self.manifest().copy()
83 83 for fn in mf.keys():
84 84 if not match(fn):
85 85 del mf[fn]
86 86 return mf
87 87
88 88 def _matchstatus(self, other, match):
89 89 """return match.always if match is none
90 90
91 91 This internal method provides a way for child objects to override the
92 92 match operator.
93 93 """
94 94 return match or matchmod.always(self._repo.root, self._repo.getcwd())
95 95
96 96 def _buildstatus(self, other, s, match, listignored, listclean,
97 97 listunknown):
98 98 """build a status with respect to another context"""
99 # load earliest manifest first for caching reasons
99 # Load earliest manifest first for caching reasons. More specifically,
100 # if you have revisions 1000 and 1001, 1001 is probably stored as a
101 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
102 # 1000 and cache it so that when you read 1001, we just need to apply a
103 # delta to what's in the cache. So that's one full reconstruction + one
104 # delta application.
100 105 if self.rev() is not None and self.rev() < other.rev():
101 106 self.manifest()
102 107 mf1 = other._manifestmatches(match, s)
103 108 mf2 = self._manifestmatches(match, s)
104 109
105 110 modified, added, clean = [], [], []
106 111 deleted, unknown, ignored = s[3], s[4], s[5]
107 112 deletedset = set(deleted)
108 113 withflags = mf1.withflags() | mf2.withflags()
109 114 for fn, mf2node in mf2.iteritems():
110 115 if fn in mf1:
111 116 if (fn not in deletedset and
112 117 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
113 118 (mf1[fn] != mf2node and
114 119 (mf2node or self[fn].cmp(other[fn]))))):
115 120 modified.append(fn)
116 121 elif listclean:
117 122 clean.append(fn)
118 123 del mf1[fn]
119 124 elif fn not in deletedset:
120 125 added.append(fn)
121 126 removed = mf1.keys()
122 127 if removed:
123 128 # need to filter files if they are already reported as removed
124 129 unknown = [fn for fn in unknown if fn not in mf1]
125 130 ignored = [fn for fn in ignored if fn not in mf1]
126 131
127 132 return [modified, added, removed, deleted, unknown, ignored, clean]
128 133
129 134 @propertycache
130 135 def substate(self):
131 136 return subrepo.state(self, self._repo.ui)
132 137
133 138 def subrev(self, subpath):
134 139 return self.substate[subpath][1]
135 140
136 141 def rev(self):
137 142 return self._rev
138 143 def node(self):
139 144 return self._node
140 145 def hex(self):
141 146 return hex(self.node())
142 147 def manifest(self):
143 148 return self._manifest
144 149 def phasestr(self):
145 150 return phases.phasenames[self.phase()]
146 151 def mutable(self):
147 152 return self.phase() > phases.public
148 153
149 154 def getfileset(self, expr):
150 155 return fileset.getfileset(self, expr)
151 156
152 157 def obsolete(self):
153 158 """True if the changeset is obsolete"""
154 159 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
155 160
156 161 def extinct(self):
157 162 """True if the changeset is extinct"""
158 163 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
159 164
160 165 def unstable(self):
161 166 """True if the changeset is not obsolete but its ancestors are"""
162 167 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
163 168
164 169 def bumped(self):
165 170 """True if the changeset tries to be a successor of a public changeset
166 171
167 172 Only non-public and non-obsolete changesets may be bumped.
168 173 """
169 174 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
170 175
171 176 def divergent(self):
172 177 """Is a successor of a changeset with multiple possible successors sets
173 178
174 179 Only non-public and non-obsolete changesets may be divergent.
175 180 """
176 181 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
177 182
178 183 def troubled(self):
179 184 """True if the changeset is either unstable, bumped or divergent"""
180 185 return self.unstable() or self.bumped() or self.divergent()
181 186
182 187 def troubles(self):
183 188 """return the list of troubles affecting this changeset.
184 189
185 190 Troubles are returned as strings. Possible values are:
186 191 - unstable,
187 192 - bumped,
188 193 - divergent.
189 194 """
190 195 troubles = []
191 196 if self.unstable():
192 197 troubles.append('unstable')
193 198 if self.bumped():
194 199 troubles.append('bumped')
195 200 if self.divergent():
196 201 troubles.append('divergent')
197 202 return troubles
198 203
199 204 def parents(self):
200 205 """return contexts for each parent changeset"""
201 206 return self._parents
202 207
203 208 def p1(self):
204 209 return self._parents[0]
205 210
206 211 def p2(self):
207 212 if len(self._parents) == 2:
208 213 return self._parents[1]
209 214 return changectx(self._repo, -1)
210 215
211 216 def _fileinfo(self, path):
212 217 if '_manifest' in self.__dict__:
213 218 try:
214 219 return self._manifest[path], self._manifest.flags(path)
215 220 except KeyError:
216 221 raise error.ManifestLookupError(self._node, path,
217 222 _('not found in manifest'))
218 223 if '_manifestdelta' in self.__dict__ or path in self.files():
219 224 if path in self._manifestdelta:
220 225 return (self._manifestdelta[path],
221 226 self._manifestdelta.flags(path))
222 227 node, flag = self._repo.manifest.find(self._changeset[0], path)
223 228 if not node:
224 229 raise error.ManifestLookupError(self._node, path,
225 230 _('not found in manifest'))
226 231
227 232 return node, flag
228 233
229 234 def filenode(self, path):
230 235 return self._fileinfo(path)[0]
231 236
232 237 def flags(self, path):
233 238 try:
234 239 return self._fileinfo(path)[1]
235 240 except error.LookupError:
236 241 return ''
237 242
238 243 def sub(self, path):
239 244 return subrepo.subrepo(self, path)
240 245
241 246 def match(self, pats=[], include=None, exclude=None, default='glob'):
242 247 r = self._repo
243 248 return matchmod.match(r.root, r.getcwd(), pats,
244 249 include, exclude, default,
245 250 auditor=r.auditor, ctx=self)
246 251
247 252 def diff(self, ctx2=None, match=None, **opts):
248 253 """Returns a diff generator for the given contexts and matcher"""
249 254 if ctx2 is None:
250 255 ctx2 = self.p1()
251 256 if ctx2 is not None:
252 257 ctx2 = self._repo[ctx2]
253 258 diffopts = patch.diffopts(self._repo.ui, opts)
254 259 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
255 260
256 261 @propertycache
257 262 def _dirs(self):
258 263 return scmutil.dirs(self._manifest)
259 264
260 265 def dirs(self):
261 266 return self._dirs
262 267
263 268 def dirty(self, missing=False, merge=True, branch=True):
264 269 return False
265 270
266 271 def status(self, other=None, match=None, listignored=False,
267 272 listclean=False, listunknown=False, listsubrepos=False):
268 273 """return status of files between two nodes or node and working
269 274 directory.
270 275
271 276 If other is None, compare this node with working directory.
272 277
273 278 returns (modified, added, removed, deleted, unknown, ignored, clean)
274 279 """
275 280
276 281 ctx1 = self
277 282 ctx2 = self._repo[other]
278 283
279 284 # This next code block is, admittedly, fragile logic that tests for
280 285 # reversing the contexts and wouldn't need to exist if it weren't for
281 286 # the fast (and common) code path of comparing the working directory
282 287 # with its first parent.
283 288 #
284 289 # What we're aiming for here is the ability to call:
285 290 #
286 291 # workingctx.status(parentctx)
287 292 #
288 293 # If we always built the manifest for each context and compared those,
289 294 # then we'd be done. But the special case of the above call means we
290 295 # just copy the manifest of the parent.
291 296 reversed = False
292 297 if (not isinstance(ctx1, changectx)
293 298 and isinstance(ctx2, changectx)):
294 299 reversed = True
295 300 ctx1, ctx2 = ctx2, ctx1
296 301
297 302 match = ctx2._matchstatus(ctx1, match)
298 303 r = [[], [], [], [], [], [], []]
299 304 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
300 305 listunknown)
301 306
302 307 if reversed:
303 308 # reverse added and removed
304 309 r[1], r[2] = r[2], r[1]
305 310
306 311 if listsubrepos:
307 312 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
308 313 rev2 = ctx2.subrev(subpath)
309 314 try:
310 315 submatch = matchmod.narrowmatcher(subpath, match)
311 316 s = sub.status(rev2, match=submatch, ignored=listignored,
312 317 clean=listclean, unknown=listunknown,
313 318 listsubrepos=True)
314 319 for rfiles, sfiles in zip(r, s):
315 320 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
316 321 except error.LookupError:
317 322 self._repo.ui.status(_("skipping missing "
318 323 "subrepository: %s\n") % subpath)
319 324
320 325 for l in r:
321 326 l.sort()
322 327
323 328 # we return a tuple to signify that this list isn't changing
324 329 return scmutil.status(*r)
325 330
326 331
327 332 def makememctx(repo, parents, text, user, date, branch, files, store,
328 333 editor=None):
329 334 def getfilectx(repo, memctx, path):
330 335 data, mode, copied = store.getfile(path)
331 336 if data is None:
332 337 return None
333 338 islink, isexec = mode
334 339 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
335 340 copied=copied, memctx=memctx)
336 341 extra = {}
337 342 if branch:
338 343 extra['branch'] = encoding.fromlocal(branch)
339 344 ctx = memctx(repo, parents, text, files, getfilectx, user,
340 345 date, extra, editor)
341 346 return ctx
342 347
343 348 class changectx(basectx):
344 349 """A changecontext object makes access to data related to a particular
345 350 changeset convenient. It represents a read-only context already present in
346 351 the repo."""
347 352 def __init__(self, repo, changeid=''):
348 353 """changeid is a revision number, node, or tag"""
349 354
350 355 # since basectx.__new__ already took care of copying the object, we
351 356 # don't need to do anything in __init__, so we just exit here
352 357 if isinstance(changeid, basectx):
353 358 return
354 359
355 360 if changeid == '':
356 361 changeid = '.'
357 362 self._repo = repo
358 363
359 364 try:
360 365 if isinstance(changeid, int):
361 366 self._node = repo.changelog.node(changeid)
362 367 self._rev = changeid
363 368 return
364 369 if isinstance(changeid, long):
365 370 changeid = str(changeid)
366 371 if changeid == '.':
367 372 self._node = repo.dirstate.p1()
368 373 self._rev = repo.changelog.rev(self._node)
369 374 return
370 375 if changeid == 'null':
371 376 self._node = nullid
372 377 self._rev = nullrev
373 378 return
374 379 if changeid == 'tip':
375 380 self._node = repo.changelog.tip()
376 381 self._rev = repo.changelog.rev(self._node)
377 382 return
378 383 if len(changeid) == 20:
379 384 try:
380 385 self._node = changeid
381 386 self._rev = repo.changelog.rev(changeid)
382 387 return
383 388 except error.FilteredRepoLookupError:
384 389 raise
385 390 except LookupError:
386 391 pass
387 392
388 393 try:
389 394 r = int(changeid)
390 395 if str(r) != changeid:
391 396 raise ValueError
392 397 l = len(repo.changelog)
393 398 if r < 0:
394 399 r += l
395 400 if r < 0 or r >= l:
396 401 raise ValueError
397 402 self._rev = r
398 403 self._node = repo.changelog.node(r)
399 404 return
400 405 except error.FilteredIndexError:
401 406 raise
402 407 except (ValueError, OverflowError, IndexError):
403 408 pass
404 409
405 410 if len(changeid) == 40:
406 411 try:
407 412 self._node = bin(changeid)
408 413 self._rev = repo.changelog.rev(self._node)
409 414 return
410 415 except error.FilteredLookupError:
411 416 raise
412 417 except (TypeError, LookupError):
413 418 pass
414 419
415 420 if changeid in repo._bookmarks:
416 421 self._node = repo._bookmarks[changeid]
417 422 self._rev = repo.changelog.rev(self._node)
418 423 return
419 424 if changeid in repo._tagscache.tags:
420 425 self._node = repo._tagscache.tags[changeid]
421 426 self._rev = repo.changelog.rev(self._node)
422 427 return
423 428 try:
424 429 self._node = repo.branchtip(changeid)
425 430 self._rev = repo.changelog.rev(self._node)
426 431 return
427 432 except error.FilteredRepoLookupError:
428 433 raise
429 434 except error.RepoLookupError:
430 435 pass
431 436
432 437 self._node = repo.unfiltered().changelog._partialmatch(changeid)
433 438 if self._node is not None:
434 439 self._rev = repo.changelog.rev(self._node)
435 440 return
436 441
437 442 # lookup failed
438 443 # check if it might have come from damaged dirstate
439 444 #
440 445 # XXX we could avoid the unfiltered if we had a recognizable
441 446 # exception for filtered changeset access
442 447 if changeid in repo.unfiltered().dirstate.parents():
443 448 msg = _("working directory has unknown parent '%s'!")
444 449 raise error.Abort(msg % short(changeid))
445 450 try:
446 451 if len(changeid) == 20:
447 452 changeid = hex(changeid)
448 453 except TypeError:
449 454 pass
450 455 except (error.FilteredIndexError, error.FilteredLookupError,
451 456 error.FilteredRepoLookupError):
452 457 if repo.filtername == 'visible':
453 458 msg = _("hidden revision '%s'") % changeid
454 459 hint = _('use --hidden to access hidden revisions')
455 460 raise error.FilteredRepoLookupError(msg, hint=hint)
456 461 msg = _("filtered revision '%s' (not in '%s' subset)")
457 462 msg %= (changeid, repo.filtername)
458 463 raise error.FilteredRepoLookupError(msg)
459 464 except IndexError:
460 465 pass
461 466 raise error.RepoLookupError(
462 467 _("unknown revision '%s'") % changeid)
463 468
464 469 def __hash__(self):
465 470 try:
466 471 return hash(self._rev)
467 472 except AttributeError:
468 473 return id(self)
469 474
470 475 def __nonzero__(self):
471 476 return self._rev != nullrev
472 477
473 478 @propertycache
474 479 def _changeset(self):
475 480 return self._repo.changelog.read(self.rev())
476 481
477 482 @propertycache
478 483 def _manifest(self):
479 484 return self._repo.manifest.read(self._changeset[0])
480 485
481 486 @propertycache
482 487 def _manifestdelta(self):
483 488 return self._repo.manifest.readdelta(self._changeset[0])
484 489
485 490 @propertycache
486 491 def _parents(self):
487 492 p = self._repo.changelog.parentrevs(self._rev)
488 493 if p[1] == nullrev:
489 494 p = p[:-1]
490 495 return [changectx(self._repo, x) for x in p]
491 496
492 497 def changeset(self):
493 498 return self._changeset
494 499 def manifestnode(self):
495 500 return self._changeset[0]
496 501
497 502 def user(self):
498 503 return self._changeset[1]
499 504 def date(self):
500 505 return self._changeset[2]
501 506 def files(self):
502 507 return self._changeset[3]
503 508 def description(self):
504 509 return self._changeset[4]
505 510 def branch(self):
506 511 return encoding.tolocal(self._changeset[5].get("branch"))
507 512 def closesbranch(self):
508 513 return 'close' in self._changeset[5]
509 514 def extra(self):
510 515 return self._changeset[5]
511 516 def tags(self):
512 517 return self._repo.nodetags(self._node)
513 518 def bookmarks(self):
514 519 return self._repo.nodebookmarks(self._node)
515 520 def phase(self):
516 521 return self._repo._phasecache.phase(self._repo, self._rev)
517 522 def hidden(self):
518 523 return self._rev in repoview.filterrevs(self._repo, 'visible')
519 524
520 525 def children(self):
521 526 """return contexts for each child changeset"""
522 527 c = self._repo.changelog.children(self._node)
523 528 return [changectx(self._repo, x) for x in c]
524 529
525 530 def ancestors(self):
526 531 for a in self._repo.changelog.ancestors([self._rev]):
527 532 yield changectx(self._repo, a)
528 533
529 534 def descendants(self):
530 535 for d in self._repo.changelog.descendants([self._rev]):
531 536 yield changectx(self._repo, d)
532 537
533 538 def filectx(self, path, fileid=None, filelog=None):
534 539 """get a file context from this changeset"""
535 540 if fileid is None:
536 541 fileid = self.filenode(path)
537 542 return filectx(self._repo, path, fileid=fileid,
538 543 changectx=self, filelog=filelog)
539 544
540 545 def ancestor(self, c2, warn=False):
541 546 """return the "best" ancestor context of self and c2
542 547
543 548 If there are multiple candidates, it will show a message and check
544 549 merge.preferancestor configuration before falling back to the
545 550 revlog ancestor."""
546 551 # deal with workingctxs
547 552 n2 = c2._node
548 553 if n2 is None:
549 554 n2 = c2._parents[0]._node
550 555 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
551 556 if not cahs:
552 557 anc = nullid
553 558 elif len(cahs) == 1:
554 559 anc = cahs[0]
555 560 else:
556 561 for r in self._repo.ui.configlist('merge', 'preferancestor'):
557 562 try:
558 563 ctx = changectx(self._repo, r)
559 564 except error.RepoLookupError:
560 565 continue
561 566 anc = ctx.node()
562 567 if anc in cahs:
563 568 break
564 569 else:
565 570 anc = self._repo.changelog.ancestor(self._node, n2)
566 571 if warn:
567 572 self._repo.ui.status(
568 573 (_("note: using %s as ancestor of %s and %s\n") %
569 574 (short(anc), short(self._node), short(n2))) +
570 575 ''.join(_(" alternatively, use --config "
571 576 "merge.preferancestor=%s\n") %
572 577 short(n) for n in sorted(cahs) if n != anc))
573 578 return changectx(self._repo, anc)
574 579
575 580 def descendant(self, other):
576 581 """True if other is descendant of this changeset"""
577 582 return self._repo.changelog.descendant(self._rev, other._rev)
578 583
579 584 def walk(self, match):
580 585 fset = set(match.files())
581 586 # for dirstate.walk, files=['.'] means "walk the whole tree".
582 587 # follow that here, too
583 588 fset.discard('.')
584 589
585 590 # avoid the entire walk if we're only looking for specific files
586 591 if fset and not match.anypats():
587 592 if util.all([fn in self for fn in fset]):
588 593 for fn in sorted(fset):
589 594 if match(fn):
590 595 yield fn
591 596 raise StopIteration
592 597
593 598 for fn in self:
594 599 if fn in fset:
595 600 # specified pattern is the exact name
596 601 fset.remove(fn)
597 602 if match(fn):
598 603 yield fn
599 604 for fn in sorted(fset):
600 605 if fn in self._dirs:
601 606 # specified pattern is a directory
602 607 continue
603 608 match.bad(fn, _('no such file in rev %s') % self)
604 609
605 610 def matches(self, match):
606 611 return self.walk(match)
607 612
608 613 class basefilectx(object):
609 614 """A filecontext object represents the common logic for its children:
610 615 filectx: read-only access to a filerevision that is already present
611 616 in the repo,
612 617 workingfilectx: a filecontext that represents files from the working
613 618 directory,
614 619 memfilectx: a filecontext that represents files in-memory."""
615 620 def __new__(cls, repo, path, *args, **kwargs):
616 621 return super(basefilectx, cls).__new__(cls)
617 622
618 623 @propertycache
619 624 def _filelog(self):
620 625 return self._repo.file(self._path)
621 626
622 627 @propertycache
623 628 def _changeid(self):
624 629 if '_changeid' in self.__dict__:
625 630 return self._changeid
626 631 elif '_changectx' in self.__dict__:
627 632 return self._changectx.rev()
628 633 else:
629 634 return self._filelog.linkrev(self._filerev)
630 635
631 636 @propertycache
632 637 def _filenode(self):
633 638 if '_fileid' in self.__dict__:
634 639 return self._filelog.lookup(self._fileid)
635 640 else:
636 641 return self._changectx.filenode(self._path)
637 642
638 643 @propertycache
639 644 def _filerev(self):
640 645 return self._filelog.rev(self._filenode)
641 646
642 647 @propertycache
643 648 def _repopath(self):
644 649 return self._path
645 650
646 651 def __nonzero__(self):
647 652 try:
648 653 self._filenode
649 654 return True
650 655 except error.LookupError:
651 656 # file is missing
652 657 return False
653 658
654 659 def __str__(self):
655 660 return "%s@%s" % (self.path(), self._changectx)
656 661
657 662 def __repr__(self):
658 663 return "<%s %s>" % (type(self).__name__, str(self))
659 664
660 665 def __hash__(self):
661 666 try:
662 667 return hash((self._path, self._filenode))
663 668 except AttributeError:
664 669 return id(self)
665 670
666 671 def __eq__(self, other):
667 672 try:
668 673 return (type(self) == type(other) and self._path == other._path
669 674 and self._filenode == other._filenode)
670 675 except AttributeError:
671 676 return False
672 677
673 678 def __ne__(self, other):
674 679 return not (self == other)
675 680
676 681 def filerev(self):
677 682 return self._filerev
678 683 def filenode(self):
679 684 return self._filenode
680 685 def flags(self):
681 686 return self._changectx.flags(self._path)
682 687 def filelog(self):
683 688 return self._filelog
684 689 def rev(self):
685 690 return self._changeid
686 691 def linkrev(self):
687 692 return self._filelog.linkrev(self._filerev)
688 693 def node(self):
689 694 return self._changectx.node()
690 695 def hex(self):
691 696 return self._changectx.hex()
692 697 def user(self):
693 698 return self._changectx.user()
694 699 def date(self):
695 700 return self._changectx.date()
696 701 def files(self):
697 702 return self._changectx.files()
698 703 def description(self):
699 704 return self._changectx.description()
700 705 def branch(self):
701 706 return self._changectx.branch()
702 707 def extra(self):
703 708 return self._changectx.extra()
704 709 def phase(self):
705 710 return self._changectx.phase()
706 711 def phasestr(self):
707 712 return self._changectx.phasestr()
708 713 def manifest(self):
709 714 return self._changectx.manifest()
710 715 def changectx(self):
711 716 return self._changectx
712 717
713 718 def path(self):
714 719 return self._path
715 720
716 721 def isbinary(self):
717 722 try:
718 723 return util.binary(self.data())
719 724 except IOError:
720 725 return False
721 726 def isexec(self):
722 727 return 'x' in self.flags()
723 728 def islink(self):
724 729 return 'l' in self.flags()
725 730
726 731 def cmp(self, fctx):
727 732 """compare with other file context
728 733
729 734 returns True if different than fctx.
730 735 """
731 736 if (fctx._filerev is None
732 737 and (self._repo._encodefilterpats
733 738 # if file data starts with '\1\n', empty metadata block is
734 739 # prepended, which adds 4 bytes to filelog.size().
735 740 or self.size() - 4 == fctx.size())
736 741 or self.size() == fctx.size()):
737 742 return self._filelog.cmp(self._filenode, fctx.data())
738 743
739 744 return True
740 745
741 746 def parents(self):
742 747 _path = self._path
743 748 fl = self._filelog
744 749 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
745 750
746 751 r = self._filelog.renamed(self._filenode)
747 752 if r:
748 753 pl[0] = (r[0], r[1], None)
749 754
750 755 return [filectx(self._repo, p, fileid=n, filelog=l)
751 756 for p, n, l in pl if n != nullid]
752 757
753 758 def p1(self):
754 759 return self.parents()[0]
755 760
756 761 def p2(self):
757 762 p = self.parents()
758 763 if len(p) == 2:
759 764 return p[1]
760 765 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
761 766
762 767 def annotate(self, follow=False, linenumber=None, diffopts=None):
763 768 '''returns a list of tuples of (ctx, line) for each line
764 769 in the file, where ctx is the filectx of the node where
765 770 that line was last changed.
766 771 This returns tuples of ((ctx, linenumber), line) for each line,
767 772 if "linenumber" parameter is NOT "None".
768 773 In such tuples, linenumber means one at the first appearance
769 774 in the managed file.
770 775 To reduce annotation cost,
771 776 this returns fixed value(False is used) as linenumber,
772 777 if "linenumber" parameter is "False".'''
773 778
774 779 if linenumber is None:
775 780 def decorate(text, rev):
776 781 return ([rev] * len(text.splitlines()), text)
777 782 elif linenumber:
778 783 def decorate(text, rev):
779 784 size = len(text.splitlines())
780 785 return ([(rev, i) for i in xrange(1, size + 1)], text)
781 786 else:
782 787 def decorate(text, rev):
783 788 return ([(rev, False)] * len(text.splitlines()), text)
784 789
785 790 def pair(parent, child):
786 791 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
787 792 refine=True)
788 793 for (a1, a2, b1, b2), t in blocks:
789 794 # Changed blocks ('!') or blocks made only of blank lines ('~')
790 795 # belong to the child.
791 796 if t == '=':
792 797 child[0][b1:b2] = parent[0][a1:a2]
793 798 return child
794 799
795 800 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
796 801
797 802 def parents(f):
798 803 pl = f.parents()
799 804
800 805 # Don't return renamed parents if we aren't following.
801 806 if not follow:
802 807 pl = [p for p in pl if p.path() == f.path()]
803 808
804 809 # renamed filectx won't have a filelog yet, so set it
805 810 # from the cache to save time
806 811 for p in pl:
807 812 if not '_filelog' in p.__dict__:
808 813 p._filelog = getlog(p.path())
809 814
810 815 return pl
811 816
812 817 # use linkrev to find the first changeset where self appeared
813 818 if self.rev() != self.linkrev():
814 819 base = self.filectx(self.filenode())
815 820 else:
816 821 base = self
817 822
818 823 # This algorithm would prefer to be recursive, but Python is a
819 824 # bit recursion-hostile. Instead we do an iterative
820 825 # depth-first search.
821 826
822 827 visit = [base]
823 828 hist = {}
824 829 pcache = {}
825 830 needed = {base: 1}
826 831 while visit:
827 832 f = visit[-1]
828 833 pcached = f in pcache
829 834 if not pcached:
830 835 pcache[f] = parents(f)
831 836
832 837 ready = True
833 838 pl = pcache[f]
834 839 for p in pl:
835 840 if p not in hist:
836 841 ready = False
837 842 visit.append(p)
838 843 if not pcached:
839 844 needed[p] = needed.get(p, 0) + 1
840 845 if ready:
841 846 visit.pop()
842 847 reusable = f in hist
843 848 if reusable:
844 849 curr = hist[f]
845 850 else:
846 851 curr = decorate(f.data(), f)
847 852 for p in pl:
848 853 if not reusable:
849 854 curr = pair(hist[p], curr)
850 855 if needed[p] == 1:
851 856 del hist[p]
852 857 del needed[p]
853 858 else:
854 859 needed[p] -= 1
855 860
856 861 hist[f] = curr
857 862 pcache[f] = []
858 863
859 864 return zip(hist[base][0], hist[base][1].splitlines(True))
860 865
861 866 def ancestors(self, followfirst=False):
862 867 visit = {}
863 868 c = self
864 869 cut = followfirst and 1 or None
865 870 while True:
866 871 for parent in c.parents()[:cut]:
867 872 visit[(parent.rev(), parent.node())] = parent
868 873 if not visit:
869 874 break
870 875 c = visit.pop(max(visit))
871 876 yield c
872 877
873 878 class filectx(basefilectx):
874 879 """A filecontext object makes access to data related to a particular
875 880 filerevision convenient."""
876 881 def __init__(self, repo, path, changeid=None, fileid=None,
877 882 filelog=None, changectx=None):
878 883 """changeid can be a changeset revision, node, or tag.
879 884 fileid can be a file revision or node."""
880 885 self._repo = repo
881 886 self._path = path
882 887
883 888 assert (changeid is not None
884 889 or fileid is not None
885 890 or changectx is not None), \
886 891 ("bad args: changeid=%r, fileid=%r, changectx=%r"
887 892 % (changeid, fileid, changectx))
888 893
889 894 if filelog is not None:
890 895 self._filelog = filelog
891 896
892 897 if changeid is not None:
893 898 self._changeid = changeid
894 899 if changectx is not None:
895 900 self._changectx = changectx
896 901 if fileid is not None:
897 902 self._fileid = fileid
898 903
899 904 @propertycache
900 905 def _changectx(self):
901 906 try:
902 907 return changectx(self._repo, self._changeid)
903 908 except error.RepoLookupError:
904 909 # Linkrev may point to any revision in the repository. When the
905 910 # repository is filtered this may lead to `filectx` trying to build
906 911 # `changectx` for filtered revision. In such case we fallback to
907 912 # creating `changectx` on the unfiltered version of the repository.
908 913 # This fallback should not be an issue because `changectx` from
909 914 # `filectx` are not used in complex operations that care about
910 915 # filtering.
911 916 #
912 917 # This fallback is a cheap and dirty fix that prevents several
913 918 # crashes. It does not ensure the behavior is correct. However the
914 919 # behavior was not correct before filtering either and "incorrect
915 920 # behavior" is seen as better than "crash"
916 921 #
917 922 # Linkrevs have several serious troubles with filtering that are
918 923 # complicated to solve. Proper handling of the issue here should be
919 924 # considered when solving the linkrev issues is on the table.
920 925 return changectx(self._repo.unfiltered(), self._changeid)
921 926
922 927 def filectx(self, fileid):
923 928 '''opens an arbitrary revision of the file without
924 929 opening a new filelog'''
925 930 return filectx(self._repo, self._path, fileid=fileid,
926 931 filelog=self._filelog)
927 932
928 933 def data(self):
929 934 try:
930 935 return self._filelog.read(self._filenode)
931 936 except error.CensoredNodeError:
932 937 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
933 938 return ""
934 939 raise util.Abort(_("censored node: %s") % short(self._filenode),
935 940 hint=_("set censor.policy to ignore errors"))
936 941
937 942 def size(self):
938 943 return self._filelog.size(self._filerev)
939 944
940 945 def renamed(self):
941 946 """check if file was actually renamed in this changeset revision
942 947
943 948 If a rename is logged in the file revision, we report a copy for the changeset only
944 949 if the file revision's linkrev points back to the changeset in question
945 950 or both changeset parents contain different file revisions.
946 951 """
947 952
948 953 renamed = self._filelog.renamed(self._filenode)
949 954 if not renamed:
950 955 return renamed
951 956
952 957 if self.rev() == self.linkrev():
953 958 return renamed
954 959
955 960 name = self.path()
956 961 fnode = self._filenode
957 962 for p in self._changectx.parents():
958 963 try:
959 964 if fnode == p.filenode(name):
960 965 return None
961 966 except error.LookupError:
962 967 pass
963 968 return renamed
964 969
965 970 def children(self):
966 971 # hard for renames
967 972 c = self._filelog.children(self._filenode)
968 973 return [filectx(self._repo, self._path, fileid=x,
969 974 filelog=self._filelog) for x in c]
970 975
971 976 class committablectx(basectx):
972 977 """A committablectx object provides common functionality for a context that
973 978 wants the ability to commit, e.g. workingctx or memctx."""
974 979 def __init__(self, repo, text="", user=None, date=None, extra=None,
975 980 changes=None):
976 981 self._repo = repo
977 982 self._rev = None
978 983 self._node = None
979 984 self._text = text
980 985 if date:
981 986 self._date = util.parsedate(date)
982 987 if user:
983 988 self._user = user
984 989 if changes:
985 990 self._status = changes
986 991
987 992 self._extra = {}
988 993 if extra:
989 994 self._extra = extra.copy()
990 995 if 'branch' not in self._extra:
991 996 try:
992 997 branch = encoding.fromlocal(self._repo.dirstate.branch())
993 998 except UnicodeDecodeError:
994 999 raise util.Abort(_('branch name not in UTF-8!'))
995 1000 self._extra['branch'] = branch
996 1001 if self._extra['branch'] == '':
997 1002 self._extra['branch'] = 'default'
998 1003
999 1004 def __str__(self):
1000 1005 return str(self._parents[0]) + "+"
1001 1006
1002 1007 def __nonzero__(self):
1003 1008 return True
1004 1009
1005 1010 def _buildflagfunc(self):
1006 1011 # Create a fallback function for getting file flags when the
1007 1012 # filesystem doesn't support them
1008 1013
1009 1014 copiesget = self._repo.dirstate.copies().get
1010 1015
1011 1016 if len(self._parents) < 2:
1012 1017 # when we have one parent, it's easy: copy from parent
1013 1018 man = self._parents[0].manifest()
1014 1019 def func(f):
1015 1020 f = copiesget(f, f)
1016 1021 return man.flags(f)
1017 1022 else:
1018 1023 # merges are tricky: we try to reconstruct the unstored
1019 1024 # result from the merge (issue1802)
1020 1025 p1, p2 = self._parents
1021 1026 pa = p1.ancestor(p2)
1022 1027 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1023 1028
1024 1029 def func(f):
1025 1030 f = copiesget(f, f) # may be wrong for merges with copies
1026 1031 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1027 1032 if fl1 == fl2:
1028 1033 return fl1
1029 1034 if fl1 == fla:
1030 1035 return fl2
1031 1036 if fl2 == fla:
1032 1037 return fl1
1033 1038 return '' # punt for conflicts
1034 1039
1035 1040 return func
1036 1041
1037 1042 @propertycache
1038 1043 def _flagfunc(self):
1039 1044 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1040 1045
1041 1046 @propertycache
1042 1047 def _manifest(self):
1043 1048 """generate a manifest corresponding to the values in self._status"""
1044 1049
1045 1050 man = self._parents[0].manifest().copy()
1046 1051 if len(self._parents) > 1:
1047 1052 man2 = self.p2().manifest()
1048 1053 def getman(f):
1049 1054 if f in man:
1050 1055 return man
1051 1056 return man2
1052 1057 else:
1053 1058 getman = lambda f: man
1054 1059
1055 1060 copied = self._repo.dirstate.copies()
1056 1061 ff = self._flagfunc
1057 1062 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1058 1063 for f in l:
1059 1064 orig = copied.get(f, f)
1060 1065 man[f] = getman(orig).get(orig, nullid) + i
1061 1066 try:
1062 1067 man.setflag(f, ff(f))
1063 1068 except OSError:
1064 1069 pass
1065 1070
1066 1071 for f in self._status.deleted + self._status.removed:
1067 1072 if f in man:
1068 1073 del man[f]
1069 1074
1070 1075 return man
1071 1076
1072 1077 @propertycache
1073 1078 def _status(self):
1074 1079 return self._repo.status()
1075 1080
1076 1081 @propertycache
1077 1082 def _user(self):
1078 1083 return self._repo.ui.username()
1079 1084
1080 1085 @propertycache
1081 1086 def _date(self):
1082 1087 return util.makedate()
1083 1088
1084 1089 def subrev(self, subpath):
1085 1090 return None
1086 1091
1087 1092 def user(self):
1088 1093 return self._user or self._repo.ui.username()
1089 1094 def date(self):
1090 1095 return self._date
1091 1096 def description(self):
1092 1097 return self._text
1093 1098 def files(self):
1094 1099 return sorted(self._status.modified + self._status.added +
1095 1100 self._status.removed)
1096 1101
1097 1102 def modified(self):
1098 1103 return self._status.modified
1099 1104 def added(self):
1100 1105 return self._status.added
1101 1106 def removed(self):
1102 1107 return self._status.removed
1103 1108 def deleted(self):
1104 1109 return self._status.deleted
1105 1110 def unknown(self):
1106 1111 return self._status.unknown
1107 1112 def ignored(self):
1108 1113 return self._status.ignored
1109 1114 def clean(self):
1110 1115 return self._status.clean
1111 1116 def branch(self):
1112 1117 return encoding.tolocal(self._extra['branch'])
1113 1118 def closesbranch(self):
1114 1119 return 'close' in self._extra
1115 1120 def extra(self):
1116 1121 return self._extra
1117 1122
1118 1123 def tags(self):
1119 1124 t = []
1120 1125 for p in self.parents():
1121 1126 t.extend(p.tags())
1122 1127 return t
1123 1128
1124 1129 def bookmarks(self):
1125 1130 b = []
1126 1131 for p in self.parents():
1127 1132 b.extend(p.bookmarks())
1128 1133 return b
1129 1134
1130 1135 def phase(self):
1131 1136 phase = phases.draft # default phase to draft
1132 1137 for p in self.parents():
1133 1138 phase = max(phase, p.phase())
1134 1139 return phase
1135 1140
1136 1141 def hidden(self):
1137 1142 return False
1138 1143
1139 1144 def children(self):
1140 1145 return []
1141 1146
1142 1147 def flags(self, path):
1143 1148 if '_manifest' in self.__dict__:
1144 1149 try:
1145 1150 return self._manifest.flags(path)
1146 1151 except KeyError:
1147 1152 return ''
1148 1153
1149 1154 try:
1150 1155 return self._flagfunc(path)
1151 1156 except OSError:
1152 1157 return ''
1153 1158
1154 1159 def ancestor(self, c2):
1155 1160 """return the "best" ancestor context of self and c2"""
1156 1161 return self._parents[0].ancestor(c2) # punt on two parents for now
1157 1162
1158 1163 def walk(self, match):
1159 1164 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1160 1165 True, False))
1161 1166
1162 1167 def matches(self, match):
1163 1168 return sorted(self._repo.dirstate.matches(match))
1164 1169
1165 1170 def ancestors(self):
1166 1171 for a in self._repo.changelog.ancestors(
1167 1172 [p.rev() for p in self._parents]):
1168 1173 yield changectx(self._repo, a)
1169 1174
1170 1175 def markcommitted(self, node):
1171 1176 """Perform post-commit cleanup necessary after committing this ctx
1172 1177
1173 1178 Specifically, this updates backing stores this working context
1174 1179 wraps to reflect the fact that the changes reflected by this
1175 1180 workingctx have been committed. For example, it marks
1176 1181 modified and added files as normal in the dirstate.
1177 1182
1178 1183 """
1179 1184
1180 1185 self._repo.dirstate.beginparentchange()
1181 1186 for f in self.modified() + self.added():
1182 1187 self._repo.dirstate.normal(f)
1183 1188 for f in self.removed():
1184 1189 self._repo.dirstate.drop(f)
1185 1190 self._repo.dirstate.setparents(node)
1186 1191 self._repo.dirstate.endparentchange()
1187 1192
1188 1193 def dirs(self):
1189 1194 return self._repo.dirstate.dirs()
1190 1195
1191 1196 class workingctx(committablectx):
1192 1197 """A workingctx object makes access to data related to
1193 1198 the current working directory convenient.
1194 1199 date - any valid date string or (unixtime, offset), or None.
1195 1200 user - username string, or None.
1196 1201 extra - a dictionary of extra values, or None.
1197 1202 changes - a list of file lists as returned by localrepo.status()
1198 1203 or None to use the repository status.
1199 1204 """
1200 1205 def __init__(self, repo, text="", user=None, date=None, extra=None,
1201 1206 changes=None):
1202 1207 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1203 1208
1204 1209 def __iter__(self):
1205 1210 d = self._repo.dirstate
1206 1211 for f in d:
1207 1212 if d[f] != 'r':
1208 1213 yield f
1209 1214
1210 1215 def __contains__(self, key):
1211 1216 return self._repo.dirstate[key] not in "?r"
1212 1217
1213 1218 @propertycache
1214 1219 def _parents(self):
1215 1220 p = self._repo.dirstate.parents()
1216 1221 if p[1] == nullid:
1217 1222 p = p[:-1]
1218 1223 return [changectx(self._repo, x) for x in p]
1219 1224
1220 1225 def filectx(self, path, filelog=None):
1221 1226 """get a file context from the working directory"""
1222 1227 return workingfilectx(self._repo, path, workingctx=self,
1223 1228 filelog=filelog)
1224 1229
1225 1230 def dirty(self, missing=False, merge=True, branch=True):
1226 1231 "check whether a working directory is modified"
1227 1232 # check subrepos first
1228 1233 for s in sorted(self.substate):
1229 1234 if self.sub(s).dirty():
1230 1235 return True
1231 1236 # check current working dir
1232 1237 return ((merge and self.p2()) or
1233 1238 (branch and self.branch() != self.p1().branch()) or
1234 1239 self.modified() or self.added() or self.removed() or
1235 1240 (missing and self.deleted()))
1236 1241
1237 1242 def add(self, list, prefix=""):
1238 1243 join = lambda f: os.path.join(prefix, f)
1239 1244 wlock = self._repo.wlock()
1240 1245 ui, ds = self._repo.ui, self._repo.dirstate
1241 1246 try:
1242 1247 rejected = []
1243 1248 lstat = self._repo.wvfs.lstat
1244 1249 for f in list:
1245 1250 scmutil.checkportable(ui, join(f))
1246 1251 try:
1247 1252 st = lstat(f)
1248 1253 except OSError:
1249 1254 ui.warn(_("%s does not exist!\n") % join(f))
1250 1255 rejected.append(f)
1251 1256 continue
1252 1257 if st.st_size > 10000000:
1253 1258 ui.warn(_("%s: up to %d MB of RAM may be required "
1254 1259 "to manage this file\n"
1255 1260 "(use 'hg revert %s' to cancel the "
1256 1261 "pending addition)\n")
1257 1262 % (f, 3 * st.st_size // 1000000, join(f)))
1258 1263 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1259 1264 ui.warn(_("%s not added: only files and symlinks "
1260 1265 "supported currently\n") % join(f))
1261 1266 rejected.append(f)
1262 1267 elif ds[f] in 'amn':
1263 1268 ui.warn(_("%s already tracked!\n") % join(f))
1264 1269 elif ds[f] == 'r':
1265 1270 ds.normallookup(f)
1266 1271 else:
1267 1272 ds.add(f)
1268 1273 return rejected
1269 1274 finally:
1270 1275 wlock.release()
1271 1276
1272 1277 def forget(self, files, prefix=""):
1273 1278 join = lambda f: os.path.join(prefix, f)
1274 1279 wlock = self._repo.wlock()
1275 1280 try:
1276 1281 rejected = []
1277 1282 for f in files:
1278 1283 if f not in self._repo.dirstate:
1279 1284 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1280 1285 rejected.append(f)
1281 1286 elif self._repo.dirstate[f] != 'a':
1282 1287 self._repo.dirstate.remove(f)
1283 1288 else:
1284 1289 self._repo.dirstate.drop(f)
1285 1290 return rejected
1286 1291 finally:
1287 1292 wlock.release()
1288 1293
1289 1294 def undelete(self, list):
1290 1295 pctxs = self.parents()
1291 1296 wlock = self._repo.wlock()
1292 1297 try:
1293 1298 for f in list:
1294 1299 if self._repo.dirstate[f] != 'r':
1295 1300 self._repo.ui.warn(_("%s not removed!\n") % f)
1296 1301 else:
1297 1302 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1298 1303 t = fctx.data()
1299 1304 self._repo.wwrite(f, t, fctx.flags())
1300 1305 self._repo.dirstate.normal(f)
1301 1306 finally:
1302 1307 wlock.release()
1303 1308
1304 1309 def copy(self, source, dest):
1305 1310 try:
1306 1311 st = self._repo.wvfs.lstat(dest)
1307 1312 except OSError, err:
1308 1313 if err.errno != errno.ENOENT:
1309 1314 raise
1310 1315 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1311 1316 return
1312 1317 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1313 1318 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1314 1319 "symbolic link\n") % dest)
1315 1320 else:
1316 1321 wlock = self._repo.wlock()
1317 1322 try:
1318 1323 if self._repo.dirstate[dest] in '?r':
1319 1324 self._repo.dirstate.add(dest)
1320 1325 self._repo.dirstate.copy(source, dest)
1321 1326 finally:
1322 1327 wlock.release()
1323 1328
1324 1329 def _filtersuspectsymlink(self, files):
1325 1330 if not files or self._repo.dirstate._checklink:
1326 1331 return files
1327 1332
1328 1333 # Symlink placeholders may get non-symlink-like contents
1329 1334 # via user error or dereferencing by NFS or Samba servers,
1330 1335 # so we filter out any placeholders that don't look like a
1331 1336 # symlink
1332 1337 sane = []
1333 1338 for f in files:
1334 1339 if self.flags(f) == 'l':
1335 1340 d = self[f].data()
1336 1341 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1337 1342 self._repo.ui.debug('ignoring suspect symlink placeholder'
1338 1343 ' "%s"\n' % f)
1339 1344 continue
1340 1345 sane.append(f)
1341 1346 return sane
1342 1347
1343 1348 def _checklookup(self, files):
1344 1349 # check for any possibly clean files
1345 1350 if not files:
1346 1351 return [], []
1347 1352
1348 1353 modified = []
1349 1354 fixup = []
1350 1355 pctx = self._parents[0]
1351 1356 # do a full compare of any files that might have changed
1352 1357 for f in sorted(files):
1353 1358 if (f not in pctx or self.flags(f) != pctx.flags(f)
1354 1359 or pctx[f].cmp(self[f])):
1355 1360 modified.append(f)
1356 1361 else:
1357 1362 fixup.append(f)
1358 1363
1359 1364 # update dirstate for files that are actually clean
1360 1365 if fixup:
1361 1366 try:
1362 1367 # updating the dirstate is optional
1363 1368 # so we don't wait on the lock
1364 1369 # wlock can invalidate the dirstate, so cache normal _after_
1365 1370 # taking the lock
1366 1371 wlock = self._repo.wlock(False)
1367 1372 normal = self._repo.dirstate.normal
1368 1373 try:
1369 1374 for f in fixup:
1370 1375 normal(f)
1371 1376 finally:
1372 1377 wlock.release()
1373 1378 except error.LockError:
1374 1379 pass
1375 1380 return modified, fixup
1376 1381
1377 1382 def _manifestmatches(self, match, s):
1378 1383 """Slow path for workingctx
1379 1384
1380 1385 The fast path is when we compare the working directory to its parent
1381 1386 which means this function is comparing with a non-parent; therefore we
1382 1387 need to build a manifest and return what matches.
1383 1388 """
1384 1389 mf = self._repo['.']._manifestmatches(match, s)
1385 1390 modified, added, removed = s[0:3]
1386 1391 for f in modified + added:
1387 1392 mf[f] = None
1388 1393 mf.setflag(f, self.flags(f))
1389 1394 for f in removed:
1390 1395 if f in mf:
1391 1396 del mf[f]
1392 1397 return mf
1393 1398
1394 1399 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1395 1400 unknown=False):
1396 1401 '''Gets the status from the dirstate -- internal use only.'''
1397 1402 listignored, listclean, listunknown = ignored, clean, unknown
1398 1403 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1399 1404 subrepos = []
1400 1405 if '.hgsub' in self:
1401 1406 subrepos = sorted(self.substate)
1402 1407 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1403 1408 listclean, listunknown)
1404 1409 modified, added, removed, deleted, unknown, ignored, clean = s
1405 1410
1406 1411 # check for any possibly clean files
1407 1412 if cmp:
1408 1413 modified2, fixup = self._checklookup(cmp)
1409 1414 modified += modified2
1410 1415
1411 1416 # update dirstate for files that are actually clean
1412 1417 if fixup and listclean:
1413 1418 clean += fixup
1414 1419
1415 1420 return [modified, added, removed, deleted, unknown, ignored, clean]
1416 1421
1417 1422 def _buildstatus(self, other, s, match, listignored, listclean,
1418 1423 listunknown):
1419 1424 """build a status with respect to another context
1420 1425
1421 1426 This includes logic for maintaining the fast path of status when
1422 1427 comparing the working directory against its parent, which is to skip
1423 1428 building a new manifest if self (working directory) is not comparing
1424 1429 against its parent (repo['.']).
1425 1430 """
1426 1431 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1427 1432 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1428 1433 # might have accidentally ended up with the entire contents of the file
1429 1434 # they are supposed to be linking to.
1430 1435 s[0] = self._filtersuspectsymlink(s[0])
1431 1436 if other != self._repo['.']:
1432 1437 s = super(workingctx, self)._buildstatus(other, s, match,
1433 1438 listignored, listclean,
1434 1439 listunknown)
1435 1440 self._status = scmutil.status(*s)
1436 1441 return s
1437 1442
1438 1443 def _matchstatus(self, other, match):
1439 1444 """override the match method with a filter for directory patterns
1440 1445
1441 1446 We use inheritance to customize the match.bad method only in cases of
1442 1447 workingctx since it belongs only to the working directory when
1443 1448 comparing against the parent changeset.
1444 1449
1445 1450 If we aren't comparing against the working directory's parent, then we
1446 1451 just use the default match object sent to us.
1447 1452 """
1448 1453 superself = super(workingctx, self)
1449 1454 match = superself._matchstatus(other, match)
1450 1455 if other != self._repo['.']:
1451 1456 def bad(f, msg):
1452 1457 # 'f' may be a directory pattern from 'match.files()',
1453 1458 # so 'f not in ctx1' is not enough
1454 1459 if f not in other and f not in other.dirs():
1455 1460 self._repo.ui.warn('%s: %s\n' %
1456 1461 (self._repo.dirstate.pathto(f), msg))
1457 1462 match.bad = bad
1458 1463 return match
1459 1464
1460 1465 class committablefilectx(basefilectx):
1461 1466 """A committablefilectx provides common functionality for a file context
1462 1467 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1463 1468 def __init__(self, repo, path, filelog=None, ctx=None):
1464 1469 self._repo = repo
1465 1470 self._path = path
1466 1471 self._changeid = None
1467 1472 self._filerev = self._filenode = None
1468 1473
1469 1474 if filelog is not None:
1470 1475 self._filelog = filelog
1471 1476 if ctx:
1472 1477 self._changectx = ctx
1473 1478
1474 1479 def __nonzero__(self):
1475 1480 return True
1476 1481
1477 1482 def parents(self):
1478 1483 '''return parent filectxs, following copies if necessary'''
1479 1484 def filenode(ctx, path):
1480 1485 return ctx._manifest.get(path, nullid)
1481 1486
1482 1487 path = self._path
1483 1488 fl = self._filelog
1484 1489 pcl = self._changectx._parents
1485 1490 renamed = self.renamed()
1486 1491
1487 1492 if renamed:
1488 1493 pl = [renamed + (None,)]
1489 1494 else:
1490 1495 pl = [(path, filenode(pcl[0], path), fl)]
1491 1496
1492 1497 for pc in pcl[1:]:
1493 1498 pl.append((path, filenode(pc, path), fl))
1494 1499
1495 1500 return [filectx(self._repo, p, fileid=n, filelog=l)
1496 1501 for p, n, l in pl if n != nullid]
1497 1502
1498 1503 def children(self):
1499 1504 return []
1500 1505
1501 1506 class workingfilectx(committablefilectx):
1502 1507 """A workingfilectx object makes access to data related to a particular
1503 1508 file in the working directory convenient."""
1504 1509 def __init__(self, repo, path, filelog=None, workingctx=None):
1505 1510 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1506 1511
1507 1512 @propertycache
1508 1513 def _changectx(self):
1509 1514 return workingctx(self._repo)
1510 1515
1511 1516 def data(self):
1512 1517 return self._repo.wread(self._path)
1513 1518 def renamed(self):
1514 1519 rp = self._repo.dirstate.copied(self._path)
1515 1520 if not rp:
1516 1521 return None
1517 1522 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1518 1523
1519 1524 def size(self):
1520 1525 return self._repo.wvfs.lstat(self._path).st_size
1521 1526 def date(self):
1522 1527 t, tz = self._changectx.date()
1523 1528 try:
1524 1529 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1525 1530 except OSError, err:
1526 1531 if err.errno != errno.ENOENT:
1527 1532 raise
1528 1533 return (t, tz)
1529 1534
1530 1535 def cmp(self, fctx):
1531 1536 """compare with other file context
1532 1537
1533 1538 returns True if different than fctx.
1534 1539 """
1535 1540 # fctx should be a filectx (not a workingfilectx)
1536 1541 # invert comparison to reuse the same code path
1537 1542 return fctx.cmp(self)
1538 1543
1539 1544 def remove(self, ignoremissing=False):
1540 1545 """wraps unlink for a repo's working directory"""
1541 1546 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1542 1547
1543 1548 def write(self, data, flags):
1544 1549 """wraps repo.wwrite"""
1545 1550 self._repo.wwrite(self._path, data, flags)
1546 1551
1547 1552 class memctx(committablectx):
1548 1553 """Use memctx to perform in-memory commits via localrepo.commitctx().
1549 1554
1550 1555 Revision information is supplied at initialization time, while
1551 1556 related file data is made available through a callback
1552 1557 mechanism. 'repo' is the current localrepo, 'parents' is a
1553 1558 sequence of two parent revisions identifiers (pass None for every
1554 1559 missing parent), 'text' is the commit message and 'files' lists
1555 1560 names of files touched by the revision (normalized and relative to
1556 1561 repository root).
1557 1562
1558 1563 filectxfn(repo, memctx, path) is a callable receiving the
1559 1564 repository, the current memctx object and the normalized path of
1560 1565 requested file, relative to repository root. It is fired by the
1561 1566 commit function for every file in 'files', but calls order is
1562 1567 undefined. If the file is available in the revision being
1563 1568 committed (updated or added), filectxfn returns a memfilectx
1564 1569 object. If the file was removed, filectxfn raises an
1565 1570 IOError. Moved files are represented by marking the source file
1566 1571 removed and the new file added with copy information (see
1567 1572 memfilectx).
1568 1573
1569 1574 user receives the committer name and defaults to current
1570 1575 repository username, date is the commit date in any format
1571 1576 supported by util.parsedate() and defaults to current date, extra
1572 1577 is a dictionary of metadata or is left empty.
1573 1578 """
1574 1579
1575 1580 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1576 1581 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1577 1582 # this field to determine what to do in filectxfn.
1578 1583 _returnnoneformissingfiles = True
1579 1584
1580 1585 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1581 1586 date=None, extra=None, editor=False):
1582 1587 super(memctx, self).__init__(repo, text, user, date, extra)
1583 1588 self._rev = None
1584 1589 self._node = None
1585 1590 parents = [(p or nullid) for p in parents]
1586 1591 p1, p2 = parents
1587 1592 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1588 1593 files = sorted(set(files))
1589 1594 self._status = scmutil.status(files, [], [], [], [], [], [])
1590 1595 self._filectxfn = filectxfn
1591 1596 self.substate = {}
1592 1597
1593 1598 # if store is not callable, wrap it in a function
1594 1599 if not callable(filectxfn):
1595 1600 def getfilectx(repo, memctx, path):
1596 1601 fctx = filectxfn[path]
1597 1602 # this is weird but apparently we only keep track of one parent
1598 1603 # (why not only store that instead of a tuple?)
1599 1604 copied = fctx.renamed()
1600 1605 if copied:
1601 1606 copied = copied[0]
1602 1607 return memfilectx(repo, path, fctx.data(),
1603 1608 islink=fctx.islink(), isexec=fctx.isexec(),
1604 1609 copied=copied, memctx=memctx)
1605 1610 self._filectxfn = getfilectx
1606 1611
1607 1612 self._extra = extra and extra.copy() or {}
1608 1613 if self._extra.get('branch', '') == '':
1609 1614 self._extra['branch'] = 'default'
1610 1615
1611 1616 if editor:
1612 1617 self._text = editor(self._repo, self, [])
1613 1618 self._repo.savecommitmessage(self._text)
1614 1619
1615 1620 def filectx(self, path, filelog=None):
1616 1621 """get a file context from the working directory
1617 1622
1618 1623 Returns None if file doesn't exist and should be removed."""
1619 1624 return self._filectxfn(self._repo, self, path)
1620 1625
1621 1626 def commit(self):
1622 1627 """commit context to the repo"""
1623 1628 return self._repo.commitctx(self)
1624 1629
1625 1630 @propertycache
1626 1631 def _manifest(self):
1627 1632 """generate a manifest based on the return values of filectxfn"""
1628 1633
1629 1634 # keep this simple for now; just worry about p1
1630 1635 pctx = self._parents[0]
1631 1636 man = pctx.manifest().copy()
1632 1637
1633 1638 for f, fnode in man.iteritems():
1634 1639 p1node = nullid
1635 1640 p2node = nullid
1636 1641 p = pctx[f].parents() # if file isn't in pctx, check p2?
1637 1642 if len(p) > 0:
1638 1643 p1node = p[0].node()
1639 1644 if len(p) > 1:
1640 1645 p2node = p[1].node()
1641 1646 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1642 1647
1643 1648 return man
1644 1649
1645 1650
1646 1651 class memfilectx(committablefilectx):
1647 1652 """memfilectx represents an in-memory file to commit.
1648 1653
1649 1654 See memctx and committablefilectx for more details.
1650 1655 """
1651 1656 def __init__(self, repo, path, data, islink=False,
1652 1657 isexec=False, copied=None, memctx=None):
1653 1658 """
1654 1659 path is the normalized file path relative to repository root.
1655 1660 data is the file content as a string.
1656 1661 islink is True if the file is a symbolic link.
1657 1662 isexec is True if the file is executable.
1658 1663 copied is the source file path if current file was copied in the
1659 1664 revision being committed, or None."""
1660 1665 super(memfilectx, self).__init__(repo, path, None, memctx)
1661 1666 self._data = data
1662 1667 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1663 1668 self._copied = None
1664 1669 if copied:
1665 1670 self._copied = (copied, nullid)
1666 1671
1667 1672 def data(self):
1668 1673 return self._data
1669 1674 def size(self):
1670 1675 return len(self.data())
1671 1676 def flags(self):
1672 1677 return self._flags
1673 1678 def renamed(self):
1674 1679 return self._copied
1675 1680
1676 1681 def remove(self, ignoremissing=False):
1677 1682 """wraps unlink for a repo's working directory"""
1678 1683 # need to figure out what to do here
1679 1684 del self._changectx[self._path]
1680 1685
1681 1686 def write(self, data, flags):
1682 1687 """wraps repo.wwrite"""
1683 1688 self._data = data