namespaces: add branches...
Sean Farley
r23563:11499204 default
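This changeset wires Mercurial's named branches into the new namespaces table: the namespaces.py hunk at the bottom registers a "branches" entry backed by repo.branchtip(), and the context.py hunk below drops the explicit branchtip() fallback from changectx.__init__, since branch names are now resolved through repo.names.singlenode() alongside bookmarks and tags. As a rough illustration of the registration pattern this enables, here is a hypothetical extension sketch; only addnamespace() and tolist() come from the code in this diff, while the "reviewheads" namespace and the repo._reviewheads mapping are invented for illustration:

    # Hypothetical sketch, not part of this changeset. It mirrors the
    # addnamespace("branches", ...) call added in namespaces.py: a plural
    # namespace name plus a namemap that returns a list of nodes (an empty
    # list for names it does not know about).
    from mercurial.namespaces import tolist

    def reposetup(ui, repo):
        # repo._reviewheads is an invented {name: node} dict for illustration
        repo.names.addnamespace(
            "reviewheads",
            lambda repo, name: tolist(repo._reviewheads.get(name)))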
@@ -1,1685 +1,1680 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 class basectx(object):
21 21 """A basectx object represents the common logic for its children:
22 22 changectx: read-only context that is already present in the repo,
23 23 workingctx: a context that represents the working directory and can
24 24 be committed,
25 25 memctx: a context that represents changes in-memory and can also
26 26 be committed."""
27 27 def __new__(cls, repo, changeid='', *args, **kwargs):
28 28 if isinstance(changeid, basectx):
29 29 return changeid
30 30
31 31 o = super(basectx, cls).__new__(cls)
32 32
33 33 o._repo = repo
34 34 o._rev = nullrev
35 35 o._node = nullid
36 36
37 37 return o
38 38
39 39 def __str__(self):
40 40 return short(self.node())
41 41
42 42 def __int__(self):
43 43 return self.rev()
44 44
45 45 def __repr__(self):
46 46 return "<%s %s>" % (type(self).__name__, str(self))
47 47
48 48 def __eq__(self, other):
49 49 try:
50 50 return type(self) == type(other) and self._rev == other._rev
51 51 except AttributeError:
52 52 return False
53 53
54 54 def __ne__(self, other):
55 55 return not (self == other)
56 56
57 57 def __contains__(self, key):
58 58 return key in self._manifest
59 59
60 60 def __getitem__(self, key):
61 61 return self.filectx(key)
62 62
63 63 def __iter__(self):
64 64 for f in sorted(self._manifest):
65 65 yield f
66 66
67 67 def _manifestmatches(self, match, s):
68 68 """generate a new manifest filtered by the match argument
69 69
70 70 This method is for internal use only and mainly exists to provide an
71 71 object oriented way for other contexts to customize the manifest
72 72 generation.
73 73 """
74 74 return self.manifest().matches(match)
75 75
76 76 def _matchstatus(self, other, match):
77 77 """return match.always if match is none
78 78
79 79 This internal method provides a way for child objects to override the
80 80 match operator.
81 81 """
82 82 return match or matchmod.always(self._repo.root, self._repo.getcwd())
83 83
84 84 def _buildstatus(self, other, s, match, listignored, listclean,
85 85 listunknown):
86 86 """build a status with respect to another context"""
87 87 # Load earliest manifest first for caching reasons. More specifically,
88 88 # if you have revisions 1000 and 1001, 1001 is probably stored as a
89 89 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
90 90 # 1000 and cache it so that when you read 1001, we just need to apply a
91 91 # delta to what's in the cache. So that's one full reconstruction + one
92 92 # delta application.
93 93 if self.rev() is not None and self.rev() < other.rev():
94 94 self.manifest()
95 95 mf1 = other._manifestmatches(match, s)
96 96 mf2 = self._manifestmatches(match, s)
97 97
98 98 modified, added, clean = [], [], []
99 99 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
100 100 deletedset = set(deleted)
101 101 withflags = mf1.withflags() | mf2.withflags()
102 102 for fn, mf2node in mf2.iteritems():
103 103 if fn in mf1:
104 104 if (fn not in deletedset and
105 105 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
106 106 (mf1[fn] != mf2node and
107 107 (mf2node or self[fn].cmp(other[fn]))))):
108 108 modified.append(fn)
109 109 elif listclean:
110 110 clean.append(fn)
111 111 del mf1[fn]
112 112 elif fn not in deletedset:
113 113 added.append(fn)
114 114 removed = mf1.keys()
115 115 if removed:
116 116 # need to filter files if they are already reported as removed
117 117 unknown = [fn for fn in unknown if fn not in mf1]
118 118 ignored = [fn for fn in ignored if fn not in mf1]
119 119
120 120 return scmutil.status(modified, added, removed, deleted, unknown,
121 121 ignored, clean)
122 122
123 123 @propertycache
124 124 def substate(self):
125 125 return subrepo.state(self, self._repo.ui)
126 126
127 127 def subrev(self, subpath):
128 128 return self.substate[subpath][1]
129 129
130 130 def rev(self):
131 131 return self._rev
132 132 def node(self):
133 133 return self._node
134 134 def hex(self):
135 135 return hex(self.node())
136 136 def manifest(self):
137 137 return self._manifest
138 138 def phasestr(self):
139 139 return phases.phasenames[self.phase()]
140 140 def mutable(self):
141 141 return self.phase() > phases.public
142 142
143 143 def getfileset(self, expr):
144 144 return fileset.getfileset(self, expr)
145 145
146 146 def obsolete(self):
147 147 """True if the changeset is obsolete"""
148 148 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
149 149
150 150 def extinct(self):
151 151 """True if the changeset is extinct"""
152 152 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
153 153
154 154 def unstable(self):
155 155 """True if the changeset is not obsolete but it's ancestor are"""
156 156 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
157 157
158 158 def bumped(self):
159 159 """True if the changeset try to be a successor of a public changeset
160 160
161 161 Only non-public and non-obsolete changesets may be bumped.
162 162 """
163 163 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
164 164
165 165 def divergent(self):
166 166 """Is a successors of a changeset with multiple possible successors set
167 167
168 168 Only non-public and non-obsolete changesets may be divergent.
169 169 """
170 170 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
171 171
172 172 def troubled(self):
173 173 """True if the changeset is either unstable, bumped or divergent"""
174 174 return self.unstable() or self.bumped() or self.divergent()
175 175
176 176 def troubles(self):
177 177 """return the list of troubles affecting this changesets.
178 178
179 179 Troubles are returned as strings. possible values are:
180 180 - unstable,
181 181 - bumped,
182 182 - divergent.
183 183 """
184 184 troubles = []
185 185 if self.unstable():
186 186 troubles.append('unstable')
187 187 if self.bumped():
188 188 troubles.append('bumped')
189 189 if self.divergent():
190 190 troubles.append('divergent')
191 191 return troubles
192 192
193 193 def parents(self):
194 194 """return contexts for each parent changeset"""
195 195 return self._parents
196 196
197 197 def p1(self):
198 198 return self._parents[0]
199 199
200 200 def p2(self):
201 201 if len(self._parents) == 2:
202 202 return self._parents[1]
203 203 return changectx(self._repo, -1)
204 204
205 205 def _fileinfo(self, path):
206 206 if '_manifest' in self.__dict__:
207 207 try:
208 208 return self._manifest[path], self._manifest.flags(path)
209 209 except KeyError:
210 210 raise error.ManifestLookupError(self._node, path,
211 211 _('not found in manifest'))
212 212 if '_manifestdelta' in self.__dict__ or path in self.files():
213 213 if path in self._manifestdelta:
214 214 return (self._manifestdelta[path],
215 215 self._manifestdelta.flags(path))
216 216 node, flag = self._repo.manifest.find(self._changeset[0], path)
217 217 if not node:
218 218 raise error.ManifestLookupError(self._node, path,
219 219 _('not found in manifest'))
220 220
221 221 return node, flag
222 222
223 223 def filenode(self, path):
224 224 return self._fileinfo(path)[0]
225 225
226 226 def flags(self, path):
227 227 try:
228 228 return self._fileinfo(path)[1]
229 229 except error.LookupError:
230 230 return ''
231 231
232 232 def sub(self, path):
233 233 return subrepo.subrepo(self, path)
234 234
235 235 def match(self, pats=[], include=None, exclude=None, default='glob'):
236 236 r = self._repo
237 237 return matchmod.match(r.root, r.getcwd(), pats,
238 238 include, exclude, default,
239 239 auditor=r.auditor, ctx=self)
240 240
241 241 def diff(self, ctx2=None, match=None, **opts):
242 242 """Returns a diff generator for the given contexts and matcher"""
243 243 if ctx2 is None:
244 244 ctx2 = self.p1()
245 245 if ctx2 is not None:
246 246 ctx2 = self._repo[ctx2]
247 247 diffopts = patch.diffopts(self._repo.ui, opts)
248 248 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
249 249
250 250 @propertycache
251 251 def _dirs(self):
252 252 return scmutil.dirs(self._manifest)
253 253
254 254 def dirs(self):
255 255 return self._dirs
256 256
257 257 def dirty(self, missing=False, merge=True, branch=True):
258 258 return False
259 259
260 260 def status(self, other=None, match=None, listignored=False,
261 261 listclean=False, listunknown=False, listsubrepos=False):
262 262 """return status of files between two nodes or node and working
263 263 directory.
264 264
265 265 If other is None, compare this node with working directory.
266 266
267 267 returns (modified, added, removed, deleted, unknown, ignored, clean)
268 268 """
269 269
270 270 ctx1 = self
271 271 ctx2 = self._repo[other]
272 272
273 273 # This next code block is, admittedly, fragile logic that tests for
274 274 # reversing the contexts and wouldn't need to exist if it weren't for
275 275 # the fast (and common) code path of comparing the working directory
276 276 # with its first parent.
277 277 #
278 278 # What we're aiming for here is the ability to call:
279 279 #
280 280 # workingctx.status(parentctx)
281 281 #
282 282 # If we always built the manifest for each context and compared those,
283 283 # then we'd be done. But the special case of the above call means we
284 284 # just copy the manifest of the parent.
285 285 reversed = False
286 286 if (not isinstance(ctx1, changectx)
287 287 and isinstance(ctx2, changectx)):
288 288 reversed = True
289 289 ctx1, ctx2 = ctx2, ctx1
290 290
291 291 match = ctx2._matchstatus(ctx1, match)
292 292 r = scmutil.status([], [], [], [], [], [], [])
293 293 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
294 294 listunknown)
295 295
296 296 if reversed:
297 297 # Reverse added and removed. Clear deleted, unknown and ignored as
298 298 # these make no sense to reverse.
299 299 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
300 300 r.clean)
301 301
302 302 if listsubrepos:
303 303 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
304 304 rev2 = ctx2.subrev(subpath)
305 305 try:
306 306 submatch = matchmod.narrowmatcher(subpath, match)
307 307 s = sub.status(rev2, match=submatch, ignored=listignored,
308 308 clean=listclean, unknown=listunknown,
309 309 listsubrepos=True)
310 310 for rfiles, sfiles in zip(r, s):
311 311 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
312 312 except error.LookupError:
313 313 self._repo.ui.status(_("skipping missing "
314 314 "subrepository: %s\n") % subpath)
315 315
316 316 for l in r:
317 317 l.sort()
318 318
319 319 return r
320 320
321 321
322 322 def makememctx(repo, parents, text, user, date, branch, files, store,
323 323 editor=None):
324 324 def getfilectx(repo, memctx, path):
325 325 data, mode, copied = store.getfile(path)
326 326 if data is None:
327 327 return None
328 328 islink, isexec = mode
329 329 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
330 330 copied=copied, memctx=memctx)
331 331 extra = {}
332 332 if branch:
333 333 extra['branch'] = encoding.fromlocal(branch)
334 334 ctx = memctx(repo, parents, text, files, getfilectx, user,
335 335 date, extra, editor)
336 336 return ctx
337 337
338 338 class changectx(basectx):
339 339 """A changecontext object makes access to data related to a particular
340 340 changeset convenient. It represents a read-only context already present in
341 341 the repo."""
342 342 def __init__(self, repo, changeid=''):
343 343 """changeid is a revision number, node, or tag"""
344 344
345 345 # since basectx.__new__ already took care of copying the object, we
346 346 # don't need to do anything in __init__, so we just exit here
347 347 if isinstance(changeid, basectx):
348 348 return
349 349
350 350 if changeid == '':
351 351 changeid = '.'
352 352 self._repo = repo
353 353
354 354 try:
355 355 if isinstance(changeid, int):
356 356 self._node = repo.changelog.node(changeid)
357 357 self._rev = changeid
358 358 return
359 359 if isinstance(changeid, long):
360 360 changeid = str(changeid)
361 361 if changeid == '.':
362 362 self._node = repo.dirstate.p1()
363 363 self._rev = repo.changelog.rev(self._node)
364 364 return
365 365 if changeid == 'null':
366 366 self._node = nullid
367 367 self._rev = nullrev
368 368 return
369 369 if changeid == 'tip':
370 370 self._node = repo.changelog.tip()
371 371 self._rev = repo.changelog.rev(self._node)
372 372 return
373 373 if len(changeid) == 20:
374 374 try:
375 375 self._node = changeid
376 376 self._rev = repo.changelog.rev(changeid)
377 377 return
378 378 except error.FilteredRepoLookupError:
379 379 raise
380 380 except LookupError:
381 381 pass
382 382
383 383 try:
384 384 r = int(changeid)
385 385 if str(r) != changeid:
386 386 raise ValueError
387 387 l = len(repo.changelog)
388 388 if r < 0:
389 389 r += l
390 390 if r < 0 or r >= l:
391 391 raise ValueError
392 392 self._rev = r
393 393 self._node = repo.changelog.node(r)
394 394 return
395 395 except error.FilteredIndexError:
396 396 raise
397 397 except (ValueError, OverflowError, IndexError):
398 398 pass
399 399
400 400 if len(changeid) == 40:
401 401 try:
402 402 self._node = bin(changeid)
403 403 self._rev = repo.changelog.rev(self._node)
404 404 return
405 405 except error.FilteredLookupError:
406 406 raise
407 407 except (TypeError, LookupError):
408 408 pass
409 409
410 410 # lookup bookmarks through the name interface
411 411 try:
412 412 self._node = repo.names.singlenode(repo, changeid)
413 413 self._rev = repo.changelog.rev(self._node)
414 414 return
415 415 except KeyError:
416 416 pass
417
418 try:
419 self._node = repo.branchtip(changeid)
420 self._rev = repo.changelog.rev(self._node)
421 return
422 417 except error.FilteredRepoLookupError:
423 418 raise
424 419 except error.RepoLookupError:
425 420 pass
426 421
427 422 self._node = repo.unfiltered().changelog._partialmatch(changeid)
428 423 if self._node is not None:
429 424 self._rev = repo.changelog.rev(self._node)
430 425 return
431 426
432 427 # lookup failed
433 428 # check if it might have come from damaged dirstate
434 429 #
435 430 # XXX we could avoid the unfiltered if we had a recognizable
436 431 # exception for filtered changeset access
437 432 if changeid in repo.unfiltered().dirstate.parents():
438 433 msg = _("working directory has unknown parent '%s'!")
439 434 raise error.Abort(msg % short(changeid))
440 435 try:
441 436 if len(changeid) == 20:
442 437 changeid = hex(changeid)
443 438 except TypeError:
444 439 pass
445 440 except (error.FilteredIndexError, error.FilteredLookupError,
446 441 error.FilteredRepoLookupError):
447 442 if repo.filtername == 'visible':
448 443 msg = _("hidden revision '%s'") % changeid
449 444 hint = _('use --hidden to access hidden revisions')
450 445 raise error.FilteredRepoLookupError(msg, hint=hint)
451 446 msg = _("filtered revision '%s' (not in '%s' subset)")
452 447 msg %= (changeid, repo.filtername)
453 448 raise error.FilteredRepoLookupError(msg)
454 449 except IndexError:
455 450 pass
456 451 raise error.RepoLookupError(
457 452 _("unknown revision '%s'") % changeid)
458 453
459 454 def __hash__(self):
460 455 try:
461 456 return hash(self._rev)
462 457 except AttributeError:
463 458 return id(self)
464 459
465 460 def __nonzero__(self):
466 461 return self._rev != nullrev
467 462
468 463 @propertycache
469 464 def _changeset(self):
470 465 return self._repo.changelog.read(self.rev())
471 466
472 467 @propertycache
473 468 def _manifest(self):
474 469 return self._repo.manifest.read(self._changeset[0])
475 470
476 471 @propertycache
477 472 def _manifestdelta(self):
478 473 return self._repo.manifest.readdelta(self._changeset[0])
479 474
480 475 @propertycache
481 476 def _parents(self):
482 477 p = self._repo.changelog.parentrevs(self._rev)
483 478 if p[1] == nullrev:
484 479 p = p[:-1]
485 480 return [changectx(self._repo, x) for x in p]
486 481
487 482 def changeset(self):
488 483 return self._changeset
489 484 def manifestnode(self):
490 485 return self._changeset[0]
491 486
492 487 def user(self):
493 488 return self._changeset[1]
494 489 def date(self):
495 490 return self._changeset[2]
496 491 def files(self):
497 492 return self._changeset[3]
498 493 def description(self):
499 494 return self._changeset[4]
500 495 def branch(self):
501 496 return encoding.tolocal(self._changeset[5].get("branch"))
502 497 def closesbranch(self):
503 498 return 'close' in self._changeset[5]
504 499 def extra(self):
505 500 return self._changeset[5]
506 501 def tags(self):
507 502 return self._repo.nodetags(self._node)
508 503 def bookmarks(self):
509 504 return self._repo.nodebookmarks(self._node)
510 505 def phase(self):
511 506 return self._repo._phasecache.phase(self._repo, self._rev)
512 507 def hidden(self):
513 508 return self._rev in repoview.filterrevs(self._repo, 'visible')
514 509
515 510 def children(self):
516 511 """return contexts for each child changeset"""
517 512 c = self._repo.changelog.children(self._node)
518 513 return [changectx(self._repo, x) for x in c]
519 514
520 515 def ancestors(self):
521 516 for a in self._repo.changelog.ancestors([self._rev]):
522 517 yield changectx(self._repo, a)
523 518
524 519 def descendants(self):
525 520 for d in self._repo.changelog.descendants([self._rev]):
526 521 yield changectx(self._repo, d)
527 522
528 523 def filectx(self, path, fileid=None, filelog=None):
529 524 """get a file context from this changeset"""
530 525 if fileid is None:
531 526 fileid = self.filenode(path)
532 527 return filectx(self._repo, path, fileid=fileid,
533 528 changectx=self, filelog=filelog)
534 529
535 530 def ancestor(self, c2, warn=False):
536 531 """return the "best" ancestor context of self and c2
537 532
538 533 If there are multiple candidates, it will show a message and check
539 534 merge.preferancestor configuration before falling back to the
540 535 revlog ancestor."""
541 536 # deal with workingctxs
542 537 n2 = c2._node
543 538 if n2 is None:
544 539 n2 = c2._parents[0]._node
545 540 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
546 541 if not cahs:
547 542 anc = nullid
548 543 elif len(cahs) == 1:
549 544 anc = cahs[0]
550 545 else:
551 546 for r in self._repo.ui.configlist('merge', 'preferancestor'):
552 547 try:
553 548 ctx = changectx(self._repo, r)
554 549 except error.RepoLookupError:
555 550 continue
556 551 anc = ctx.node()
557 552 if anc in cahs:
558 553 break
559 554 else:
560 555 anc = self._repo.changelog.ancestor(self._node, n2)
561 556 if warn:
562 557 self._repo.ui.status(
563 558 (_("note: using %s as ancestor of %s and %s\n") %
564 559 (short(anc), short(self._node), short(n2))) +
565 560 ''.join(_(" alternatively, use --config "
566 561 "merge.preferancestor=%s\n") %
567 562 short(n) for n in sorted(cahs) if n != anc))
568 563 return changectx(self._repo, anc)
569 564
570 565 def descendant(self, other):
571 566 """True if other is descendant of this changeset"""
572 567 return self._repo.changelog.descendant(self._rev, other._rev)
573 568
574 569 def walk(self, match):
575 570 fset = set(match.files())
576 571 # for dirstate.walk, files=['.'] means "walk the whole tree".
577 572 # follow that here, too
578 573 fset.discard('.')
579 574
580 575 # avoid the entire walk if we're only looking for specific files
581 576 if fset and not match.anypats():
582 577 if util.all([fn in self for fn in fset]):
583 578 for fn in sorted(fset):
584 579 if match(fn):
585 580 yield fn
586 581 raise StopIteration
587 582
588 583 for fn in self:
589 584 if fn in fset:
590 585 # specified pattern is the exact name
591 586 fset.remove(fn)
592 587 if match(fn):
593 588 yield fn
594 589 for fn in sorted(fset):
595 590 if fn in self._dirs:
596 591 # specified pattern is a directory
597 592 continue
598 593 match.bad(fn, _('no such file in rev %s') % self)
599 594
600 595 def matches(self, match):
601 596 return self.walk(match)
602 597
603 598 class basefilectx(object):
604 599 """A filecontext object represents the common logic for its children:
605 600 filectx: read-only access to a filerevision that is already present
606 601 in the repo,
607 602 workingfilectx: a filecontext that represents files from the working
608 603 directory,
609 604 memfilectx: a filecontext that represents files in-memory."""
610 605 def __new__(cls, repo, path, *args, **kwargs):
611 606 return super(basefilectx, cls).__new__(cls)
612 607
613 608 @propertycache
614 609 def _filelog(self):
615 610 return self._repo.file(self._path)
616 611
617 612 @propertycache
618 613 def _changeid(self):
619 614 if '_changeid' in self.__dict__:
620 615 return self._changeid
621 616 elif '_changectx' in self.__dict__:
622 617 return self._changectx.rev()
623 618 else:
624 619 return self._filelog.linkrev(self._filerev)
625 620
626 621 @propertycache
627 622 def _filenode(self):
628 623 if '_fileid' in self.__dict__:
629 624 return self._filelog.lookup(self._fileid)
630 625 else:
631 626 return self._changectx.filenode(self._path)
632 627
633 628 @propertycache
634 629 def _filerev(self):
635 630 return self._filelog.rev(self._filenode)
636 631
637 632 @propertycache
638 633 def _repopath(self):
639 634 return self._path
640 635
641 636 def __nonzero__(self):
642 637 try:
643 638 self._filenode
644 639 return True
645 640 except error.LookupError:
646 641 # file is missing
647 642 return False
648 643
649 644 def __str__(self):
650 645 return "%s@%s" % (self.path(), self._changectx)
651 646
652 647 def __repr__(self):
653 648 return "<%s %s>" % (type(self).__name__, str(self))
654 649
655 650 def __hash__(self):
656 651 try:
657 652 return hash((self._path, self._filenode))
658 653 except AttributeError:
659 654 return id(self)
660 655
661 656 def __eq__(self, other):
662 657 try:
663 658 return (type(self) == type(other) and self._path == other._path
664 659 and self._filenode == other._filenode)
665 660 except AttributeError:
666 661 return False
667 662
668 663 def __ne__(self, other):
669 664 return not (self == other)
670 665
671 666 def filerev(self):
672 667 return self._filerev
673 668 def filenode(self):
674 669 return self._filenode
675 670 def flags(self):
676 671 return self._changectx.flags(self._path)
677 672 def filelog(self):
678 673 return self._filelog
679 674 def rev(self):
680 675 return self._changeid
681 676 def linkrev(self):
682 677 return self._filelog.linkrev(self._filerev)
683 678 def node(self):
684 679 return self._changectx.node()
685 680 def hex(self):
686 681 return self._changectx.hex()
687 682 def user(self):
688 683 return self._changectx.user()
689 684 def date(self):
690 685 return self._changectx.date()
691 686 def files(self):
692 687 return self._changectx.files()
693 688 def description(self):
694 689 return self._changectx.description()
695 690 def branch(self):
696 691 return self._changectx.branch()
697 692 def extra(self):
698 693 return self._changectx.extra()
699 694 def phase(self):
700 695 return self._changectx.phase()
701 696 def phasestr(self):
702 697 return self._changectx.phasestr()
703 698 def manifest(self):
704 699 return self._changectx.manifest()
705 700 def changectx(self):
706 701 return self._changectx
707 702
708 703 def path(self):
709 704 return self._path
710 705
711 706 def isbinary(self):
712 707 try:
713 708 return util.binary(self.data())
714 709 except IOError:
715 710 return False
716 711 def isexec(self):
717 712 return 'x' in self.flags()
718 713 def islink(self):
719 714 return 'l' in self.flags()
720 715
721 716 def cmp(self, fctx):
722 717 """compare with other file context
723 718
724 719 returns True if different than fctx.
725 720 """
726 721 if (fctx._filerev is None
727 722 and (self._repo._encodefilterpats
728 723 # if file data starts with '\1\n', empty metadata block is
729 724 # prepended, which adds 4 bytes to filelog.size().
730 725 or self.size() - 4 == fctx.size())
731 726 or self.size() == fctx.size()):
732 727 return self._filelog.cmp(self._filenode, fctx.data())
733 728
734 729 return True
735 730
736 731 def parents(self):
737 732 _path = self._path
738 733 fl = self._filelog
739 734 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
740 735
741 736 r = self._filelog.renamed(self._filenode)
742 737 if r:
743 738 pl[0] = (r[0], r[1], None)
744 739
745 740 return [filectx(self._repo, p, fileid=n, filelog=l)
746 741 for p, n, l in pl if n != nullid]
747 742
748 743 def p1(self):
749 744 return self.parents()[0]
750 745
751 746 def p2(self):
752 747 p = self.parents()
753 748 if len(p) == 2:
754 749 return p[1]
755 750 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
756 751
757 752 def annotate(self, follow=False, linenumber=None, diffopts=None):
758 753 '''returns a list of tuples of (ctx, line) for each line
759 754 in the file, where ctx is the filectx of the node where
760 755 that line was last changed.
761 756 This returns tuples of ((ctx, linenumber), line) for each line,
762 757 if "linenumber" parameter is NOT "None".
763 758 In such tuples, linenumber means one at the first appearance
764 759 in the managed file.
765 760 To reduce annotation cost,
766 761 this returns fixed value(False is used) as linenumber,
767 762 if "linenumber" parameter is "False".'''
768 763
769 764 if linenumber is None:
770 765 def decorate(text, rev):
771 766 return ([rev] * len(text.splitlines()), text)
772 767 elif linenumber:
773 768 def decorate(text, rev):
774 769 size = len(text.splitlines())
775 770 return ([(rev, i) for i in xrange(1, size + 1)], text)
776 771 else:
777 772 def decorate(text, rev):
778 773 return ([(rev, False)] * len(text.splitlines()), text)
779 774
780 775 def pair(parent, child):
781 776 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
782 777 refine=True)
783 778 for (a1, a2, b1, b2), t in blocks:
784 779 # Changed blocks ('!') or blocks made only of blank lines ('~')
785 780 # belong to the child.
786 781 if t == '=':
787 782 child[0][b1:b2] = parent[0][a1:a2]
788 783 return child
789 784
790 785 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
791 786
792 787 def parents(f):
793 788 pl = f.parents()
794 789
795 790 # Don't return renamed parents if we aren't following.
796 791 if not follow:
797 792 pl = [p for p in pl if p.path() == f.path()]
798 793
799 794 # renamed filectx won't have a filelog yet, so set it
800 795 # from the cache to save time
801 796 for p in pl:
802 797 if not '_filelog' in p.__dict__:
803 798 p._filelog = getlog(p.path())
804 799
805 800 return pl
806 801
807 802 # use linkrev to find the first changeset where self appeared
808 803 if self.rev() != self.linkrev():
809 804 base = self.filectx(self.filenode())
810 805 else:
811 806 base = self
812 807
813 808 # This algorithm would prefer to be recursive, but Python is a
814 809 # bit recursion-hostile. Instead we do an iterative
815 810 # depth-first search.
816 811
817 812 visit = [base]
818 813 hist = {}
819 814 pcache = {}
820 815 needed = {base: 1}
821 816 while visit:
822 817 f = visit[-1]
823 818 pcached = f in pcache
824 819 if not pcached:
825 820 pcache[f] = parents(f)
826 821
827 822 ready = True
828 823 pl = pcache[f]
829 824 for p in pl:
830 825 if p not in hist:
831 826 ready = False
832 827 visit.append(p)
833 828 if not pcached:
834 829 needed[p] = needed.get(p, 0) + 1
835 830 if ready:
836 831 visit.pop()
837 832 reusable = f in hist
838 833 if reusable:
839 834 curr = hist[f]
840 835 else:
841 836 curr = decorate(f.data(), f)
842 837 for p in pl:
843 838 if not reusable:
844 839 curr = pair(hist[p], curr)
845 840 if needed[p] == 1:
846 841 del hist[p]
847 842 del needed[p]
848 843 else:
849 844 needed[p] -= 1
850 845
851 846 hist[f] = curr
852 847 pcache[f] = []
853 848
854 849 return zip(hist[base][0], hist[base][1].splitlines(True))
855 850
856 851 def ancestors(self, followfirst=False):
857 852 visit = {}
858 853 c = self
859 854 cut = followfirst and 1 or None
860 855 while True:
861 856 for parent in c.parents()[:cut]:
862 857 visit[(parent.rev(), parent.node())] = parent
863 858 if not visit:
864 859 break
865 860 c = visit.pop(max(visit))
866 861 yield c
867 862
868 863 class filectx(basefilectx):
869 864 """A filecontext object makes access to data related to a particular
870 865 filerevision convenient."""
871 866 def __init__(self, repo, path, changeid=None, fileid=None,
872 867 filelog=None, changectx=None):
873 868 """changeid can be a changeset revision, node, or tag.
874 869 fileid can be a file revision or node."""
875 870 self._repo = repo
876 871 self._path = path
877 872
878 873 assert (changeid is not None
879 874 or fileid is not None
880 875 or changectx is not None), \
881 876 ("bad args: changeid=%r, fileid=%r, changectx=%r"
882 877 % (changeid, fileid, changectx))
883 878
884 879 if filelog is not None:
885 880 self._filelog = filelog
886 881
887 882 if changeid is not None:
888 883 self._changeid = changeid
889 884 if changectx is not None:
890 885 self._changectx = changectx
891 886 if fileid is not None:
892 887 self._fileid = fileid
893 888
894 889 @propertycache
895 890 def _changectx(self):
896 891 try:
897 892 return changectx(self._repo, self._changeid)
898 893 except error.RepoLookupError:
899 894 # Linkrev may point to any revision in the repository. When the
900 895 # repository is filtered this may lead to `filectx` trying to build
901 896 # `changectx` for a filtered revision. In such a case we fall back to
902 897 # creating `changectx` on the unfiltered version of the repository.
903 898 # This fallback should not be an issue because `changectx` from
904 899 # `filectx` are not used in complex operations that care about
905 900 # filtering.
906 901 #
907 902 # This fallback is a cheap and dirty fix that prevents several
908 903 # crashes. It does not ensure the behavior is correct. However the
909 904 # behavior was not correct before filtering either, and "incorrect
910 905 # behavior" is seen as better than "crash".
911 906 #
912 907 # Linkrevs have several serious troubles with filtering that are
913 908 # complicated to solve. Proper handling of the issue here should be
914 909 # considered when solving the linkrev issue is on the table.
915 910 return changectx(self._repo.unfiltered(), self._changeid)
916 911
917 912 def filectx(self, fileid):
918 913 '''opens an arbitrary revision of the file without
919 914 opening a new filelog'''
920 915 return filectx(self._repo, self._path, fileid=fileid,
921 916 filelog=self._filelog)
922 917
923 918 def data(self):
924 919 try:
925 920 return self._filelog.read(self._filenode)
926 921 except error.CensoredNodeError:
927 922 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
928 923 return ""
929 924 raise util.Abort(_("censored node: %s") % short(self._filenode),
930 925 hint=_("set censor.policy to ignore errors"))
931 926
932 927 def size(self):
933 928 return self._filelog.size(self._filerev)
934 929
935 930 def renamed(self):
936 931 """check if file was actually renamed in this changeset revision
937 932
938 933 If rename logged in file revision, we report copy for changeset only
939 934 if file revisions linkrev points back to the changeset in question
940 935 or both changeset parents contain different file revisions.
941 936 """
942 937
943 938 renamed = self._filelog.renamed(self._filenode)
944 939 if not renamed:
945 940 return renamed
946 941
947 942 if self.rev() == self.linkrev():
948 943 return renamed
949 944
950 945 name = self.path()
951 946 fnode = self._filenode
952 947 for p in self._changectx.parents():
953 948 try:
954 949 if fnode == p.filenode(name):
955 950 return None
956 951 except error.LookupError:
957 952 pass
958 953 return renamed
959 954
960 955 def children(self):
961 956 # hard for renames
962 957 c = self._filelog.children(self._filenode)
963 958 return [filectx(self._repo, self._path, fileid=x,
964 959 filelog=self._filelog) for x in c]
965 960
966 961 class committablectx(basectx):
967 962 """A committablectx object provides common functionality for a context that
968 963 wants the ability to commit, e.g. workingctx or memctx."""
969 964 def __init__(self, repo, text="", user=None, date=None, extra=None,
970 965 changes=None):
971 966 self._repo = repo
972 967 self._rev = None
973 968 self._node = None
974 969 self._text = text
975 970 if date:
976 971 self._date = util.parsedate(date)
977 972 if user:
978 973 self._user = user
979 974 if changes:
980 975 self._status = changes
981 976
982 977 self._extra = {}
983 978 if extra:
984 979 self._extra = extra.copy()
985 980 if 'branch' not in self._extra:
986 981 try:
987 982 branch = encoding.fromlocal(self._repo.dirstate.branch())
988 983 except UnicodeDecodeError:
989 984 raise util.Abort(_('branch name not in UTF-8!'))
990 985 self._extra['branch'] = branch
991 986 if self._extra['branch'] == '':
992 987 self._extra['branch'] = 'default'
993 988
994 989 def __str__(self):
995 990 return str(self._parents[0]) + "+"
996 991
997 992 def __nonzero__(self):
998 993 return True
999 994
1000 995 def _buildflagfunc(self):
1001 996 # Create a fallback function for getting file flags when the
1002 997 # filesystem doesn't support them
1003 998
1004 999 copiesget = self._repo.dirstate.copies().get
1005 1000
1006 1001 if len(self._parents) < 2:
1007 1002 # when we have one parent, it's easy: copy from parent
1008 1003 man = self._parents[0].manifest()
1009 1004 def func(f):
1010 1005 f = copiesget(f, f)
1011 1006 return man.flags(f)
1012 1007 else:
1013 1008 # merges are tricky: we try to reconstruct the unstored
1014 1009 # result from the merge (issue1802)
1015 1010 p1, p2 = self._parents
1016 1011 pa = p1.ancestor(p2)
1017 1012 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1018 1013
1019 1014 def func(f):
1020 1015 f = copiesget(f, f) # may be wrong for merges with copies
1021 1016 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1022 1017 if fl1 == fl2:
1023 1018 return fl1
1024 1019 if fl1 == fla:
1025 1020 return fl2
1026 1021 if fl2 == fla:
1027 1022 return fl1
1028 1023 return '' # punt for conflicts
1029 1024
1030 1025 return func
1031 1026
1032 1027 @propertycache
1033 1028 def _flagfunc(self):
1034 1029 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1035 1030
1036 1031 @propertycache
1037 1032 def _manifest(self):
1038 1033 """generate a manifest corresponding to the values in self._status
1039 1034
1040 1035 This reuses the file nodeid from the parent, but we append an extra letter
1041 1036 when modified. Modified files get an extra 'm' while added files get
1042 1037 an extra 'a'. This is used by manifests merge to see that files
1043 1038 are different and by update logic to avoid deleting newly added files.
1044 1039 """
1045 1040
1046 1041 man1 = self._parents[0].manifest()
1047 1042 man = man1.copy()
1048 1043 if len(self._parents) > 1:
1049 1044 man2 = self.p2().manifest()
1050 1045 def getman(f):
1051 1046 if f in man1:
1052 1047 return man1
1053 1048 return man2
1054 1049 else:
1055 1050 getman = lambda f: man1
1056 1051
1057 1052 copied = self._repo.dirstate.copies()
1058 1053 ff = self._flagfunc
1059 1054 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1060 1055 for f in l:
1061 1056 orig = copied.get(f, f)
1062 1057 man[f] = getman(orig).get(orig, nullid) + i
1063 1058 try:
1064 1059 man.setflag(f, ff(f))
1065 1060 except OSError:
1066 1061 pass
1067 1062
1068 1063 for f in self._status.deleted + self._status.removed:
1069 1064 if f in man:
1070 1065 del man[f]
1071 1066
1072 1067 return man
1073 1068
1074 1069 @propertycache
1075 1070 def _status(self):
1076 1071 return self._repo.status()
1077 1072
1078 1073 @propertycache
1079 1074 def _user(self):
1080 1075 return self._repo.ui.username()
1081 1076
1082 1077 @propertycache
1083 1078 def _date(self):
1084 1079 return util.makedate()
1085 1080
1086 1081 def subrev(self, subpath):
1087 1082 return None
1088 1083
1089 1084 def user(self):
1090 1085 return self._user or self._repo.ui.username()
1091 1086 def date(self):
1092 1087 return self._date
1093 1088 def description(self):
1094 1089 return self._text
1095 1090 def files(self):
1096 1091 return sorted(self._status.modified + self._status.added +
1097 1092 self._status.removed)
1098 1093
1099 1094 def modified(self):
1100 1095 return self._status.modified
1101 1096 def added(self):
1102 1097 return self._status.added
1103 1098 def removed(self):
1104 1099 return self._status.removed
1105 1100 def deleted(self):
1106 1101 return self._status.deleted
1107 1102 def unknown(self):
1108 1103 return self._status.unknown
1109 1104 def ignored(self):
1110 1105 return self._status.ignored
1111 1106 def clean(self):
1112 1107 return self._status.clean
1113 1108 def branch(self):
1114 1109 return encoding.tolocal(self._extra['branch'])
1115 1110 def closesbranch(self):
1116 1111 return 'close' in self._extra
1117 1112 def extra(self):
1118 1113 return self._extra
1119 1114
1120 1115 def tags(self):
1121 1116 t = []
1122 1117 for p in self.parents():
1123 1118 t.extend(p.tags())
1124 1119 return t
1125 1120
1126 1121 def bookmarks(self):
1127 1122 b = []
1128 1123 for p in self.parents():
1129 1124 b.extend(p.bookmarks())
1130 1125 return b
1131 1126
1132 1127 def phase(self):
1133 1128 phase = phases.draft # default phase to draft
1134 1129 for p in self.parents():
1135 1130 phase = max(phase, p.phase())
1136 1131 return phase
1137 1132
1138 1133 def hidden(self):
1139 1134 return False
1140 1135
1141 1136 def children(self):
1142 1137 return []
1143 1138
1144 1139 def flags(self, path):
1145 1140 if '_manifest' in self.__dict__:
1146 1141 try:
1147 1142 return self._manifest.flags(path)
1148 1143 except KeyError:
1149 1144 return ''
1150 1145
1151 1146 try:
1152 1147 return self._flagfunc(path)
1153 1148 except OSError:
1154 1149 return ''
1155 1150
1156 1151 def ancestor(self, c2):
1157 1152 """return the "best" ancestor context of self and c2"""
1158 1153 return self._parents[0].ancestor(c2) # punt on two parents for now
1159 1154
1160 1155 def walk(self, match):
1161 1156 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1162 1157 True, False))
1163 1158
1164 1159 def matches(self, match):
1165 1160 return sorted(self._repo.dirstate.matches(match))
1166 1161
1167 1162 def ancestors(self):
1168 1163 for a in self._repo.changelog.ancestors(
1169 1164 [p.rev() for p in self._parents]):
1170 1165 yield changectx(self._repo, a)
1171 1166
1172 1167 def markcommitted(self, node):
1173 1168 """Perform post-commit cleanup necessary after committing this ctx
1174 1169
1175 1170 Specifically, this updates backing stores this working context
1176 1171 wraps to reflect the fact that the changes reflected by this
1177 1172 workingctx have been committed. For example, it marks
1178 1173 modified and added files as normal in the dirstate.
1179 1174
1180 1175 """
1181 1176
1182 1177 self._repo.dirstate.beginparentchange()
1183 1178 for f in self.modified() + self.added():
1184 1179 self._repo.dirstate.normal(f)
1185 1180 for f in self.removed():
1186 1181 self._repo.dirstate.drop(f)
1187 1182 self._repo.dirstate.setparents(node)
1188 1183 self._repo.dirstate.endparentchange()
1189 1184
1190 1185 def dirs(self):
1191 1186 return self._repo.dirstate.dirs()
1192 1187
1193 1188 class workingctx(committablectx):
1194 1189 """A workingctx object makes access to data related to
1195 1190 the current working directory convenient.
1196 1191 date - any valid date string or (unixtime, offset), or None.
1197 1192 user - username string, or None.
1198 1193 extra - a dictionary of extra values, or None.
1199 1194 changes - a list of file lists as returned by localrepo.status()
1200 1195 or None to use the repository status.
1201 1196 """
1202 1197 def __init__(self, repo, text="", user=None, date=None, extra=None,
1203 1198 changes=None):
1204 1199 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1205 1200
1206 1201 def __iter__(self):
1207 1202 d = self._repo.dirstate
1208 1203 for f in d:
1209 1204 if d[f] != 'r':
1210 1205 yield f
1211 1206
1212 1207 def __contains__(self, key):
1213 1208 return self._repo.dirstate[key] not in "?r"
1214 1209
1215 1210 @propertycache
1216 1211 def _parents(self):
1217 1212 p = self._repo.dirstate.parents()
1218 1213 if p[1] == nullid:
1219 1214 p = p[:-1]
1220 1215 return [changectx(self._repo, x) for x in p]
1221 1216
1222 1217 def filectx(self, path, filelog=None):
1223 1218 """get a file context from the working directory"""
1224 1219 return workingfilectx(self._repo, path, workingctx=self,
1225 1220 filelog=filelog)
1226 1221
1227 1222 def dirty(self, missing=False, merge=True, branch=True):
1228 1223 "check whether a working directory is modified"
1229 1224 # check subrepos first
1230 1225 for s in sorted(self.substate):
1231 1226 if self.sub(s).dirty():
1232 1227 return True
1233 1228 # check current working dir
1234 1229 return ((merge and self.p2()) or
1235 1230 (branch and self.branch() != self.p1().branch()) or
1236 1231 self.modified() or self.added() or self.removed() or
1237 1232 (missing and self.deleted()))
1238 1233
1239 1234 def add(self, list, prefix=""):
1240 1235 join = lambda f: os.path.join(prefix, f)
1241 1236 wlock = self._repo.wlock()
1242 1237 ui, ds = self._repo.ui, self._repo.dirstate
1243 1238 try:
1244 1239 rejected = []
1245 1240 lstat = self._repo.wvfs.lstat
1246 1241 for f in list:
1247 1242 scmutil.checkportable(ui, join(f))
1248 1243 try:
1249 1244 st = lstat(f)
1250 1245 except OSError:
1251 1246 ui.warn(_("%s does not exist!\n") % join(f))
1252 1247 rejected.append(f)
1253 1248 continue
1254 1249 if st.st_size > 10000000:
1255 1250 ui.warn(_("%s: up to %d MB of RAM may be required "
1256 1251 "to manage this file\n"
1257 1252 "(use 'hg revert %s' to cancel the "
1258 1253 "pending addition)\n")
1259 1254 % (f, 3 * st.st_size // 1000000, join(f)))
1260 1255 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1261 1256 ui.warn(_("%s not added: only files and symlinks "
1262 1257 "supported currently\n") % join(f))
1263 1258 rejected.append(f)
1264 1259 elif ds[f] in 'amn':
1265 1260 ui.warn(_("%s already tracked!\n") % join(f))
1266 1261 elif ds[f] == 'r':
1267 1262 ds.normallookup(f)
1268 1263 else:
1269 1264 ds.add(f)
1270 1265 return rejected
1271 1266 finally:
1272 1267 wlock.release()
1273 1268
1274 1269 def forget(self, files, prefix=""):
1275 1270 join = lambda f: os.path.join(prefix, f)
1276 1271 wlock = self._repo.wlock()
1277 1272 try:
1278 1273 rejected = []
1279 1274 for f in files:
1280 1275 if f not in self._repo.dirstate:
1281 1276 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1282 1277 rejected.append(f)
1283 1278 elif self._repo.dirstate[f] != 'a':
1284 1279 self._repo.dirstate.remove(f)
1285 1280 else:
1286 1281 self._repo.dirstate.drop(f)
1287 1282 return rejected
1288 1283 finally:
1289 1284 wlock.release()
1290 1285
1291 1286 def undelete(self, list):
1292 1287 pctxs = self.parents()
1293 1288 wlock = self._repo.wlock()
1294 1289 try:
1295 1290 for f in list:
1296 1291 if self._repo.dirstate[f] != 'r':
1297 1292 self._repo.ui.warn(_("%s not removed!\n") % f)
1298 1293 else:
1299 1294 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1300 1295 t = fctx.data()
1301 1296 self._repo.wwrite(f, t, fctx.flags())
1302 1297 self._repo.dirstate.normal(f)
1303 1298 finally:
1304 1299 wlock.release()
1305 1300
1306 1301 def copy(self, source, dest):
1307 1302 try:
1308 1303 st = self._repo.wvfs.lstat(dest)
1309 1304 except OSError, err:
1310 1305 if err.errno != errno.ENOENT:
1311 1306 raise
1312 1307 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1313 1308 return
1314 1309 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1315 1310 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1316 1311 "symbolic link\n") % dest)
1317 1312 else:
1318 1313 wlock = self._repo.wlock()
1319 1314 try:
1320 1315 if self._repo.dirstate[dest] in '?':
1321 1316 self._repo.dirstate.add(dest)
1322 1317 elif self._repo.dirstate[dest] in 'r':
1323 1318 self._repo.dirstate.normallookup(dest)
1324 1319 self._repo.dirstate.copy(source, dest)
1325 1320 finally:
1326 1321 wlock.release()
1327 1322
1328 1323 def _filtersuspectsymlink(self, files):
1329 1324 if not files or self._repo.dirstate._checklink:
1330 1325 return files
1331 1326
1332 1327 # Symlink placeholders may get non-symlink-like contents
1333 1328 # via user error or dereferencing by NFS or Samba servers,
1334 1329 # so we filter out any placeholders that don't look like a
1335 1330 # symlink
1336 1331 sane = []
1337 1332 for f in files:
1338 1333 if self.flags(f) == 'l':
1339 1334 d = self[f].data()
1340 1335 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1341 1336 self._repo.ui.debug('ignoring suspect symlink placeholder'
1342 1337 ' "%s"\n' % f)
1343 1338 continue
1344 1339 sane.append(f)
1345 1340 return sane
1346 1341
1347 1342 def _checklookup(self, files):
1348 1343 # check for any possibly clean files
1349 1344 if not files:
1350 1345 return [], []
1351 1346
1352 1347 modified = []
1353 1348 fixup = []
1354 1349 pctx = self._parents[0]
1355 1350 # do a full compare of any files that might have changed
1356 1351 for f in sorted(files):
1357 1352 if (f not in pctx or self.flags(f) != pctx.flags(f)
1358 1353 or pctx[f].cmp(self[f])):
1359 1354 modified.append(f)
1360 1355 else:
1361 1356 fixup.append(f)
1362 1357
1363 1358 # update dirstate for files that are actually clean
1364 1359 if fixup:
1365 1360 try:
1366 1361 # updating the dirstate is optional
1367 1362 # so we don't wait on the lock
1368 1363 # wlock can invalidate the dirstate, so cache normal _after_
1369 1364 # taking the lock
1370 1365 wlock = self._repo.wlock(False)
1371 1366 normal = self._repo.dirstate.normal
1372 1367 try:
1373 1368 for f in fixup:
1374 1369 normal(f)
1375 1370 finally:
1376 1371 wlock.release()
1377 1372 except error.LockError:
1378 1373 pass
1379 1374 return modified, fixup
1380 1375
1381 1376 def _manifestmatches(self, match, s):
1382 1377 """Slow path for workingctx
1383 1378
1384 1379 The fast path is when we compare the working directory to its parent
1385 1380 which means this function is comparing with a non-parent; therefore we
1386 1381 need to build a manifest and return what matches.
1387 1382 """
1388 1383 mf = self._repo['.']._manifestmatches(match, s)
1389 1384 for f in s.modified + s.added:
1390 1385 mf[f] = None
1391 1386 mf.setflag(f, self.flags(f))
1392 1387 for f in s.removed:
1393 1388 if f in mf:
1394 1389 del mf[f]
1395 1390 return mf
1396 1391
1397 1392 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1398 1393 unknown=False):
1399 1394 '''Gets the status from the dirstate -- internal use only.'''
1400 1395 listignored, listclean, listunknown = ignored, clean, unknown
1401 1396 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1402 1397 subrepos = []
1403 1398 if '.hgsub' in self:
1404 1399 subrepos = sorted(self.substate)
1405 1400 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1406 1401 listclean, listunknown)
1407 1402
1408 1403 # check for any possibly clean files
1409 1404 if cmp:
1410 1405 modified2, fixup = self._checklookup(cmp)
1411 1406 s.modified.extend(modified2)
1412 1407
1413 1408 # update dirstate for files that are actually clean
1414 1409 if fixup and listclean:
1415 1410 s.clean.extend(fixup)
1416 1411
1417 1412 return s
1418 1413
1419 1414 def _buildstatus(self, other, s, match, listignored, listclean,
1420 1415 listunknown):
1421 1416 """build a status with respect to another context
1422 1417
1423 1418 This includes logic for maintaining the fast path of status when
1424 1419 comparing the working directory against its parent, which is to skip
1425 1420 building a new manifest if self (working directory) is not comparing
1426 1421 against its parent (repo['.']).
1427 1422 """
1428 1423 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1429 1424 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1430 1425 # might have accidentally ended up with the entire contents of the file
1431 1426 # they are supposed to be linking to.
1432 1427 s.modified[:] = self._filtersuspectsymlink(s.modified)
1433 1428 if other != self._repo['.']:
1434 1429 s = super(workingctx, self)._buildstatus(other, s, match,
1435 1430 listignored, listclean,
1436 1431 listunknown)
1437 1432 self._status = s
1438 1433 return s
1439 1434
1440 1435 def _matchstatus(self, other, match):
1441 1436 """override the match method with a filter for directory patterns
1442 1437
1443 1438 We use inheritance to customize the match.bad method only in cases of
1444 1439 workingctx since it belongs only to the working directory when
1445 1440 comparing against the parent changeset.
1446 1441
1447 1442 If we aren't comparing against the working directory's parent, then we
1448 1443 just use the default match object sent to us.
1449 1444 """
1450 1445 superself = super(workingctx, self)
1451 1446 match = superself._matchstatus(other, match)
1452 1447 if other != self._repo['.']:
1453 1448 def bad(f, msg):
1454 1449 # 'f' may be a directory pattern from 'match.files()',
1455 1450 # so 'f not in ctx1' is not enough
1456 1451 if f not in other and f not in other.dirs():
1457 1452 self._repo.ui.warn('%s: %s\n' %
1458 1453 (self._repo.dirstate.pathto(f), msg))
1459 1454 match.bad = bad
1460 1455 return match
1461 1456
1462 1457 class committablefilectx(basefilectx):
1463 1458 """A committablefilectx provides common functionality for a file context
1464 1459 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1465 1460 def __init__(self, repo, path, filelog=None, ctx=None):
1466 1461 self._repo = repo
1467 1462 self._path = path
1468 1463 self._changeid = None
1469 1464 self._filerev = self._filenode = None
1470 1465
1471 1466 if filelog is not None:
1472 1467 self._filelog = filelog
1473 1468 if ctx:
1474 1469 self._changectx = ctx
1475 1470
1476 1471 def __nonzero__(self):
1477 1472 return True
1478 1473
1479 1474 def parents(self):
1480 1475 '''return parent filectxs, following copies if necessary'''
1481 1476 def filenode(ctx, path):
1482 1477 return ctx._manifest.get(path, nullid)
1483 1478
1484 1479 path = self._path
1485 1480 fl = self._filelog
1486 1481 pcl = self._changectx._parents
1487 1482 renamed = self.renamed()
1488 1483
1489 1484 if renamed:
1490 1485 pl = [renamed + (None,)]
1491 1486 else:
1492 1487 pl = [(path, filenode(pcl[0], path), fl)]
1493 1488
1494 1489 for pc in pcl[1:]:
1495 1490 pl.append((path, filenode(pc, path), fl))
1496 1491
1497 1492 return [filectx(self._repo, p, fileid=n, filelog=l)
1498 1493 for p, n, l in pl if n != nullid]
1499 1494
1500 1495 def children(self):
1501 1496 return []
1502 1497
1503 1498 class workingfilectx(committablefilectx):
1504 1499 """A workingfilectx object makes access to data related to a particular
1505 1500 file in the working directory convenient."""
1506 1501 def __init__(self, repo, path, filelog=None, workingctx=None):
1507 1502 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1508 1503
1509 1504 @propertycache
1510 1505 def _changectx(self):
1511 1506 return workingctx(self._repo)
1512 1507
1513 1508 def data(self):
1514 1509 return self._repo.wread(self._path)
1515 1510 def renamed(self):
1516 1511 rp = self._repo.dirstate.copied(self._path)
1517 1512 if not rp:
1518 1513 return None
1519 1514 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1520 1515
1521 1516 def size(self):
1522 1517 return self._repo.wvfs.lstat(self._path).st_size
1523 1518 def date(self):
1524 1519 t, tz = self._changectx.date()
1525 1520 try:
1526 1521 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1527 1522 except OSError, err:
1528 1523 if err.errno != errno.ENOENT:
1529 1524 raise
1530 1525 return (t, tz)
1531 1526
1532 1527 def cmp(self, fctx):
1533 1528 """compare with other file context
1534 1529
1535 1530 returns True if different than fctx.
1536 1531 """
1537 1532 # fctx should be a filectx (not a workingfilectx)
1538 1533 # invert comparison to reuse the same code path
1539 1534 return fctx.cmp(self)
1540 1535
1541 1536 def remove(self, ignoremissing=False):
1542 1537 """wraps unlink for a repo's working directory"""
1543 1538 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1544 1539
1545 1540 def write(self, data, flags):
1546 1541 """wraps repo.wwrite"""
1547 1542 self._repo.wwrite(self._path, data, flags)
1548 1543
1549 1544 class memctx(committablectx):
1550 1545 """Use memctx to perform in-memory commits via localrepo.commitctx().
1551 1546
1552 1547 Revision information is supplied at initialization time, while
1553 1548 related file data is made available through a callback
1554 1549 mechanism. 'repo' is the current localrepo, 'parents' is a
1555 1550 sequence of two parent revisions identifiers (pass None for every
1556 1551 missing parent), 'text' is the commit message and 'files' lists
1557 1552 names of files touched by the revision (normalized and relative to
1558 1553 repository root).
1559 1554
1560 1555 filectxfn(repo, memctx, path) is a callable receiving the
1561 1556 repository, the current memctx object and the normalized path of
1562 1557 requested file, relative to repository root. It is fired by the
1563 1558 commit function for every file in 'files', but calls order is
1564 1559 undefined. If the file is available in the revision being
1565 1560 committed (updated or added), filectxfn returns a memfilectx
1566 1561 object. If the file was removed, filectxfn raises an
1567 1562 IOError. Moved files are represented by marking the source file
1568 1563 removed and the new file added with copy information (see
1569 1564 memfilectx).
1570 1565
1571 1566 user receives the committer name and defaults to current
1572 1567 repository username, date is the commit date in any format
1573 1568 supported by util.parsedate() and defaults to current date, extra
1574 1569 is a dictionary of metadata or is left empty.
1575 1570 """
1576 1571
1577 1572 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1578 1573 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1579 1574 # this field to determine what to do in filectxfn.
1580 1575 _returnnoneformissingfiles = True
1581 1576
1582 1577 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1583 1578 date=None, extra=None, editor=False):
1584 1579 super(memctx, self).__init__(repo, text, user, date, extra)
1585 1580 self._rev = None
1586 1581 self._node = None
1587 1582 parents = [(p or nullid) for p in parents]
1588 1583 p1, p2 = parents
1589 1584 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1590 1585 files = sorted(set(files))
1591 1586 self._status = scmutil.status(files, [], [], [], [], [], [])
1592 1587 self._filectxfn = filectxfn
1593 1588 self.substate = {}
1594 1589
1595 1590 # if store is not callable, wrap it in a function
1596 1591 if not callable(filectxfn):
1597 1592 def getfilectx(repo, memctx, path):
1598 1593 fctx = filectxfn[path]
1599 1594 # this is weird but apparently we only keep track of one parent
1600 1595 # (why not only store that instead of a tuple?)
1601 1596 copied = fctx.renamed()
1602 1597 if copied:
1603 1598 copied = copied[0]
1604 1599 return memfilectx(repo, path, fctx.data(),
1605 1600 islink=fctx.islink(), isexec=fctx.isexec(),
1606 1601 copied=copied, memctx=memctx)
1607 1602 self._filectxfn = getfilectx
1608 1603
1609 1604 self._extra = extra and extra.copy() or {}
1610 1605 if self._extra.get('branch', '') == '':
1611 1606 self._extra['branch'] = 'default'
1612 1607
1613 1608 if editor:
1614 1609 self._text = editor(self._repo, self, [])
1615 1610 self._repo.savecommitmessage(self._text)
1616 1611
1617 1612 def filectx(self, path, filelog=None):
1618 1613 """get a file context from the working directory
1619 1614
1620 1615 Returns None if file doesn't exist and should be removed."""
1621 1616 return self._filectxfn(self._repo, self, path)
1622 1617
1623 1618 def commit(self):
1624 1619 """commit context to the repo"""
1625 1620 return self._repo.commitctx(self)
1626 1621
1627 1622 @propertycache
1628 1623 def _manifest(self):
1629 1624 """generate a manifest based on the return values of filectxfn"""
1630 1625
1631 1626 # keep this simple for now; just worry about p1
1632 1627 pctx = self._parents[0]
1633 1628 man = pctx.manifest().copy()
1634 1629
1635 1630 for f, fnode in man.iteritems():
1636 1631 p1node = nullid
1637 1632 p2node = nullid
1638 1633 p = pctx[f].parents() # if file isn't in pctx, check p2?
1639 1634 if len(p) > 0:
1640 1635 p1node = p[0].node()
1641 1636 if len(p) > 1:
1642 1637 p2node = p[1].node()
1643 1638 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1644 1639
1645 1640 return man
1646 1641
1647 1642
1648 1643 class memfilectx(committablefilectx):
1649 1644 """memfilectx represents an in-memory file to commit.
1650 1645
1651 1646 See memctx and committablefilectx for more details.
1652 1647 """
1653 1648 def __init__(self, repo, path, data, islink=False,
1654 1649 isexec=False, copied=None, memctx=None):
1655 1650 """
1656 1651 path is the normalized file path relative to repository root.
1657 1652 data is the file content as a string.
1658 1653 islink is True if the file is a symbolic link.
1659 1654 isexec is True if the file is executable.
1660 1655 copied is the source file path if current file was copied in the
1661 1656 revision being committed, or None."""
1662 1657 super(memfilectx, self).__init__(repo, path, None, memctx)
1663 1658 self._data = data
1664 1659 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1665 1660 self._copied = None
1666 1661 if copied:
1667 1662 self._copied = (copied, nullid)
1668 1663
1669 1664 def data(self):
1670 1665 return self._data
1671 1666 def size(self):
1672 1667 return len(self.data())
1673 1668 def flags(self):
1674 1669 return self._flags
1675 1670 def renamed(self):
1676 1671 return self._copied
1677 1672
1678 1673 def remove(self, ignoremissing=False):
1679 1674 """wraps unlink for a repo's working directory"""
1680 1675 # need to figure out what to do here
1681 1676 del self._changectx[self._path]
1682 1677
1683 1678 def write(self, data, flags):
1684 1679 """wraps repo.wwrite"""
1685 1680 self._data = data
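End of the context.py hunk. The block removed above was the old direct branch lookup (repo.branchtip(changeid)); after this change a branch name reaches changectx through the repo.names.singlenode() call a few lines earlier, and the surviving except clauses remain to catch the lookup errors now raised from inside the "branches" namemap (e.g. repo.branchtip() on an unknown branch). A minimal sketch of the resulting lookup path, assuming the current directory is a local repository with a 'default' branch (Python 2, to match the code above):

    # Rough sketch, not part of the diff: resolving a branch name now goes
    # through the namespaces table rather than a dedicated branchtip fallback.
    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')   # assumes cwd is a repository
    ctx = repo['default']                   # changectx, via names.singlenode()
    print ctx.hex(), ctx.branch()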
@@ -1,79 +1,82 b''
1 1 from i18n import _
2 2 from mercurial import util
3 3
4 4 def tolist(val):
5 5 """
6 6 a convenience method to return an empty list instead of None
7 7 """
8 8 if val is None:
9 9 return []
10 10 else:
11 11 return [val]
12 12
13 13 class namespaces(object):
14 14 """
15 15 provides an interface to register a generic many-to-many mapping between
16 16 some (namespaced) names and nodes. The goal here is to control the
17 17 pollution of jamming things into tags or bookmarks (in extension-land) and
18 18 to simplify internal bits of mercurial: log output, tab completion, etc.
19 19
20 20 More precisely, we define a list of names (the namespace) and a mapping of
21 21 names to nodes. This name mapping returns a list of nodes.
22 22
23 23 Furthermore, each name mapping will be passed a name to lookup which might
24 24 not be in its domain. In this case, each method should return an empty list
25 25 and not raise an error.
26 26
27 27 We'll have a dictionary '_names' where each key is a namespace and
28 28 its value is a dictionary of functions:
29 29 'namemap': function that takes a name and returns a list of nodes
30 30 """
31 31
32 32 _names_version = 0
33 33
34 34 def __init__(self):
35 35 self._names = util.sortdict()
36 36
37 37 addns = self.addnamespace
38 38
39 39 # we need current mercurial named objects (bookmarks, tags, and
40 40 # branches) to be initialized somewhere, so that place is here
41 41 addns("bookmarks",
42 42 lambda repo, name: tolist(repo._bookmarks.get(name)))
43 43
44 44 addns("tags",
45 45 lambda repo, name: tolist(repo._tagscache.tags.get(name)))
46 46
47 addns("branches",
48 lambda repo, name: tolist(repo.branchtip(name)))
49
47 50 def addnamespace(self, namespace, namemap, order=None):
48 51 """
49 52 register a namespace
50 53
51 54 namespace: the name to be registered (in plural form)
52 55 namemap: function that inputs a node, output name(s)
53 56 order: optional argument to specify the order of namespaces
54 57 (e.g. 'branches' should be listed before 'bookmarks')
55 58 """
56 59 val = {'namemap': namemap}
57 60 if order is not None:
58 61 self._names.insert(order, namespace, val)
59 62 else:
60 63 self._names[namespace] = val
61 64
62 65 def singlenode(self, repo, name):
63 66 """
64 67 Return the 'best' node for the given name. Best means the first node
65 68 in the first nonempty list returned by a name-to-nodes mapping function
66 69 in the defined precedence order.
67 70
68 71 Raises a KeyError if there is no such node.
69 72 """
70 73 for ns, v in self._names.iteritems():
71 74 n = v['namemap'](repo, name)
72 75 if n:
73 76 # return max revision number
74 77 if len(n) > 1:
75 78 cl = repo.changelog
76 79 maxrev = max(cl.rev(node) for node in n)
77 80 return cl.node(maxrev)
78 81 return n[0]
79 82 raise KeyError(_('no such name: %s') % name)
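Because __init__ above registers bookmarks, then tags, then the new branches namespace, singlenode() tries an ambiguous name in that order and, within a single namespace that returns several nodes, keeps the one with the highest revision. A short sketch of that precedence, reusing the repo object from the earlier sketch and assuming (purely for illustration) that the name 'stable' exists both as a bookmark and as a branch:

    # Illustration only: bookmarks are registered before branches in
    # namespaces.__init__, so singlenode() returns the bookmark's node here,
    # not repo.branchtip('stable').
    node = repo.names.singlenode(repo, 'stable')
    assert node == repo._bookmarks['stable']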