context: use new manifest.diff(clean=True) support...
Augie Fackler
r23757:b5346480 default
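The hunk below replaces the hand-rolled clean-file computation in basectx._buildstatus with the new manifest.diff(clean=True) call, where unchanged files are reported as entries whose value is None. The following sketch is illustrative only: the helper name buildstatus_sketch, the toy dictionary d, and the node strings are invented, and the working-directory _newnode special case is omitted. It shows the dictionary shape this change assumes and how the loop buckets each entry.

# Illustrative sketch (not part of the commit): assumed shape of the dict
# returned by manifest.diff(other, clean=True) and how the new loop in
# _buildstatus buckets each entry.
def buildstatus_sketch(diffdict):
    modified, added, removed, clean = [], [], [], []
    for fn, value in diffdict.iteritems():
        if value is None:
            # With clean=True, files identical in both manifests show up
            # with a None value instead of being omitted from the diff.
            clean.append(fn)
            continue
        (node1, flag1), (node2, flag2) = value
        if node1 is None:          # missing on the left side: added
            added.append(fn)
        elif node2 is None:        # missing on the right side: removed
            removed.append(fn)
        else:                      # present on both sides: modified
            modified.append(fn)
    return modified, added, removed, clean

# toy input standing in for mf1.diff(mf2, clean=True); node strings are made up
d = {
    'clean.txt': None,
    'changed.txt': (('aaa', ''), ('bbb', '')),
    'added.txt': ((None, ''), ('ccc', '')),
    'removed.txt': (('ddd', ''), (None, '')),
}
print buildstatus_sketch(d)
# -> (['changed.txt'], ['added.txt'], ['removed.txt'], ['clean.txt'])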
@@ -1,1860 +1,1859
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
26 26 """return the first ancestor of <srcrev> introducing <fnode>
27 27
28 28 If the linkrev of the file revision does not point to an ancestor of
29 29 srcrev, we'll walk down the ancestors until we find one introducing this
30 30 file revision.
31 31
32 32 :repo: a localrepository object (used to access changelog and manifest)
33 33 :path: the file path
34 34 :fnode: the nodeid of the file revision
35 35 :filelog: the filelog of this path
36 36 :srcrev: the changeset revision we search ancestors from
37 37 :inclusive: if true, the src revision will also be checked
38 38 """
39 39 cl = repo.unfiltered().changelog
40 40 ma = repo.manifest
41 41 # fetch the linkrev
42 42 fr = filelog.rev(fnode)
43 43 lkr = filelog.linkrev(fr)
44 44 # check if this linkrev is an ancestor of srcrev
45 45 anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
46 46 if lkr not in anc:
47 47 for a in anc:
48 48 ac = cl.read(a) # get changeset data (we avoid object creation).
49 49 if path in ac[3]: # checking the 'files' field.
50 50 # The file has been touched, check if the content is similar
51 51 # to the one we search for.
52 52 if fnode == ma.readdelta(ac[0]).get(path):
53 53 return a
54 54 # In theory, we should never get out of that loop without a result. But
55 55 # if manifest uses a buggy file revision (not children of the one it
56 56 # replaces) we could. Such a buggy situation will likely result in a crash
57 57 # somewhere else at some point.
58 58 return lkr
59 59
60 60 class basectx(object):
61 61 """A basectx object represents the common logic for its children:
62 62 changectx: read-only context that is already present in the repo,
63 63 workingctx: a context that represents the working directory and can
64 64 be committed,
65 65 memctx: a context that represents changes in-memory and can also
66 66 be committed."""
67 67 def __new__(cls, repo, changeid='', *args, **kwargs):
68 68 if isinstance(changeid, basectx):
69 69 return changeid
70 70
71 71 o = super(basectx, cls).__new__(cls)
72 72
73 73 o._repo = repo
74 74 o._rev = nullrev
75 75 o._node = nullid
76 76
77 77 return o
78 78
79 79 def __str__(self):
80 80 return short(self.node())
81 81
82 82 def __int__(self):
83 83 return self.rev()
84 84
85 85 def __repr__(self):
86 86 return "<%s %s>" % (type(self).__name__, str(self))
87 87
88 88 def __eq__(self, other):
89 89 try:
90 90 return type(self) == type(other) and self._rev == other._rev
91 91 except AttributeError:
92 92 return False
93 93
94 94 def __ne__(self, other):
95 95 return not (self == other)
96 96
97 97 def __contains__(self, key):
98 98 return key in self._manifest
99 99
100 100 def __getitem__(self, key):
101 101 return self.filectx(key)
102 102
103 103 def __iter__(self):
104 104 for f in sorted(self._manifest):
105 105 yield f
106 106
107 107 def _manifestmatches(self, match, s):
108 108 """generate a new manifest filtered by the match argument
109 109
110 110 This method is for internal use only and mainly exists to provide an
111 111 object oriented way for other contexts to customize the manifest
112 112 generation.
113 113 """
114 114 return self.manifest().matches(match)
115 115
116 116 def _matchstatus(self, other, match):
117 117 """return match.always if match is None
118 118
119 119 This internal method provides a way for child objects to override the
120 120 match operator.
121 121 """
122 122 return match or matchmod.always(self._repo.root, self._repo.getcwd())
123 123
124 124 def _buildstatus(self, other, s, match, listignored, listclean,
125 125 listunknown):
126 126 """build a status with respect to another context"""
127 127 # Load earliest manifest first for caching reasons. More specifically,
128 128 # if you have revisions 1000 and 1001, 1001 is probably stored as a
129 129 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
130 130 # 1000 and cache it so that when you read 1001, we just need to apply a
131 131 # delta to what's in the cache. So that's one full reconstruction + one
132 132 # delta application.
133 133 if self.rev() is not None and self.rev() < other.rev():
134 134 self.manifest()
135 135 mf1 = other._manifestmatches(match, s)
136 136 mf2 = self._manifestmatches(match, s)
137 137
138 138 modified, added = [], []
139 139 removed = []
140 clean = set()
140 clean = []
141 141 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
142 142 deletedset = set(deleted)
143 d = mf1.diff(mf2)
144 for fn, ((node1, flag1), (node2, flag2)) in d.iteritems():
143 d = mf1.diff(mf2, clean=listclean)
144 for fn, value in d.iteritems():
145 145 if fn in deletedset:
146 146 continue
147 if value is None:
148 clean.append(fn)
149 continue
150 (node1, flag1), (node2, flag2) = value
147 151 if node1 is None:
148 152 added.append(fn)
149 153 elif node2 is None:
150 154 removed.append(fn)
151 155 elif node2 != _newnode:
152 156 # The file was not a new file in mf2, so an entry
153 157 # from diff is really a difference.
154 158 modified.append(fn)
155 159 elif self[fn].cmp(other[fn]):
156 160 # node2 was newnode, but the working file doesn't
157 161 # match the one in mf1.
158 162 modified.append(fn)
159 163 else:
160 clean.add(fn)
161 if listclean:
162 nondiff = (set(mf1) | set(mf2)) - set(d)
163 clean = list((clean | nondiff) - deletedset)
164 else:
165 clean = []
164 clean.append(fn)
166 165
167 166 if removed:
168 167 # need to filter files if they are already reported as removed
169 168 unknown = [fn for fn in unknown if fn not in mf1]
170 169 ignored = [fn for fn in ignored if fn not in mf1]
171 170 # if they're deleted, don't report them as removed
172 171 removed = [fn for fn in removed if fn not in deletedset]
173 172
174 173 return scmutil.status(modified, added, removed, deleted, unknown,
175 174 ignored, clean)
176 175
177 176 @propertycache
178 177 def substate(self):
179 178 return subrepo.state(self, self._repo.ui)
180 179
181 180 def subrev(self, subpath):
182 181 return self.substate[subpath][1]
183 182
184 183 def rev(self):
185 184 return self._rev
186 185 def node(self):
187 186 return self._node
188 187 def hex(self):
189 188 return hex(self.node())
190 189 def manifest(self):
191 190 return self._manifest
192 191 def phasestr(self):
193 192 return phases.phasenames[self.phase()]
194 193 def mutable(self):
195 194 return self.phase() > phases.public
196 195
197 196 def getfileset(self, expr):
198 197 return fileset.getfileset(self, expr)
199 198
200 199 def obsolete(self):
201 200 """True if the changeset is obsolete"""
202 201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
203 202
204 203 def extinct(self):
205 204 """True if the changeset is extinct"""
206 205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
207 206
208 207 def unstable(self):
209 208 """True if the changeset is not obsolete but its ancestors are"""
210 209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
211 210
212 211 def bumped(self):
213 212 """True if the changeset tries to be a successor of a public changeset
214 213
215 214 Only non-public and non-obsolete changesets may be bumped.
216 215 """
217 216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
218 217
219 218 def divergent(self):
220 219 """True if the changeset is a successor of a changeset with multiple possible successors sets
221 220
222 221 Only non-public and non-obsolete changesets may be divergent.
223 222 """
224 223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
225 224
226 225 def troubled(self):
227 226 """True if the changeset is either unstable, bumped or divergent"""
228 227 return self.unstable() or self.bumped() or self.divergent()
229 228
230 229 def troubles(self):
231 230 """return the list of troubles affecting this changeset.
232 231
233 232 Troubles are returned as strings. Possible values are:
234 233 - unstable,
235 234 - bumped,
236 235 - divergent.
237 236 """
238 237 troubles = []
239 238 if self.unstable():
240 239 troubles.append('unstable')
241 240 if self.bumped():
242 241 troubles.append('bumped')
243 242 if self.divergent():
244 243 troubles.append('divergent')
245 244 return troubles
246 245
247 246 def parents(self):
248 247 """return contexts for each parent changeset"""
249 248 return self._parents
250 249
251 250 def p1(self):
252 251 return self._parents[0]
253 252
254 253 def p2(self):
255 254 if len(self._parents) == 2:
256 255 return self._parents[1]
257 256 return changectx(self._repo, -1)
258 257
259 258 def _fileinfo(self, path):
260 259 if '_manifest' in self.__dict__:
261 260 try:
262 261 return self._manifest[path], self._manifest.flags(path)
263 262 except KeyError:
264 263 raise error.ManifestLookupError(self._node, path,
265 264 _('not found in manifest'))
266 265 if '_manifestdelta' in self.__dict__ or path in self.files():
267 266 if path in self._manifestdelta:
268 267 return (self._manifestdelta[path],
269 268 self._manifestdelta.flags(path))
270 269 node, flag = self._repo.manifest.find(self._changeset[0], path)
271 270 if not node:
272 271 raise error.ManifestLookupError(self._node, path,
273 272 _('not found in manifest'))
274 273
275 274 return node, flag
276 275
277 276 def filenode(self, path):
278 277 return self._fileinfo(path)[0]
279 278
280 279 def flags(self, path):
281 280 try:
282 281 return self._fileinfo(path)[1]
283 282 except error.LookupError:
284 283 return ''
285 284
286 285 def sub(self, path):
287 286 return subrepo.subrepo(self, path)
288 287
289 288 def match(self, pats=[], include=None, exclude=None, default='glob'):
290 289 r = self._repo
291 290 return matchmod.match(r.root, r.getcwd(), pats,
292 291 include, exclude, default,
293 292 auditor=r.auditor, ctx=self)
294 293
295 294 def diff(self, ctx2=None, match=None, **opts):
296 295 """Returns a diff generator for the given contexts and matcher"""
297 296 if ctx2 is None:
298 297 ctx2 = self.p1()
299 298 if ctx2 is not None:
300 299 ctx2 = self._repo[ctx2]
301 300 diffopts = patch.diffopts(self._repo.ui, opts)
302 301 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
303 302
304 303 @propertycache
305 304 def _dirs(self):
306 305 return scmutil.dirs(self._manifest)
307 306
308 307 def dirs(self):
309 308 return self._dirs
310 309
311 310 def dirty(self, missing=False, merge=True, branch=True):
312 311 return False
313 312
314 313 def status(self, other=None, match=None, listignored=False,
315 314 listclean=False, listunknown=False, listsubrepos=False):
316 315 """return status of files between two nodes or node and working
317 316 directory.
318 317
319 318 If other is None, compare this node with working directory.
320 319
321 320 returns (modified, added, removed, deleted, unknown, ignored, clean)
322 321 """
323 322
324 323 ctx1 = self
325 324 ctx2 = self._repo[other]
326 325
327 326 # This next code block is, admittedly, fragile logic that tests for
328 327 # reversing the contexts and wouldn't need to exist if it weren't for
329 328 # the fast (and common) code path of comparing the working directory
330 329 # with its first parent.
331 330 #
332 331 # What we're aiming for here is the ability to call:
333 332 #
334 333 # workingctx.status(parentctx)
335 334 #
336 335 # If we always built the manifest for each context and compared those,
337 336 # then we'd be done. But the special case of the above call means we
338 337 # just copy the manifest of the parent.
339 338 reversed = False
340 339 if (not isinstance(ctx1, changectx)
341 340 and isinstance(ctx2, changectx)):
342 341 reversed = True
343 342 ctx1, ctx2 = ctx2, ctx1
344 343
345 344 match = ctx2._matchstatus(ctx1, match)
346 345 r = scmutil.status([], [], [], [], [], [], [])
347 346 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
348 347 listunknown)
349 348
350 349 if reversed:
351 350 # Reverse added and removed. Clear deleted, unknown and ignored as
352 351 # these make no sense to reverse.
353 352 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
354 353 r.clean)
355 354
356 355 if listsubrepos:
357 356 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
358 357 rev2 = ctx2.subrev(subpath)
359 358 try:
360 359 submatch = matchmod.narrowmatcher(subpath, match)
361 360 s = sub.status(rev2, match=submatch, ignored=listignored,
362 361 clean=listclean, unknown=listunknown,
363 362 listsubrepos=True)
364 363 for rfiles, sfiles in zip(r, s):
365 364 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
366 365 except error.LookupError:
367 366 self._repo.ui.status(_("skipping missing "
368 367 "subrepository: %s\n") % subpath)
369 368
370 369 for l in r:
371 370 l.sort()
372 371
373 372 return r
374 373
375 374
376 375 def makememctx(repo, parents, text, user, date, branch, files, store,
377 376 editor=None):
378 377 def getfilectx(repo, memctx, path):
379 378 data, mode, copied = store.getfile(path)
380 379 if data is None:
381 380 return None
382 381 islink, isexec = mode
383 382 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
384 383 copied=copied, memctx=memctx)
385 384 extra = {}
386 385 if branch:
387 386 extra['branch'] = encoding.fromlocal(branch)
388 387 ctx = memctx(repo, parents, text, files, getfilectx, user,
389 388 date, extra, editor)
390 389 return ctx
391 390
392 391 class changectx(basectx):
393 392 """A changecontext object makes access to data related to a particular
394 393 changeset convenient. It represents a read-only context already present in
395 394 the repo."""
396 395 def __init__(self, repo, changeid=''):
397 396 """changeid is a revision number, node, or tag"""
398 397
399 398 # since basectx.__new__ already took care of copying the object, we
400 399 # don't need to do anything in __init__, so we just exit here
401 400 if isinstance(changeid, basectx):
402 401 return
403 402
404 403 if changeid == '':
405 404 changeid = '.'
406 405 self._repo = repo
407 406
408 407 try:
409 408 if isinstance(changeid, int):
410 409 self._node = repo.changelog.node(changeid)
411 410 self._rev = changeid
412 411 return
413 412 if isinstance(changeid, long):
414 413 changeid = str(changeid)
415 414 if changeid == '.':
416 415 self._node = repo.dirstate.p1()
417 416 self._rev = repo.changelog.rev(self._node)
418 417 return
419 418 if changeid == 'null':
420 419 self._node = nullid
421 420 self._rev = nullrev
422 421 return
423 422 if changeid == 'tip':
424 423 self._node = repo.changelog.tip()
425 424 self._rev = repo.changelog.rev(self._node)
426 425 return
427 426 if len(changeid) == 20:
428 427 try:
429 428 self._node = changeid
430 429 self._rev = repo.changelog.rev(changeid)
431 430 return
432 431 except error.FilteredRepoLookupError:
433 432 raise
434 433 except LookupError:
435 434 pass
436 435
437 436 try:
438 437 r = int(changeid)
439 438 if str(r) != changeid:
440 439 raise ValueError
441 440 l = len(repo.changelog)
442 441 if r < 0:
443 442 r += l
444 443 if r < 0 or r >= l:
445 444 raise ValueError
446 445 self._rev = r
447 446 self._node = repo.changelog.node(r)
448 447 return
449 448 except error.FilteredIndexError:
450 449 raise
451 450 except (ValueError, OverflowError, IndexError):
452 451 pass
453 452
454 453 if len(changeid) == 40:
455 454 try:
456 455 self._node = bin(changeid)
457 456 self._rev = repo.changelog.rev(self._node)
458 457 return
459 458 except error.FilteredLookupError:
460 459 raise
461 460 except (TypeError, LookupError):
462 461 pass
463 462
464 463 # lookup bookmarks through the name interface
465 464 try:
466 465 self._node = repo.names.singlenode(repo, changeid)
467 466 self._rev = repo.changelog.rev(self._node)
468 467 return
469 468 except KeyError:
470 469 pass
471 470 except error.FilteredRepoLookupError:
472 471 raise
473 472 except error.RepoLookupError:
474 473 pass
475 474
476 475 self._node = repo.unfiltered().changelog._partialmatch(changeid)
477 476 if self._node is not None:
478 477 self._rev = repo.changelog.rev(self._node)
479 478 return
480 479
481 480 # lookup failed
482 481 # check if it might have come from damaged dirstate
483 482 #
484 483 # XXX we could avoid the unfiltered if we had a recognizable
485 484 # exception for filtered changeset access
486 485 if changeid in repo.unfiltered().dirstate.parents():
487 486 msg = _("working directory has unknown parent '%s'!")
488 487 raise error.Abort(msg % short(changeid))
489 488 try:
490 489 if len(changeid) == 20:
491 490 changeid = hex(changeid)
492 491 except TypeError:
493 492 pass
494 493 except (error.FilteredIndexError, error.FilteredLookupError,
495 494 error.FilteredRepoLookupError):
496 495 if repo.filtername == 'visible':
497 496 msg = _("hidden revision '%s'") % changeid
498 497 hint = _('use --hidden to access hidden revisions')
499 498 raise error.FilteredRepoLookupError(msg, hint=hint)
500 499 msg = _("filtered revision '%s' (not in '%s' subset)")
501 500 msg %= (changeid, repo.filtername)
502 501 raise error.FilteredRepoLookupError(msg)
503 502 except IndexError:
504 503 pass
505 504 raise error.RepoLookupError(
506 505 _("unknown revision '%s'") % changeid)
507 506
508 507 def __hash__(self):
509 508 try:
510 509 return hash(self._rev)
511 510 except AttributeError:
512 511 return id(self)
513 512
514 513 def __nonzero__(self):
515 514 return self._rev != nullrev
516 515
517 516 @propertycache
518 517 def _changeset(self):
519 518 return self._repo.changelog.read(self.rev())
520 519
521 520 @propertycache
522 521 def _manifest(self):
523 522 return self._repo.manifest.read(self._changeset[0])
524 523
525 524 @propertycache
526 525 def _manifestdelta(self):
527 526 return self._repo.manifest.readdelta(self._changeset[0])
528 527
529 528 @propertycache
530 529 def _parents(self):
531 530 p = self._repo.changelog.parentrevs(self._rev)
532 531 if p[1] == nullrev:
533 532 p = p[:-1]
534 533 return [changectx(self._repo, x) for x in p]
535 534
536 535 def changeset(self):
537 536 return self._changeset
538 537 def manifestnode(self):
539 538 return self._changeset[0]
540 539
541 540 def user(self):
542 541 return self._changeset[1]
543 542 def date(self):
544 543 return self._changeset[2]
545 544 def files(self):
546 545 return self._changeset[3]
547 546 def description(self):
548 547 return self._changeset[4]
549 548 def branch(self):
550 549 return encoding.tolocal(self._changeset[5].get("branch"))
551 550 def closesbranch(self):
552 551 return 'close' in self._changeset[5]
553 552 def extra(self):
554 553 return self._changeset[5]
555 554 def tags(self):
556 555 return self._repo.nodetags(self._node)
557 556 def bookmarks(self):
558 557 return self._repo.nodebookmarks(self._node)
559 558 def phase(self):
560 559 return self._repo._phasecache.phase(self._repo, self._rev)
561 560 def hidden(self):
562 561 return self._rev in repoview.filterrevs(self._repo, 'visible')
563 562
564 563 def children(self):
565 564 """return contexts for each child changeset"""
566 565 c = self._repo.changelog.children(self._node)
567 566 return [changectx(self._repo, x) for x in c]
568 567
569 568 def ancestors(self):
570 569 for a in self._repo.changelog.ancestors([self._rev]):
571 570 yield changectx(self._repo, a)
572 571
573 572 def descendants(self):
574 573 for d in self._repo.changelog.descendants([self._rev]):
575 574 yield changectx(self._repo, d)
576 575
577 576 def filectx(self, path, fileid=None, filelog=None):
578 577 """get a file context from this changeset"""
579 578 if fileid is None:
580 579 fileid = self.filenode(path)
581 580 return filectx(self._repo, path, fileid=fileid,
582 581 changectx=self, filelog=filelog)
583 582
584 583 def ancestor(self, c2, warn=False):
585 584 """return the "best" ancestor context of self and c2
586 585
587 586 If there are multiple candidates, it will show a message and check
588 587 merge.preferancestor configuration before falling back to the
589 588 revlog ancestor."""
590 589 # deal with workingctxs
591 590 n2 = c2._node
592 591 if n2 is None:
593 592 n2 = c2._parents[0]._node
594 593 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
595 594 if not cahs:
596 595 anc = nullid
597 596 elif len(cahs) == 1:
598 597 anc = cahs[0]
599 598 else:
600 599 for r in self._repo.ui.configlist('merge', 'preferancestor'):
601 600 try:
602 601 ctx = changectx(self._repo, r)
603 602 except error.RepoLookupError:
604 603 continue
605 604 anc = ctx.node()
606 605 if anc in cahs:
607 606 break
608 607 else:
609 608 anc = self._repo.changelog.ancestor(self._node, n2)
610 609 if warn:
611 610 self._repo.ui.status(
612 611 (_("note: using %s as ancestor of %s and %s\n") %
613 612 (short(anc), short(self._node), short(n2))) +
614 613 ''.join(_(" alternatively, use --config "
615 614 "merge.preferancestor=%s\n") %
616 615 short(n) for n in sorted(cahs) if n != anc))
617 616 return changectx(self._repo, anc)
618 617
619 618 def descendant(self, other):
620 619 """True if other is descendant of this changeset"""
621 620 return self._repo.changelog.descendant(self._rev, other._rev)
622 621
623 622 def walk(self, match):
624 623 fset = set(match.files())
625 624 # for dirstate.walk, files=['.'] means "walk the whole tree".
626 625 # follow that here, too
627 626 fset.discard('.')
628 627
629 628 # avoid the entire walk if we're only looking for specific files
630 629 if fset and not match.anypats():
631 630 if util.all([fn in self for fn in fset]):
632 631 for fn in sorted(fset):
633 632 if match(fn):
634 633 yield fn
635 634 raise StopIteration
636 635
637 636 for fn in self:
638 637 if fn in fset:
639 638 # specified pattern is the exact name
640 639 fset.remove(fn)
641 640 if match(fn):
642 641 yield fn
643 642 for fn in sorted(fset):
644 643 if fn in self._dirs:
645 644 # specified pattern is a directory
646 645 continue
647 646 match.bad(fn, _('no such file in rev %s') % self)
648 647
649 648 def matches(self, match):
650 649 return self.walk(match)
651 650
652 651 class basefilectx(object):
653 652 """A filecontext object represents the common logic for its children:
654 653 filectx: read-only access to a filerevision that is already present
655 654 in the repo,
656 655 workingfilectx: a filecontext that represents files from the working
657 656 directory,
658 657 memfilectx: a filecontext that represents files in-memory."""
659 658 def __new__(cls, repo, path, *args, **kwargs):
660 659 return super(basefilectx, cls).__new__(cls)
661 660
662 661 @propertycache
663 662 def _filelog(self):
664 663 return self._repo.file(self._path)
665 664
666 665 @propertycache
667 666 def _changeid(self):
668 667 if '_changeid' in self.__dict__:
669 668 return self._changeid
670 669 elif '_changectx' in self.__dict__:
671 670 return self._changectx.rev()
672 671 else:
673 672 return self._filelog.linkrev(self._filerev)
674 673
675 674 @propertycache
676 675 def _filenode(self):
677 676 if '_fileid' in self.__dict__:
678 677 return self._filelog.lookup(self._fileid)
679 678 else:
680 679 return self._changectx.filenode(self._path)
681 680
682 681 @propertycache
683 682 def _filerev(self):
684 683 return self._filelog.rev(self._filenode)
685 684
686 685 @propertycache
687 686 def _repopath(self):
688 687 return self._path
689 688
690 689 def __nonzero__(self):
691 690 try:
692 691 self._filenode
693 692 return True
694 693 except error.LookupError:
695 694 # file is missing
696 695 return False
697 696
698 697 def __str__(self):
699 698 return "%s@%s" % (self.path(), self._changectx)
700 699
701 700 def __repr__(self):
702 701 return "<%s %s>" % (type(self).__name__, str(self))
703 702
704 703 def __hash__(self):
705 704 try:
706 705 return hash((self._path, self._filenode))
707 706 except AttributeError:
708 707 return id(self)
709 708
710 709 def __eq__(self, other):
711 710 try:
712 711 return (type(self) == type(other) and self._path == other._path
713 712 and self._filenode == other._filenode)
714 713 except AttributeError:
715 714 return False
716 715
717 716 def __ne__(self, other):
718 717 return not (self == other)
719 718
720 719 def filerev(self):
721 720 return self._filerev
722 721 def filenode(self):
723 722 return self._filenode
724 723 def flags(self):
725 724 return self._changectx.flags(self._path)
726 725 def filelog(self):
727 726 return self._filelog
728 727 def rev(self):
729 728 return self._changeid
730 729 def linkrev(self):
731 730 return self._filelog.linkrev(self._filerev)
732 731 def node(self):
733 732 return self._changectx.node()
734 733 def hex(self):
735 734 return self._changectx.hex()
736 735 def user(self):
737 736 return self._changectx.user()
738 737 def date(self):
739 738 return self._changectx.date()
740 739 def files(self):
741 740 return self._changectx.files()
742 741 def description(self):
743 742 return self._changectx.description()
744 743 def branch(self):
745 744 return self._changectx.branch()
746 745 def extra(self):
747 746 return self._changectx.extra()
748 747 def phase(self):
749 748 return self._changectx.phase()
750 749 def phasestr(self):
751 750 return self._changectx.phasestr()
752 751 def manifest(self):
753 752 return self._changectx.manifest()
754 753 def changectx(self):
755 754 return self._changectx
756 755
757 756 def path(self):
758 757 return self._path
759 758
760 759 def isbinary(self):
761 760 try:
762 761 return util.binary(self.data())
763 762 except IOError:
764 763 return False
765 764 def isexec(self):
766 765 return 'x' in self.flags()
767 766 def islink(self):
768 767 return 'l' in self.flags()
769 768
770 769 def cmp(self, fctx):
771 770 """compare with other file context
772 771
773 772 returns True if different than fctx.
774 773 """
775 774 if (fctx._filerev is None
776 775 and (self._repo._encodefilterpats
777 776 # if file data starts with '\1\n', empty metadata block is
778 777 # prepended, which adds 4 bytes to filelog.size().
779 778 or self.size() - 4 == fctx.size())
780 779 or self.size() == fctx.size()):
781 780 return self._filelog.cmp(self._filenode, fctx.data())
782 781
783 782 return True
784 783
785 784 def introrev(self):
786 785 """return the rev of the changeset which introduced this file revision
787 786
788 787 This method is different from linkrev because it takes into account the
789 788 changeset the filectx was created from. It ensures the returned
790 789 revision is one of its ancestors. This prevents bugs from
791 790 'linkrev-shadowing' when a file revision is used by multiple
792 791 changesets.
793 792 """
794 793 lkr = self.linkrev()
795 794 attrs = vars(self)
796 795 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
797 796 if noctx or self.rev() == lkr:
798 797 return self.linkrev()
799 798 return _adjustlinkrev(self._repo, self._path, self._filelog,
800 799 self._filenode, self.rev(), inclusive=True)
801 800
802 801 def parents(self):
803 802 _path = self._path
804 803 fl = self._filelog
805 804 parents = self._filelog.parents(self._filenode)
806 805 pl = [(_path, node, fl) for node in parents if node != nullid]
807 806
808 807 r = fl.renamed(self._filenode)
809 808 if r:
810 809 # - In the simple rename case, both parents are nullid, pl is empty.
811 810 # - In case of merge, only one of the parents is nullid and should
812 811 # be replaced with the rename information. This parent is -always-
813 812 # the first one.
814 813 #
815 814 # As nullid parents have always been filtered out in the previous list
816 815 # comprehension, inserting at 0 will always result in replacing the
817 816 # first nullid parent with rename information.
818 817 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
819 818
820 819 ret = []
821 820 for path, fnode, l in pl:
822 821 if '_changeid' in vars(self) or '_changectx' in vars(self):
823 822 # If self is associated with a changeset (probably explicitly
824 823 # fed), ensure the created filectx is associated with a
825 824 # changeset that is an ancestor of self.changectx.
826 825 rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
827 826 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
828 827 changeid=rev)
829 828 else:
830 829 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
831 830 ret.append(fctx)
832 831 return ret
833 832
834 833 def p1(self):
835 834 return self.parents()[0]
836 835
837 836 def p2(self):
838 837 p = self.parents()
839 838 if len(p) == 2:
840 839 return p[1]
841 840 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
842 841
843 842 def annotate(self, follow=False, linenumber=None, diffopts=None):
844 843 '''returns a list of tuples of (ctx, line) for each line
845 844 in the file, where ctx is the filectx of the node where
846 845 that line was last changed.
847 846 This returns tuples of ((ctx, linenumber), line) for each line,
848 847 if "linenumber" parameter is NOT "None".
849 848 In such tuples, linenumber means one at the first appearance
850 849 in the managed file.
851 850 To reduce annotation cost,
852 851 this returns a fixed value (False is used) as linenumber,
853 852 if "linenumber" parameter is "False".'''
854 853
855 854 if linenumber is None:
856 855 def decorate(text, rev):
857 856 return ([rev] * len(text.splitlines()), text)
858 857 elif linenumber:
859 858 def decorate(text, rev):
860 859 size = len(text.splitlines())
861 860 return ([(rev, i) for i in xrange(1, size + 1)], text)
862 861 else:
863 862 def decorate(text, rev):
864 863 return ([(rev, False)] * len(text.splitlines()), text)
865 864
866 865 def pair(parent, child):
867 866 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
868 867 refine=True)
869 868 for (a1, a2, b1, b2), t in blocks:
870 869 # Changed blocks ('!') or blocks made only of blank lines ('~')
871 870 # belong to the child.
872 871 if t == '=':
873 872 child[0][b1:b2] = parent[0][a1:a2]
874 873 return child
875 874
876 875 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
877 876
878 877 def parents(f):
879 878 pl = f.parents()
880 879
881 880 # Don't return renamed parents if we aren't following.
882 881 if not follow:
883 882 pl = [p for p in pl if p.path() == f.path()]
884 883
885 884 # renamed filectx won't have a filelog yet, so set it
886 885 # from the cache to save time
887 886 for p in pl:
888 887 if not '_filelog' in p.__dict__:
889 888 p._filelog = getlog(p.path())
890 889
891 890 return pl
892 891
893 892 # use linkrev to find the first changeset where self appeared
894 893 base = self
895 894 introrev = self.introrev()
896 895 if self.rev() != introrev:
897 896 base = filectx(self._repo, self._path, filelog=self.filelog(),
898 897 fileid=self.filenode(), changeid=introrev)
899 898
900 899 # This algorithm would prefer to be recursive, but Python is a
901 900 # bit recursion-hostile. Instead we do an iterative
902 901 # depth-first search.
903 902
904 903 visit = [base]
905 904 hist = {}
906 905 pcache = {}
907 906 needed = {base: 1}
908 907 while visit:
909 908 f = visit[-1]
910 909 pcached = f in pcache
911 910 if not pcached:
912 911 pcache[f] = parents(f)
913 912
914 913 ready = True
915 914 pl = pcache[f]
916 915 for p in pl:
917 916 if p not in hist:
918 917 ready = False
919 918 visit.append(p)
920 919 if not pcached:
921 920 needed[p] = needed.get(p, 0) + 1
922 921 if ready:
923 922 visit.pop()
924 923 reusable = f in hist
925 924 if reusable:
926 925 curr = hist[f]
927 926 else:
928 927 curr = decorate(f.data(), f)
929 928 for p in pl:
930 929 if not reusable:
931 930 curr = pair(hist[p], curr)
932 931 if needed[p] == 1:
933 932 del hist[p]
934 933 del needed[p]
935 934 else:
936 935 needed[p] -= 1
937 936
938 937 hist[f] = curr
939 938 pcache[f] = []
940 939
941 940 return zip(hist[base][0], hist[base][1].splitlines(True))
942 941
943 942 def ancestors(self, followfirst=False):
944 943 visit = {}
945 944 c = self
946 945 cut = followfirst and 1 or None
947 946 while True:
948 947 for parent in c.parents()[:cut]:
949 948 visit[(parent.rev(), parent.node())] = parent
950 949 if not visit:
951 950 break
952 951 c = visit.pop(max(visit))
953 952 yield c
954 953
955 954 class filectx(basefilectx):
956 955 """A filecontext object makes access to data related to a particular
957 956 filerevision convenient."""
958 957 def __init__(self, repo, path, changeid=None, fileid=None,
959 958 filelog=None, changectx=None):
960 959 """changeid can be a changeset revision, node, or tag.
961 960 fileid can be a file revision or node."""
962 961 self._repo = repo
963 962 self._path = path
964 963
965 964 assert (changeid is not None
966 965 or fileid is not None
967 966 or changectx is not None), \
968 967 ("bad args: changeid=%r, fileid=%r, changectx=%r"
969 968 % (changeid, fileid, changectx))
970 969
971 970 if filelog is not None:
972 971 self._filelog = filelog
973 972
974 973 if changeid is not None:
975 974 self._changeid = changeid
976 975 if changectx is not None:
977 976 self._changectx = changectx
978 977 if fileid is not None:
979 978 self._fileid = fileid
980 979
981 980 @propertycache
982 981 def _changectx(self):
983 982 try:
984 983 return changectx(self._repo, self._changeid)
985 984 except error.FilteredRepoLookupError:
986 985 # Linkrev may point to any revision in the repository. When the
987 986 # repository is filtered this may lead to `filectx` trying to build
988 987 # `changectx` for filtered revision. In such case we fallback to
989 988 # creating `changectx` on the unfiltered version of the repository.
990 989 # This fallback should not be an issue because `changectx` from
991 990 # `filectx` are not used in complex operations that care about
992 991 # filtering.
993 992 #
994 993 # This fallback is a cheap and dirty fix that prevents several
995 994 # crashes. It does not ensure the behavior is correct. However the
996 995 # behavior was not correct before filtering either and "incorrect
997 996 # behavior" is seen as better than "crash"
998 997 #
999 998 # Linkrevs have several serious troubles with filtering that are
1000 999 # complicated to solve. Proper handling of the issue here should be
1001 1000 # considered when solving the linkrev issues is on the table.
1002 1001 return changectx(self._repo.unfiltered(), self._changeid)
1003 1002
1004 1003 def filectx(self, fileid):
1005 1004 '''opens an arbitrary revision of the file without
1006 1005 opening a new filelog'''
1007 1006 return filectx(self._repo, self._path, fileid=fileid,
1008 1007 filelog=self._filelog)
1009 1008
1010 1009 def data(self):
1011 1010 try:
1012 1011 return self._filelog.read(self._filenode)
1013 1012 except error.CensoredNodeError:
1014 1013 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1015 1014 return ""
1016 1015 raise util.Abort(_("censored node: %s") % short(self._filenode),
1017 1016 hint=_("set censor.policy to ignore errors"))
1018 1017
1019 1018 def size(self):
1020 1019 return self._filelog.size(self._filerev)
1021 1020
1022 1021 def renamed(self):
1023 1022 """check if file was actually renamed in this changeset revision
1024 1023
1025 1024 If a rename is logged in the file revision, we report a copy for the changeset only
1026 1025 if the file revision's linkrev points back to the changeset in question
1027 1026 or both changeset parents contain different file revisions.
1028 1027 """
1029 1028
1030 1029 renamed = self._filelog.renamed(self._filenode)
1031 1030 if not renamed:
1032 1031 return renamed
1033 1032
1034 1033 if self.rev() == self.linkrev():
1035 1034 return renamed
1036 1035
1037 1036 name = self.path()
1038 1037 fnode = self._filenode
1039 1038 for p in self._changectx.parents():
1040 1039 try:
1041 1040 if fnode == p.filenode(name):
1042 1041 return None
1043 1042 except error.LookupError:
1044 1043 pass
1045 1044 return renamed
1046 1045
1047 1046 def children(self):
1048 1047 # hard for renames
1049 1048 c = self._filelog.children(self._filenode)
1050 1049 return [filectx(self._repo, self._path, fileid=x,
1051 1050 filelog=self._filelog) for x in c]
1052 1051
1053 1052 class committablectx(basectx):
1054 1053 """A committablectx object provides common functionality for a context that
1055 1054 wants the ability to commit, e.g. workingctx or memctx."""
1056 1055 def __init__(self, repo, text="", user=None, date=None, extra=None,
1057 1056 changes=None):
1058 1057 self._repo = repo
1059 1058 self._rev = None
1060 1059 self._node = None
1061 1060 self._text = text
1062 1061 if date:
1063 1062 self._date = util.parsedate(date)
1064 1063 if user:
1065 1064 self._user = user
1066 1065 if changes:
1067 1066 self._status = changes
1068 1067
1069 1068 self._extra = {}
1070 1069 if extra:
1071 1070 self._extra = extra.copy()
1072 1071 if 'branch' not in self._extra:
1073 1072 try:
1074 1073 branch = encoding.fromlocal(self._repo.dirstate.branch())
1075 1074 except UnicodeDecodeError:
1076 1075 raise util.Abort(_('branch name not in UTF-8!'))
1077 1076 self._extra['branch'] = branch
1078 1077 if self._extra['branch'] == '':
1079 1078 self._extra['branch'] = 'default'
1080 1079
1081 1080 def __str__(self):
1082 1081 return str(self._parents[0]) + "+"
1083 1082
1084 1083 def __nonzero__(self):
1085 1084 return True
1086 1085
1087 1086 def _buildflagfunc(self):
1088 1087 # Create a fallback function for getting file flags when the
1089 1088 # filesystem doesn't support them
1090 1089
1091 1090 copiesget = self._repo.dirstate.copies().get
1092 1091
1093 1092 if len(self._parents) < 2:
1094 1093 # when we have one parent, it's easy: copy from parent
1095 1094 man = self._parents[0].manifest()
1096 1095 def func(f):
1097 1096 f = copiesget(f, f)
1098 1097 return man.flags(f)
1099 1098 else:
1100 1099 # merges are tricky: we try to reconstruct the unstored
1101 1100 # result from the merge (issue1802)
1102 1101 p1, p2 = self._parents
1103 1102 pa = p1.ancestor(p2)
1104 1103 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1105 1104
1106 1105 def func(f):
1107 1106 f = copiesget(f, f) # may be wrong for merges with copies
1108 1107 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1109 1108 if fl1 == fl2:
1110 1109 return fl1
1111 1110 if fl1 == fla:
1112 1111 return fl2
1113 1112 if fl2 == fla:
1114 1113 return fl1
1115 1114 return '' # punt for conflicts
1116 1115
1117 1116 return func
1118 1117
1119 1118 @propertycache
1120 1119 def _flagfunc(self):
1121 1120 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1122 1121
1123 1122 @propertycache
1124 1123 def _manifest(self):
1125 1124 """generate a manifest corresponding to the values in self._status
1126 1125
1127 1126 This reuses the file nodeid from the parent, but we append an extra letter
1128 1127 when modified. Modified files get an extra 'm' while added files get
1129 1128 an extra 'a'. This is used by manifest merge to see that files
1130 1129 are different and by update logic to avoid deleting newly added files.
1131 1130 """
1132 1131
1133 1132 man1 = self._parents[0].manifest()
1134 1133 man = man1.copy()
1135 1134 if len(self._parents) > 1:
1136 1135 man2 = self.p2().manifest()
1137 1136 def getman(f):
1138 1137 if f in man1:
1139 1138 return man1
1140 1139 return man2
1141 1140 else:
1142 1141 getman = lambda f: man1
1143 1142
1144 1143 copied = self._repo.dirstate.copies()
1145 1144 ff = self._flagfunc
1146 1145 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1147 1146 for f in l:
1148 1147 orig = copied.get(f, f)
1149 1148 man[f] = getman(orig).get(orig, nullid) + i
1150 1149 try:
1151 1150 man.setflag(f, ff(f))
1152 1151 except OSError:
1153 1152 pass
1154 1153
1155 1154 for f in self._status.deleted + self._status.removed:
1156 1155 if f in man:
1157 1156 del man[f]
1158 1157
1159 1158 return man
1160 1159
1161 1160 @propertycache
1162 1161 def _status(self):
1163 1162 return self._repo.status()
1164 1163
1165 1164 @propertycache
1166 1165 def _user(self):
1167 1166 return self._repo.ui.username()
1168 1167
1169 1168 @propertycache
1170 1169 def _date(self):
1171 1170 return util.makedate()
1172 1171
1173 1172 def subrev(self, subpath):
1174 1173 return None
1175 1174
1176 1175 def user(self):
1177 1176 return self._user or self._repo.ui.username()
1178 1177 def date(self):
1179 1178 return self._date
1180 1179 def description(self):
1181 1180 return self._text
1182 1181 def files(self):
1183 1182 return sorted(self._status.modified + self._status.added +
1184 1183 self._status.removed)
1185 1184
1186 1185 def modified(self):
1187 1186 return self._status.modified
1188 1187 def added(self):
1189 1188 return self._status.added
1190 1189 def removed(self):
1191 1190 return self._status.removed
1192 1191 def deleted(self):
1193 1192 return self._status.deleted
1194 1193 def branch(self):
1195 1194 return encoding.tolocal(self._extra['branch'])
1196 1195 def closesbranch(self):
1197 1196 return 'close' in self._extra
1198 1197 def extra(self):
1199 1198 return self._extra
1200 1199
1201 1200 def tags(self):
1202 1201 t = []
1203 1202 for p in self.parents():
1204 1203 t.extend(p.tags())
1205 1204 return t
1206 1205
1207 1206 def bookmarks(self):
1208 1207 b = []
1209 1208 for p in self.parents():
1210 1209 b.extend(p.bookmarks())
1211 1210 return b
1212 1211
1213 1212 def phase(self):
1214 1213 phase = phases.draft # default phase to draft
1215 1214 for p in self.parents():
1216 1215 phase = max(phase, p.phase())
1217 1216 return phase
1218 1217
1219 1218 def hidden(self):
1220 1219 return False
1221 1220
1222 1221 def children(self):
1223 1222 return []
1224 1223
1225 1224 def flags(self, path):
1226 1225 if '_manifest' in self.__dict__:
1227 1226 try:
1228 1227 return self._manifest.flags(path)
1229 1228 except KeyError:
1230 1229 return ''
1231 1230
1232 1231 try:
1233 1232 return self._flagfunc(path)
1234 1233 except OSError:
1235 1234 return ''
1236 1235
1237 1236 def ancestor(self, c2):
1238 1237 """return the "best" ancestor context of self and c2"""
1239 1238 return self._parents[0].ancestor(c2) # punt on two parents for now
1240 1239
1241 1240 def walk(self, match):
1242 1241 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1243 1242 True, False))
1244 1243
1245 1244 def matches(self, match):
1246 1245 return sorted(self._repo.dirstate.matches(match))
1247 1246
1248 1247 def ancestors(self):
1249 1248 for p in self._parents:
1250 1249 yield p
1251 1250 for a in self._repo.changelog.ancestors(
1252 1251 [p.rev() for p in self._parents]):
1253 1252 yield changectx(self._repo, a)
1254 1253
1255 1254 def markcommitted(self, node):
1256 1255 """Perform post-commit cleanup necessary after committing this ctx
1257 1256
1258 1257 Specifically, this updates backing stores this working context
1259 1258 wraps to reflect the fact that the changes reflected by this
1260 1259 workingctx have been committed. For example, it marks
1261 1260 modified and added files as normal in the dirstate.
1262 1261
1263 1262 """
1264 1263
1265 1264 self._repo.dirstate.beginparentchange()
1266 1265 for f in self.modified() + self.added():
1267 1266 self._repo.dirstate.normal(f)
1268 1267 for f in self.removed():
1269 1268 self._repo.dirstate.drop(f)
1270 1269 self._repo.dirstate.setparents(node)
1271 1270 self._repo.dirstate.endparentchange()
1272 1271
1273 1272 def dirs(self):
1274 1273 return self._repo.dirstate.dirs()
1275 1274
1276 1275 class workingctx(committablectx):
1277 1276 """A workingctx object makes access to data related to
1278 1277 the current working directory convenient.
1279 1278 date - any valid date string or (unixtime, offset), or None.
1280 1279 user - username string, or None.
1281 1280 extra - a dictionary of extra values, or None.
1282 1281 changes - a list of file lists as returned by localrepo.status()
1283 1282 or None to use the repository status.
1284 1283 """
1285 1284 def __init__(self, repo, text="", user=None, date=None, extra=None,
1286 1285 changes=None):
1287 1286 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1288 1287
1289 1288 def __iter__(self):
1290 1289 d = self._repo.dirstate
1291 1290 for f in d:
1292 1291 if d[f] != 'r':
1293 1292 yield f
1294 1293
1295 1294 def __contains__(self, key):
1296 1295 return self._repo.dirstate[key] not in "?r"
1297 1296
1298 1297 @propertycache
1299 1298 def _parents(self):
1300 1299 p = self._repo.dirstate.parents()
1301 1300 if p[1] == nullid:
1302 1301 p = p[:-1]
1303 1302 return [changectx(self._repo, x) for x in p]
1304 1303
1305 1304 def filectx(self, path, filelog=None):
1306 1305 """get a file context from the working directory"""
1307 1306 return workingfilectx(self._repo, path, workingctx=self,
1308 1307 filelog=filelog)
1309 1308
1310 1309 def dirty(self, missing=False, merge=True, branch=True):
1311 1310 "check whether a working directory is modified"
1312 1311 # check subrepos first
1313 1312 for s in sorted(self.substate):
1314 1313 if self.sub(s).dirty():
1315 1314 return True
1316 1315 # check current working dir
1317 1316 return ((merge and self.p2()) or
1318 1317 (branch and self.branch() != self.p1().branch()) or
1319 1318 self.modified() or self.added() or self.removed() or
1320 1319 (missing and self.deleted()))
1321 1320
1322 1321 def add(self, list, prefix=""):
1323 1322 join = lambda f: os.path.join(prefix, f)
1324 1323 wlock = self._repo.wlock()
1325 1324 ui, ds = self._repo.ui, self._repo.dirstate
1326 1325 try:
1327 1326 rejected = []
1328 1327 lstat = self._repo.wvfs.lstat
1329 1328 for f in list:
1330 1329 scmutil.checkportable(ui, join(f))
1331 1330 try:
1332 1331 st = lstat(f)
1333 1332 except OSError:
1334 1333 ui.warn(_("%s does not exist!\n") % join(f))
1335 1334 rejected.append(f)
1336 1335 continue
1337 1336 if st.st_size > 10000000:
1338 1337 ui.warn(_("%s: up to %d MB of RAM may be required "
1339 1338 "to manage this file\n"
1340 1339 "(use 'hg revert %s' to cancel the "
1341 1340 "pending addition)\n")
1342 1341 % (f, 3 * st.st_size // 1000000, join(f)))
1343 1342 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1344 1343 ui.warn(_("%s not added: only files and symlinks "
1345 1344 "supported currently\n") % join(f))
1346 1345 rejected.append(f)
1347 1346 elif ds[f] in 'amn':
1348 1347 ui.warn(_("%s already tracked!\n") % join(f))
1349 1348 elif ds[f] == 'r':
1350 1349 ds.normallookup(f)
1351 1350 else:
1352 1351 ds.add(f)
1353 1352 return rejected
1354 1353 finally:
1355 1354 wlock.release()
1356 1355
1357 1356 def forget(self, files, prefix=""):
1358 1357 join = lambda f: os.path.join(prefix, f)
1359 1358 wlock = self._repo.wlock()
1360 1359 try:
1361 1360 rejected = []
1362 1361 for f in files:
1363 1362 if f not in self._repo.dirstate:
1364 1363 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1365 1364 rejected.append(f)
1366 1365 elif self._repo.dirstate[f] != 'a':
1367 1366 self._repo.dirstate.remove(f)
1368 1367 else:
1369 1368 self._repo.dirstate.drop(f)
1370 1369 return rejected
1371 1370 finally:
1372 1371 wlock.release()
1373 1372
1374 1373 def undelete(self, list):
1375 1374 pctxs = self.parents()
1376 1375 wlock = self._repo.wlock()
1377 1376 try:
1378 1377 for f in list:
1379 1378 if self._repo.dirstate[f] != 'r':
1380 1379 self._repo.ui.warn(_("%s not removed!\n") % f)
1381 1380 else:
1382 1381 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1383 1382 t = fctx.data()
1384 1383 self._repo.wwrite(f, t, fctx.flags())
1385 1384 self._repo.dirstate.normal(f)
1386 1385 finally:
1387 1386 wlock.release()
1388 1387
1389 1388 def copy(self, source, dest):
1390 1389 try:
1391 1390 st = self._repo.wvfs.lstat(dest)
1392 1391 except OSError, err:
1393 1392 if err.errno != errno.ENOENT:
1394 1393 raise
1395 1394 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1396 1395 return
1397 1396 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1398 1397 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1399 1398 "symbolic link\n") % dest)
1400 1399 else:
1401 1400 wlock = self._repo.wlock()
1402 1401 try:
1403 1402 if self._repo.dirstate[dest] in '?':
1404 1403 self._repo.dirstate.add(dest)
1405 1404 elif self._repo.dirstate[dest] in 'r':
1406 1405 self._repo.dirstate.normallookup(dest)
1407 1406 self._repo.dirstate.copy(source, dest)
1408 1407 finally:
1409 1408 wlock.release()
1410 1409
1411 1410 def _filtersuspectsymlink(self, files):
1412 1411 if not files or self._repo.dirstate._checklink:
1413 1412 return files
1414 1413
1415 1414 # Symlink placeholders may get non-symlink-like contents
1416 1415 # via user error or dereferencing by NFS or Samba servers,
1417 1416 # so we filter out any placeholders that don't look like a
1418 1417 # symlink
1419 1418 sane = []
1420 1419 for f in files:
1421 1420 if self.flags(f) == 'l':
1422 1421 d = self[f].data()
1423 1422 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1424 1423 self._repo.ui.debug('ignoring suspect symlink placeholder'
1425 1424 ' "%s"\n' % f)
1426 1425 continue
1427 1426 sane.append(f)
1428 1427 return sane
1429 1428
1430 1429 def _checklookup(self, files):
1431 1430 # check for any possibly clean files
1432 1431 if not files:
1433 1432 return [], []
1434 1433
1435 1434 modified = []
1436 1435 fixup = []
1437 1436 pctx = self._parents[0]
1438 1437 # do a full compare of any files that might have changed
1439 1438 for f in sorted(files):
1440 1439 if (f not in pctx or self.flags(f) != pctx.flags(f)
1441 1440 or pctx[f].cmp(self[f])):
1442 1441 modified.append(f)
1443 1442 else:
1444 1443 fixup.append(f)
1445 1444
1446 1445 # update dirstate for files that are actually clean
1447 1446 if fixup:
1448 1447 try:
1449 1448 # updating the dirstate is optional
1450 1449 # so we don't wait on the lock
1451 1450 # wlock can invalidate the dirstate, so cache normal _after_
1452 1451 # taking the lock
1453 1452 wlock = self._repo.wlock(False)
1454 1453 normal = self._repo.dirstate.normal
1455 1454 try:
1456 1455 for f in fixup:
1457 1456 normal(f)
1458 1457 finally:
1459 1458 wlock.release()
1460 1459 except error.LockError:
1461 1460 pass
1462 1461 return modified, fixup
1463 1462
1464 1463 def _manifestmatches(self, match, s):
1465 1464 """Slow path for workingctx
1466 1465
1467 1466 The fast path is when we compare the working directory to its parent
1468 1467 which means this function is comparing with a non-parent; therefore we
1469 1468 need to build a manifest and return what matches.
1470 1469 """
1471 1470 mf = self._repo['.']._manifestmatches(match, s)
1472 1471 for f in s.modified + s.added:
1473 1472 mf[f] = _newnode
1474 1473 mf.setflag(f, self.flags(f))
1475 1474 for f in s.removed:
1476 1475 if f in mf:
1477 1476 del mf[f]
1478 1477 return mf
1479 1478
1480 1479 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1481 1480 unknown=False):
1482 1481 '''Gets the status from the dirstate -- internal use only.'''
1483 1482 listignored, listclean, listunknown = ignored, clean, unknown
1484 1483 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1485 1484 subrepos = []
1486 1485 if '.hgsub' in self:
1487 1486 subrepos = sorted(self.substate)
1488 1487 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1489 1488 listclean, listunknown)
1490 1489
1491 1490 # check for any possibly clean files
1492 1491 if cmp:
1493 1492 modified2, fixup = self._checklookup(cmp)
1494 1493 s.modified.extend(modified2)
1495 1494
1496 1495 # update dirstate for files that are actually clean
1497 1496 if fixup and listclean:
1498 1497 s.clean.extend(fixup)
1499 1498
1500 1499 return s
1501 1500
1502 1501 def _buildstatus(self, other, s, match, listignored, listclean,
1503 1502 listunknown):
1504 1503 """build a status with respect to another context
1505 1504
1506 1505 This includes logic for maintaining the fast path of status when
1507 1506 comparing the working directory against its parent, which is to skip
1508 1507 building a new manifest if self (working directory) is not comparing
1509 1508 against its parent (repo['.']).
1510 1509 """
1511 1510 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1512 1511 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1513 1512 # might have accidentally ended up with the entire contents of the file
1514 1513 # they are supposed to be linking to.
1515 1514 s.modified[:] = self._filtersuspectsymlink(s.modified)
1516 1515 if other != self._repo['.']:
1517 1516 s = super(workingctx, self)._buildstatus(other, s, match,
1518 1517 listignored, listclean,
1519 1518 listunknown)
1520 1519 elif match.always():
1521 1520 # cache for performance
1522 1521 if s.unknown or s.ignored or s.clean:
1523 1522 # "_status" is cached with list*=False in the normal route
1524 1523 self._status = scmutil.status(s.modified, s.added, s.removed,
1525 1524 s.deleted, [], [], [])
1526 1525 else:
1527 1526 self._status = s
1528 1527 return s
1529 1528
1530 1529 def _matchstatus(self, other, match):
1531 1530 """override the match method with a filter for directory patterns
1532 1531
1533 1532 We use inheritance to customize the match.bad method only in cases of
1534 1533 workingctx since it belongs only to the working directory when
1535 1534 comparing against the parent changeset.
1536 1535
1537 1536 If we aren't comparing against the working directory's parent, then we
1538 1537 just use the default match object sent to us.
1539 1538 """
1540 1539 superself = super(workingctx, self)
1541 1540 match = superself._matchstatus(other, match)
1542 1541 if other != self._repo['.']:
1543 1542 def bad(f, msg):
1544 1543 # 'f' may be a directory pattern from 'match.files()',
1545 1544 # so 'f not in ctx1' is not enough
1546 1545 if f not in other and f not in other.dirs():
1547 1546 self._repo.ui.warn('%s: %s\n' %
1548 1547 (self._repo.dirstate.pathto(f), msg))
1549 1548 match.bad = bad
1550 1549 return match
1551 1550
1552 1551 class committablefilectx(basefilectx):
1553 1552 """A committablefilectx provides common functionality for a file context
1554 1553 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1555 1554 def __init__(self, repo, path, filelog=None, ctx=None):
1556 1555 self._repo = repo
1557 1556 self._path = path
1558 1557 self._changeid = None
1559 1558 self._filerev = self._filenode = None
1560 1559
1561 1560 if filelog is not None:
1562 1561 self._filelog = filelog
1563 1562 if ctx:
1564 1563 self._changectx = ctx
1565 1564
1566 1565 def __nonzero__(self):
1567 1566 return True
1568 1567
1569 1568 def parents(self):
1570 1569 '''return parent filectxs, following copies if necessary'''
1571 1570 def filenode(ctx, path):
1572 1571 return ctx._manifest.get(path, nullid)
1573 1572
1574 1573 path = self._path
1575 1574 fl = self._filelog
1576 1575 pcl = self._changectx._parents
1577 1576 renamed = self.renamed()
1578 1577
1579 1578 if renamed:
1580 1579 pl = [renamed + (None,)]
1581 1580 else:
1582 1581 pl = [(path, filenode(pcl[0], path), fl)]
1583 1582
1584 1583 for pc in pcl[1:]:
1585 1584 pl.append((path, filenode(pc, path), fl))
1586 1585
1587 1586 return [filectx(self._repo, p, fileid=n, filelog=l)
1588 1587 for p, n, l in pl if n != nullid]
1589 1588
1590 1589 def children(self):
1591 1590 return []
1592 1591
1593 1592 class workingfilectx(committablefilectx):
1594 1593 """A workingfilectx object makes access to data related to a particular
1595 1594 file in the working directory convenient."""
1596 1595 def __init__(self, repo, path, filelog=None, workingctx=None):
1597 1596 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1598 1597
1599 1598 @propertycache
1600 1599 def _changectx(self):
1601 1600 return workingctx(self._repo)
1602 1601
1603 1602 def data(self):
1604 1603 return self._repo.wread(self._path)
1605 1604 def renamed(self):
1606 1605 rp = self._repo.dirstate.copied(self._path)
1607 1606 if not rp:
1608 1607 return None
1609 1608 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1610 1609
1611 1610 def size(self):
1612 1611 return self._repo.wvfs.lstat(self._path).st_size
1613 1612 def date(self):
1614 1613 t, tz = self._changectx.date()
1615 1614 try:
1616 1615 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1617 1616 except OSError, err:
1618 1617 if err.errno != errno.ENOENT:
1619 1618 raise
1620 1619 return (t, tz)
1621 1620
1622 1621 def cmp(self, fctx):
1623 1622 """compare with other file context
1624 1623
1625 1624 returns True if different from fctx.
1626 1625 """
1627 1626 # fctx should be a filectx (not a workingfilectx)
1628 1627 # invert comparison to reuse the same code path
1629 1628 return fctx.cmp(self)
1630 1629
1631 1630 def remove(self, ignoremissing=False):
1632 1631 """wraps unlink for a repo's working directory"""
1633 1632 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1634 1633
1635 1634 def write(self, data, flags):
1636 1635 """wraps repo.wwrite"""
1637 1636 self._repo.wwrite(self._path, data, flags)
1638 1637
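# A minimal, hypothetical sketch (not part of the original module) of how
# workingfilectx is typically reached and used: repo[None] is the working
# directory context, and indexing it by path yields a workingfilectx.
# 'path' is assumed to be tracked in the first parent as well.
def _exampleworkingfile(repo, path):
    wfctx = repo[None][path]
    pfctx = repo['.'][path]        # the same file in the first parent
    # cmp() above delegates to the parent-side filectx; True means the
    # working copy differs from the committed revision
    return wfctx.cmp(pfctx), wfctx.size(), wfctx.date()
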
1639 1638 class workingcommitctx(workingctx):
1640 1639 """A workingcommitctx object makes access to data related to
1641 1640 the revision being committed convenient.
1642 1641
1643 1642 This hides changes in the working directory that aren't being
1644 1643 committed in this context.
1645 1644 """
1646 1645 def __init__(self, repo, changes,
1647 1646 text="", user=None, date=None, extra=None):
1648 1647 super(workingctx, self).__init__(repo, text, user, date, extra,
1649 1648 changes)
1650 1649
1651 1650 def _buildstatus(self, other, s, match,
1652 1651 listignored, listclean, listunknown):
1653 1652 """Prevent ``workingctx._buildstatus`` from changing ``self._status``
1654 1653 """
1655 1654 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1656 1655 if other != self._repo['.']:
1657 1656 # workingctx._buildstatus doesn't change self._status in this case
1658 1657 superself = super(workingcommitctx, self)
1659 1658 s = superself._buildstatus(other, s, match,
1660 1659 listignored, listclean, listunknown)
1661 1660 return s
1662 1661
1663 1662 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1664 1663 unknown=False):
1665 1664 """Return matched files only in ``self._status``
1666 1665
1667 1666 Uncommitted files appear "clean" via this context, even if
1668 1667 they aren't actually so in the working directory.
1669 1668 """
1670 1669 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1671 1670 if clean:
1672 1671 clean = [f for f in self._manifest if f not in self._changedset]
1673 1672 else:
1674 1673 clean = []
1675 1674 return scmutil.status([f for f in self._status.modified if match(f)],
1676 1675 [f for f in self._status.added if match(f)],
1677 1676 [f for f in self._status.removed if match(f)],
1678 1677 [], [], [], clean)
1679 1678
1680 1679 @propertycache
1681 1680 def _changedset(self):
1682 1681 """Return the set of files changed in this context
1683 1682 """
1684 1683 changed = set(self._status.modified)
1685 1684 changed.update(self._status.added)
1686 1685 changed.update(self._status.removed)
1687 1686 return changed
1688 1687
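# A minimal, hypothetical sketch (not part of the original module) of what
# workingcommitctx is for: during a partial commit (say 'hg commit file-a'
# while file-b is also modified), the context only reports the files being
# committed, and file-b shows up as clean via _dirstatestatus above.  The
# 'changes' argument is assumed to be a scmutil.status naming only file-a.
def _examplepartialcommitstatus(repo, changes):
    ctx = workingcommitctx(repo, changes, text='partial commit')
    return ctx._dirstatestatus(clean=True)
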
1689 1688 class memctx(committablectx):
1690 1689 """Use memctx to perform in-memory commits via localrepo.commitctx().
1691 1690
1692 1691 Revision information is supplied at initialization time, while the
1693 1692 data of related files is made available through a callback
1694 1693 mechanism. 'repo' is the current localrepo, 'parents' is a
1695 1694 sequence of two parent revision identifiers (pass None for every
1696 1695 missing parent), 'text' is the commit message and 'files' lists
1697 1696 the names of files touched by the revision (normalized and relative
1698 1697 to the repository root).
1699 1698
1700 1699 filectxfn(repo, memctx, path) is a callable receiving the
1701 1700 repository, the current memctx object and the normalized path of
1702 1701 the requested file, relative to the repository root. It is fired by
1703 1702 the commit function for every file in 'files', but the call order is
1704 1703 undefined. If the file is available in the revision being committed
1705 1704 (updated or added), filectxfn returns a memfilectx object. If the
1706 1705 file was removed, filectxfn returns None (Mercurial <= 3.1 expected
1707 1706 an IOError instead; see '_returnnoneformissingfiles' below). Moved
1708 1707 files are represented by marking the source file removed and the
1709 1708 new file added with copy information (see memfilectx).
1710 1709
1711 1710 'user' is the committer name and defaults to the current repository
1712 1711 username; 'date' is the commit date in any format supported by
1713 1712 util.parsedate() and defaults to the current date; 'extra' is a
1714 1713 dictionary of metadata or is left empty.
1715 1714 """
1716 1715
1717 1716 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1718 1717 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1719 1718 # this field to determine what to do in filectxfn.
1720 1719 _returnnoneformissingfiles = True
1721 1720
1722 1721 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1723 1722 date=None, extra=None, editor=False):
1724 1723 super(memctx, self).__init__(repo, text, user, date, extra)
1725 1724 self._rev = None
1726 1725 self._node = None
1727 1726 parents = [(p or nullid) for p in parents]
1728 1727 p1, p2 = parents
1729 1728 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1730 1729 files = sorted(set(files))
1731 1730 self._files = files
1732 1731 self.substate = {}
1733 1732
1734 1733 # if filectxfn is not callable, it maps paths to filectxs; wrap it in a function
1735 1734 if not callable(filectxfn):
1736 1735 def getfilectx(repo, memctx, path):
1737 1736 fctx = filectxfn[path]
1738 1737 # this is weird but apparently we only keep track of one parent
1739 1738 # (why not only store that instead of a tuple?)
1740 1739 copied = fctx.renamed()
1741 1740 if copied:
1742 1741 copied = copied[0]
1743 1742 return memfilectx(repo, path, fctx.data(),
1744 1743 islink=fctx.islink(), isexec=fctx.isexec(),
1745 1744 copied=copied, memctx=memctx)
1746 1745 self._filectxfn = getfilectx
1747 1746 else:
1748 1747 # "util.cachefunc" reduces invocation of possibly expensive
1749 1748 # "filectxfn" for performance (e.g. converting from another VCS)
1750 1749 self._filectxfn = util.cachefunc(filectxfn)
1751 1750
1752 1751 self._extra = extra and extra.copy() or {}
1753 1752 if self._extra.get('branch', '') == '':
1754 1753 self._extra['branch'] = 'default'
1755 1754
1756 1755 if editor:
1757 1756 self._text = editor(self._repo, self, [])
1758 1757 self._repo.savecommitmessage(self._text)
1759 1758
1760 1759 def filectx(self, path, filelog=None):
1761 1760 """get a file context from the working directory
1762 1761
1763 1762 Returns None if file doesn't exist and should be removed."""
1764 1763 return self._filectxfn(self._repo, self, path)
1765 1764
1766 1765 def commit(self):
1767 1766 """commit context to the repo"""
1768 1767 return self._repo.commitctx(self)
1769 1768
1770 1769 @propertycache
1771 1770 def _manifest(self):
1772 1771 """generate a manifest based on the return values of filectxfn"""
1773 1772
1774 1773 # keep this simple for now; just worry about p1
1775 1774 pctx = self._parents[0]
1776 1775 man = pctx.manifest().copy()
1777 1776
1778 1777 for f in self._status.modified:
1779 1778 p1node = nullid
1780 1779 p2node = nullid
1781 1780 p = pctx[f].parents() # if file isn't in pctx, check p2?
1782 1781 if len(p) > 0:
1783 1782 p1node = p[0].node()
1784 1783 if len(p) > 1:
1785 1784 p2node = p[1].node()
1786 1785 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1787 1786
1788 1787 for f in self._status.added:
1789 1788 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1790 1789
1791 1790 for f in self._status.removed:
1792 1791 if f in man:
1793 1792 del man[f]
1794 1793
1795 1794 return man
1796 1795
1797 1796 @propertycache
1798 1797 def _status(self):
1799 1798 """Calculate exact status from ``files`` specified at construction
1800 1799 """
1801 1800 man1 = self.p1().manifest()
1802 1801 p2 = self._parents[1]
1803 1802 # "1 < len(self._parents)" can't be used for checking
1804 1803 # existence of the 2nd parent, because "memctx._parents" is always
1805 1804 # explicitly initialized as a list of length 2.
1806 1805 if p2.node() != nullid:
1807 1806 man2 = p2.manifest()
1808 1807 managing = lambda f: f in man1 or f in man2
1809 1808 else:
1810 1809 managing = lambda f: f in man1
1811 1810
1812 1811 modified, added, removed = [], [], []
1813 1812 for f in self._files:
1814 1813 if not managing(f):
1815 1814 added.append(f)
1816 1815 elif self[f]:
1817 1816 modified.append(f)
1818 1817 else:
1819 1818 removed.append(f)
1820 1819
1821 1820 return scmutil.status(modified, added, removed, [], [], [], [])
1822 1821
1823 1822 class memfilectx(committablefilectx):
1824 1823 """memfilectx represents an in-memory file to commit.
1825 1824
1826 1825 See memctx and committablefilectx for more details.
1827 1826 """
1828 1827 def __init__(self, repo, path, data, islink=False,
1829 1828 isexec=False, copied=None, memctx=None):
1830 1829 """
1831 1830 path is the normalized file path relative to repository root.
1832 1831 data is the file content as a string.
1833 1832 islink is True if the file is a symbolic link.
1834 1833 isexec is True if the file is executable.
1835 1834 copied is the source file path if the current file was copied in the
1836 1835 revision being committed, or None."""
1837 1836 super(memfilectx, self).__init__(repo, path, None, memctx)
1838 1837 self._data = data
1839 1838 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1840 1839 self._copied = None
1841 1840 if copied:
1842 1841 self._copied = (copied, nullid)
1843 1842
1844 1843 def data(self):
1845 1844 return self._data
1846 1845 def size(self):
1847 1846 return len(self.data())
1848 1847 def flags(self):
1849 1848 return self._flags
1850 1849 def renamed(self):
1851 1850 return self._copied
1852 1851
1853 1852 def remove(self, ignoremissing=False):
1854 1853 """wraps unlink for a repo's working directory"""
1855 1854 # need to figure out what to do here
1856 1855 del self._changectx[self._path]
1857 1856
1858 1857 def write(self, data, flags):
1859 1858 """wraps repo.wwrite"""
1860 1859 self._data = data
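# A minimal, hypothetical sketch (not part of the original module) of how an
# extension might drive memctx/memfilectx to build a commit entirely in
# memory.  'snapshot' is an assumed dict mapping paths to file data, with
# None meaning "remove this file" (see _returnnoneformissingfiles above).
def _examplememcommit(repo, snapshot, text, user=None):
    def getfilectx(repo, memctx, path):
        data = snapshot[path]
        if data is None:
            return None            # file is removed in this commit
        return memfilectx(repo, path, data, memctx=memctx)
    parent = repo['.'].node()
    ctx = memctx(repo, [parent, None], text, sorted(snapshot),
                 getfilectx, user=user)
    return ctx.commit()            # hands off to repo.commitctx()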