context: use manifest.diff() to compute most of status...
Augie Fackler
r23755:d43948a9 default
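The core of this patch replaces the per-file manifest walk in basectx._buildstatus with a single manifest diff. As a rough, standalone illustration of the idea (plain dicts stand in for manifest objects here; Mercurial's real mf1.diff(mf2) returns {path: ((node1, flag1), (node2, flag2))}, as used in the hunk below):

# Illustrative sketch only -- plain dicts instead of Mercurial manifests.
def diff_manifests(mf1, mf2):
    """Return {path: (node1, node2)} for every path whose node differs."""
    d = {}
    for path in set(mf1) | set(mf2):
        n1, n2 = mf1.get(path), mf2.get(path)
        if n1 != n2:
            d[path] = (n1, n2)
    return d

def buckets(mf1, mf2):
    """Classify differing paths the way _buildstatus does."""
    added, removed, modified = [], [], []
    for path, (n1, n2) in diff_manifests(mf1, mf2).items():
        if n1 is None:           # only in mf2 -> added
            added.append(path)
        elif n2 is None:         # only in mf1 -> removed
            removed.append(path)
        else:                    # in both with different nodes -> modified
            modified.append(path)
    return added, removed, modified

mf1 = {'a.txt': 'n1', 'b.txt': 'n2', 'c.txt': 'n3'}
mf2 = {'a.txt': 'n1', 'b.txt': 'n2x', 'd.txt': 'n4'}
print(buckets(mf1, mf2))         # (['d.txt'], ['c.txt'], ['b.txt'])

The real implementation additionally special-cases the working-copy placeholder node (_newnode) and derives the clean set from the paths that do not appear in the diff at all.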
@@ -1,1849 +1,1860 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
26 26 """return the first ancestor of <srcrev> introducing <fnode>
27 27
28 28 If the linkrev of the file revision does not point to an ancestor of
29 29 srcrev, we'll walk down the ancestors until we find one introducing this
30 30 file revision.
31 31
32 32 :repo: a localrepository object (used to access changelog and manifest)
33 33 :path: the file path
34 34 :fnode: the nodeid of the file revision
35 35 :filelog: the filelog of this path
36 36 :srcrev: the changeset revision we search ancestors from
37 37 :inclusive: if true, the src revision will also be checked
38 38 """
39 39 cl = repo.unfiltered().changelog
40 40 ma = repo.manifest
41 41 # fetch the linkrev
42 42 fr = filelog.rev(fnode)
43 43 lkr = filelog.linkrev(fr)
44 44 # check if this linkrev is an ancestor of srcrev
45 45 anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
46 46 if lkr not in anc:
47 47 for a in anc:
48 48 ac = cl.read(a) # get changeset data (we avoid object creation).
49 49 if path in ac[3]: # checking the 'files' field.
50 50 # The file has been touched, check if the content is similar
51 51 # to the one we search for.
52 52 if fnode == ma.readdelta(ac[0]).get(path):
53 53 return a
54 54 # In theory, we should never get out of that loop without a result. But
55 55 # if manifest uses a buggy file revision (not a child of the one it
56 56 # replaces) we could. Such a buggy situation will likely result in a crash
57 57 # somewhere else at some point.
58 58 return lkr
59 59
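The ancestor walk in _adjustlinkrev above can be pictured with plain data structures. A toy sketch under those assumptions (ancestors, files_touched and manifest_node are hypothetical stand-ins for the changelog/manifest lookups, not Mercurial APIs):

# Toy sketch of the linkrev-adjustment fallback described above.
def adjust_linkrev(lkr, srcrev, path, fnode,
                   ancestors, files_touched, manifest_node):
    """Return the first ancestor of srcrev that introduced fnode for path."""
    ancs = set(ancestors(srcrev))   # ancestor revisions of srcrev
    if lkr in ancs:
        return lkr                  # fast path: the linkrev is already valid
    for a in sorted(ancs, reverse=True):   # ordering simplified in this sketch
        if path in files_touched(a) and manifest_node(a, path) == fnode:
            return a
    return lkr                      # buggy history: fall back to the linkrev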
60 60 class basectx(object):
61 61 """A basectx object represents the common logic for its children:
62 62 changectx: read-only context that is already present in the repo,
63 63 workingctx: a context that represents the working directory and can
64 64 be committed,
65 65 memctx: a context that represents changes in-memory and can also
66 66 be committed."""
67 67 def __new__(cls, repo, changeid='', *args, **kwargs):
68 68 if isinstance(changeid, basectx):
69 69 return changeid
70 70
71 71 o = super(basectx, cls).__new__(cls)
72 72
73 73 o._repo = repo
74 74 o._rev = nullrev
75 75 o._node = nullid
76 76
77 77 return o
78 78
79 79 def __str__(self):
80 80 return short(self.node())
81 81
82 82 def __int__(self):
83 83 return self.rev()
84 84
85 85 def __repr__(self):
86 86 return "<%s %s>" % (type(self).__name__, str(self))
87 87
88 88 def __eq__(self, other):
89 89 try:
90 90 return type(self) == type(other) and self._rev == other._rev
91 91 except AttributeError:
92 92 return False
93 93
94 94 def __ne__(self, other):
95 95 return not (self == other)
96 96
97 97 def __contains__(self, key):
98 98 return key in self._manifest
99 99
100 100 def __getitem__(self, key):
101 101 return self.filectx(key)
102 102
103 103 def __iter__(self):
104 104 for f in sorted(self._manifest):
105 105 yield f
106 106
107 107 def _manifestmatches(self, match, s):
108 108 """generate a new manifest filtered by the match argument
109 109
110 110 This method is for internal use only and mainly exists to provide an
111 111 object oriented way for other contexts to customize the manifest
112 112 generation.
113 113 """
114 114 return self.manifest().matches(match)
115 115
116 116 def _matchstatus(self, other, match):
117 117 """return match.always if match is None
118 118
119 119 This internal method provides a way for child objects to override the
120 120 match operator.
121 121 """
122 122 return match or matchmod.always(self._repo.root, self._repo.getcwd())
123 123
124 124 def _buildstatus(self, other, s, match, listignored, listclean,
125 125 listunknown):
126 126 """build a status with respect to another context"""
127 127 # Load earliest manifest first for caching reasons. More specifically,
128 128 # if you have revisions 1000 and 1001, 1001 is probably stored as a
129 129 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
130 130 # 1000 and cache it so that when you read 1001, we just need to apply a
131 131 # delta to what's in the cache. So that's one full reconstruction + one
132 132 # delta application.
133 133 if self.rev() is not None and self.rev() < other.rev():
134 134 self.manifest()
135 135 mf1 = other._manifestmatches(match, s)
136 136 mf2 = self._manifestmatches(match, s)
137 137
138 modified, added, clean = [], [], []
138 modified, added = [], []
139 removed = []
140 clean = set()
139 141 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
140 142 deletedset = set(deleted)
141 withflags = mf1.withflags() | mf2.withflags()
142 for fn, mf2node in mf2.iteritems():
143 d = mf1.diff(mf2)
144 for fn, ((node1, flag1), (node2, flag2)) in d.iteritems():
143 145 if fn in deletedset:
144 146 continue
145 if fn in mf1:
146 if ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
147 (mf1[fn] != mf2node and
148 (mf2node != _newnode or self[fn].cmp(other[fn])))):
147 if node1 is None:
148 added.append(fn)
149 elif node2 is None:
150 removed.append(fn)
151 elif node2 != _newnode:
152 # The file was not a new file in mf2, so an entry
153 # from diff is really a difference.
149 154 modified.append(fn)
150 elif listclean:
151 clean.append(fn)
152 del mf1[fn]
155 elif self[fn].cmp(other[fn]):
156 # node2 was newnode, but the working file doesn't
157 # match the one in mf1.
158 modified.append(fn)
153 159 else:
154 added.append(fn)
155 removed = mf1.keys()
160 clean.add(fn)
161 if listclean:
162 nondiff = (set(mf1) | set(mf2)) - set(d)
163 clean = list((clean | nondiff) - deletedset)
164 else:
165 clean = []
166
156 167 if removed:
157 168 # need to filter files if they are already reported as removed
158 169 unknown = [fn for fn in unknown if fn not in mf1]
159 170 ignored = [fn for fn in ignored if fn not in mf1]
160 171 # if they're deleted, don't report them as removed
161 172 removed = [fn for fn in removed if fn not in deletedset]
162 173
163 174 return scmutil.status(modified, added, removed, deleted, unknown,
164 175 ignored, clean)
165 176
166 177 @propertycache
167 178 def substate(self):
168 179 return subrepo.state(self, self._repo.ui)
169 180
170 181 def subrev(self, subpath):
171 182 return self.substate[subpath][1]
172 183
173 184 def rev(self):
174 185 return self._rev
175 186 def node(self):
176 187 return self._node
177 188 def hex(self):
178 189 return hex(self.node())
179 190 def manifest(self):
180 191 return self._manifest
181 192 def phasestr(self):
182 193 return phases.phasenames[self.phase()]
183 194 def mutable(self):
184 195 return self.phase() > phases.public
185 196
186 197 def getfileset(self, expr):
187 198 return fileset.getfileset(self, expr)
188 199
189 200 def obsolete(self):
190 201 """True if the changeset is obsolete"""
191 202 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
192 203
193 204 def extinct(self):
194 205 """True if the changeset is extinct"""
195 206 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
196 207
197 208 def unstable(self):
198 209 """True if the changeset is not obsolete but its ancestors are"""
199 210 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
200 211
201 212 def bumped(self):
202 213 """True if the changeset tries to be a successor of a public changeset
203 214
204 215 Only non-public and non-obsolete changesets may be bumped.
205 216 """
206 217 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
207 218
208 219 def divergent(self):
209 220 """Is a successor of a changeset with multiple possible successors set
210 221
211 222 Only non-public and non-obsolete changesets may be divergent.
212 223 """
213 224 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
214 225
215 226 def troubled(self):
216 227 """True if the changeset is either unstable, bumped or divergent"""
217 228 return self.unstable() or self.bumped() or self.divergent()
218 229
219 230 def troubles(self):
220 231 """return the list of troubles affecting this changeset.
221 232
222 233 Troubles are returned as strings. Possible values are:
223 234 - unstable,
224 235 - bumped,
225 236 - divergent.
226 237 """
227 238 troubles = []
228 239 if self.unstable():
229 240 troubles.append('unstable')
230 241 if self.bumped():
231 242 troubles.append('bumped')
232 243 if self.divergent():
233 244 troubles.append('divergent')
234 245 return troubles
235 246
236 247 def parents(self):
237 248 """return contexts for each parent changeset"""
238 249 return self._parents
239 250
240 251 def p1(self):
241 252 return self._parents[0]
242 253
243 254 def p2(self):
244 255 if len(self._parents) == 2:
245 256 return self._parents[1]
246 257 return changectx(self._repo, -1)
247 258
248 259 def _fileinfo(self, path):
249 260 if '_manifest' in self.__dict__:
250 261 try:
251 262 return self._manifest[path], self._manifest.flags(path)
252 263 except KeyError:
253 264 raise error.ManifestLookupError(self._node, path,
254 265 _('not found in manifest'))
255 266 if '_manifestdelta' in self.__dict__ or path in self.files():
256 267 if path in self._manifestdelta:
257 268 return (self._manifestdelta[path],
258 269 self._manifestdelta.flags(path))
259 270 node, flag = self._repo.manifest.find(self._changeset[0], path)
260 271 if not node:
261 272 raise error.ManifestLookupError(self._node, path,
262 273 _('not found in manifest'))
263 274
264 275 return node, flag
265 276
266 277 def filenode(self, path):
267 278 return self._fileinfo(path)[0]
268 279
269 280 def flags(self, path):
270 281 try:
271 282 return self._fileinfo(path)[1]
272 283 except error.LookupError:
273 284 return ''
274 285
275 286 def sub(self, path):
276 287 return subrepo.subrepo(self, path)
277 288
278 289 def match(self, pats=[], include=None, exclude=None, default='glob'):
279 290 r = self._repo
280 291 return matchmod.match(r.root, r.getcwd(), pats,
281 292 include, exclude, default,
282 293 auditor=r.auditor, ctx=self)
283 294
284 295 def diff(self, ctx2=None, match=None, **opts):
285 296 """Returns a diff generator for the given contexts and matcher"""
286 297 if ctx2 is None:
287 298 ctx2 = self.p1()
288 299 if ctx2 is not None:
289 300 ctx2 = self._repo[ctx2]
290 301 diffopts = patch.diffopts(self._repo.ui, opts)
291 302 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
292 303
293 304 @propertycache
294 305 def _dirs(self):
295 306 return scmutil.dirs(self._manifest)
296 307
297 308 def dirs(self):
298 309 return self._dirs
299 310
300 311 def dirty(self, missing=False, merge=True, branch=True):
301 312 return False
302 313
303 314 def status(self, other=None, match=None, listignored=False,
304 315 listclean=False, listunknown=False, listsubrepos=False):
305 316 """return status of files between two nodes or node and working
306 317 directory.
307 318
308 319 If other is None, compare this node with working directory.
309 320
310 321 returns (modified, added, removed, deleted, unknown, ignored, clean)
311 322 """
312 323
313 324 ctx1 = self
314 325 ctx2 = self._repo[other]
315 326
316 327 # This next code block is, admittedly, fragile logic that tests for
317 328 # reversing the contexts and wouldn't need to exist if it weren't for
318 329 # the fast (and common) code path of comparing the working directory
319 330 # with its first parent.
320 331 #
321 332 # What we're aiming for here is the ability to call:
322 333 #
323 334 # workingctx.status(parentctx)
324 335 #
325 336 # If we always built the manifest for each context and compared those,
326 337 # then we'd be done. But the special case of the above call means we
327 338 # just copy the manifest of the parent.
328 339 reversed = False
329 340 if (not isinstance(ctx1, changectx)
330 341 and isinstance(ctx2, changectx)):
331 342 reversed = True
332 343 ctx1, ctx2 = ctx2, ctx1
333 344
334 345 match = ctx2._matchstatus(ctx1, match)
335 346 r = scmutil.status([], [], [], [], [], [], [])
336 347 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
337 348 listunknown)
338 349
339 350 if reversed:
340 351 # Reverse added and removed. Clear deleted, unknown and ignored as
341 352 # these make no sense to reverse.
342 353 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
343 354 r.clean)
344 355
345 356 if listsubrepos:
346 357 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
347 358 rev2 = ctx2.subrev(subpath)
348 359 try:
349 360 submatch = matchmod.narrowmatcher(subpath, match)
350 361 s = sub.status(rev2, match=submatch, ignored=listignored,
351 362 clean=listclean, unknown=listunknown,
352 363 listsubrepos=True)
353 364 for rfiles, sfiles in zip(r, s):
354 365 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
355 366 except error.LookupError:
356 367 self._repo.ui.status(_("skipping missing "
357 368 "subrepository: %s\n") % subpath)
358 369
359 370 for l in r:
360 371 l.sort()
361 372
362 373 return r
363 374
364 375
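As a usage note for the status() API above: the scmutil.status result iterates as the documented 7-tuple of file lists. A hypothetical sketch (the repo argument is assumed to be an already-open localrepository object, which is not part of this patch):

# Hypothetical helper; 'repo' is assumed to be an open localrepository.
def print_wdir_status(repo):
    st = repo['.'].status(listclean=True)   # '.' vs. the working directory
    names = ('modified', 'added', 'removed', 'deleted',
             'unknown', 'ignored', 'clean')
    for name, files in zip(names, st):
        print('%s: %r' % (name, sorted(files)))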
365 376 def makememctx(repo, parents, text, user, date, branch, files, store,
366 377 editor=None):
367 378 def getfilectx(repo, memctx, path):
368 379 data, mode, copied = store.getfile(path)
369 380 if data is None:
370 381 return None
371 382 islink, isexec = mode
372 383 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
373 384 copied=copied, memctx=memctx)
374 385 extra = {}
375 386 if branch:
376 387 extra['branch'] = encoding.fromlocal(branch)
377 388 ctx = memctx(repo, parents, text, files, getfilectx, user,
378 389 date, extra, editor)
379 390 return ctx
380 391
381 392 class changectx(basectx):
382 393 """A changecontext object makes access to data related to a particular
383 394 changeset convenient. It represents a read-only context already present in
384 395 the repo."""
385 396 def __init__(self, repo, changeid=''):
386 397 """changeid is a revision number, node, or tag"""
387 398
388 399 # since basectx.__new__ already took care of copying the object, we
389 400 # don't need to do anything in __init__, so we just exit here
390 401 if isinstance(changeid, basectx):
391 402 return
392 403
393 404 if changeid == '':
394 405 changeid = '.'
395 406 self._repo = repo
396 407
397 408 try:
398 409 if isinstance(changeid, int):
399 410 self._node = repo.changelog.node(changeid)
400 411 self._rev = changeid
401 412 return
402 413 if isinstance(changeid, long):
403 414 changeid = str(changeid)
404 415 if changeid == '.':
405 416 self._node = repo.dirstate.p1()
406 417 self._rev = repo.changelog.rev(self._node)
407 418 return
408 419 if changeid == 'null':
409 420 self._node = nullid
410 421 self._rev = nullrev
411 422 return
412 423 if changeid == 'tip':
413 424 self._node = repo.changelog.tip()
414 425 self._rev = repo.changelog.rev(self._node)
415 426 return
416 427 if len(changeid) == 20:
417 428 try:
418 429 self._node = changeid
419 430 self._rev = repo.changelog.rev(changeid)
420 431 return
421 432 except error.FilteredRepoLookupError:
422 433 raise
423 434 except LookupError:
424 435 pass
425 436
426 437 try:
427 438 r = int(changeid)
428 439 if str(r) != changeid:
429 440 raise ValueError
430 441 l = len(repo.changelog)
431 442 if r < 0:
432 443 r += l
433 444 if r < 0 or r >= l:
434 445 raise ValueError
435 446 self._rev = r
436 447 self._node = repo.changelog.node(r)
437 448 return
438 449 except error.FilteredIndexError:
439 450 raise
440 451 except (ValueError, OverflowError, IndexError):
441 452 pass
442 453
443 454 if len(changeid) == 40:
444 455 try:
445 456 self._node = bin(changeid)
446 457 self._rev = repo.changelog.rev(self._node)
447 458 return
448 459 except error.FilteredLookupError:
449 460 raise
450 461 except (TypeError, LookupError):
451 462 pass
452 463
453 464 # lookup bookmarks through the name interface
454 465 try:
455 466 self._node = repo.names.singlenode(repo, changeid)
456 467 self._rev = repo.changelog.rev(self._node)
457 468 return
458 469 except KeyError:
459 470 pass
460 471 except error.FilteredRepoLookupError:
461 472 raise
462 473 except error.RepoLookupError:
463 474 pass
464 475
465 476 self._node = repo.unfiltered().changelog._partialmatch(changeid)
466 477 if self._node is not None:
467 478 self._rev = repo.changelog.rev(self._node)
468 479 return
469 480
470 481 # lookup failed
471 482 # check if it might have come from damaged dirstate
472 483 #
473 484 # XXX we could avoid the unfiltered if we had a recognizable
474 485 # exception for filtered changeset access
475 486 if changeid in repo.unfiltered().dirstate.parents():
476 487 msg = _("working directory has unknown parent '%s'!")
477 488 raise error.Abort(msg % short(changeid))
478 489 try:
479 490 if len(changeid) == 20:
480 491 changeid = hex(changeid)
481 492 except TypeError:
482 493 pass
483 494 except (error.FilteredIndexError, error.FilteredLookupError,
484 495 error.FilteredRepoLookupError):
485 496 if repo.filtername == 'visible':
486 497 msg = _("hidden revision '%s'") % changeid
487 498 hint = _('use --hidden to access hidden revisions')
488 499 raise error.FilteredRepoLookupError(msg, hint=hint)
489 500 msg = _("filtered revision '%s' (not in '%s' subset)")
490 501 msg %= (changeid, repo.filtername)
491 502 raise error.FilteredRepoLookupError(msg)
492 503 except IndexError:
493 504 pass
494 505 raise error.RepoLookupError(
495 506 _("unknown revision '%s'") % changeid)
496 507
497 508 def __hash__(self):
498 509 try:
499 510 return hash(self._rev)
500 511 except AttributeError:
501 512 return id(self)
502 513
503 514 def __nonzero__(self):
504 515 return self._rev != nullrev
505 516
506 517 @propertycache
507 518 def _changeset(self):
508 519 return self._repo.changelog.read(self.rev())
509 520
510 521 @propertycache
511 522 def _manifest(self):
512 523 return self._repo.manifest.read(self._changeset[0])
513 524
514 525 @propertycache
515 526 def _manifestdelta(self):
516 527 return self._repo.manifest.readdelta(self._changeset[0])
517 528
518 529 @propertycache
519 530 def _parents(self):
520 531 p = self._repo.changelog.parentrevs(self._rev)
521 532 if p[1] == nullrev:
522 533 p = p[:-1]
523 534 return [changectx(self._repo, x) for x in p]
524 535
525 536 def changeset(self):
526 537 return self._changeset
527 538 def manifestnode(self):
528 539 return self._changeset[0]
529 540
530 541 def user(self):
531 542 return self._changeset[1]
532 543 def date(self):
533 544 return self._changeset[2]
534 545 def files(self):
535 546 return self._changeset[3]
536 547 def description(self):
537 548 return self._changeset[4]
538 549 def branch(self):
539 550 return encoding.tolocal(self._changeset[5].get("branch"))
540 551 def closesbranch(self):
541 552 return 'close' in self._changeset[5]
542 553 def extra(self):
543 554 return self._changeset[5]
544 555 def tags(self):
545 556 return self._repo.nodetags(self._node)
546 557 def bookmarks(self):
547 558 return self._repo.nodebookmarks(self._node)
548 559 def phase(self):
549 560 return self._repo._phasecache.phase(self._repo, self._rev)
550 561 def hidden(self):
551 562 return self._rev in repoview.filterrevs(self._repo, 'visible')
552 563
553 564 def children(self):
554 565 """return contexts for each child changeset"""
555 566 c = self._repo.changelog.children(self._node)
556 567 return [changectx(self._repo, x) for x in c]
557 568
558 569 def ancestors(self):
559 570 for a in self._repo.changelog.ancestors([self._rev]):
560 571 yield changectx(self._repo, a)
561 572
562 573 def descendants(self):
563 574 for d in self._repo.changelog.descendants([self._rev]):
564 575 yield changectx(self._repo, d)
565 576
566 577 def filectx(self, path, fileid=None, filelog=None):
567 578 """get a file context from this changeset"""
568 579 if fileid is None:
569 580 fileid = self.filenode(path)
570 581 return filectx(self._repo, path, fileid=fileid,
571 582 changectx=self, filelog=filelog)
572 583
573 584 def ancestor(self, c2, warn=False):
574 585 """return the "best" ancestor context of self and c2
575 586
576 587 If there are multiple candidates, it will show a message and check
577 588 merge.preferancestor configuration before falling back to the
578 589 revlog ancestor."""
579 590 # deal with workingctxs
580 591 n2 = c2._node
581 592 if n2 is None:
582 593 n2 = c2._parents[0]._node
583 594 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
584 595 if not cahs:
585 596 anc = nullid
586 597 elif len(cahs) == 1:
587 598 anc = cahs[0]
588 599 else:
589 600 for r in self._repo.ui.configlist('merge', 'preferancestor'):
590 601 try:
591 602 ctx = changectx(self._repo, r)
592 603 except error.RepoLookupError:
593 604 continue
594 605 anc = ctx.node()
595 606 if anc in cahs:
596 607 break
597 608 else:
598 609 anc = self._repo.changelog.ancestor(self._node, n2)
599 610 if warn:
600 611 self._repo.ui.status(
601 612 (_("note: using %s as ancestor of %s and %s\n") %
602 613 (short(anc), short(self._node), short(n2))) +
603 614 ''.join(_(" alternatively, use --config "
604 615 "merge.preferancestor=%s\n") %
605 616 short(n) for n in sorted(cahs) if n != anc))
606 617 return changectx(self._repo, anc)
607 618
608 619 def descendant(self, other):
609 620 """True if other is descendant of this changeset"""
610 621 return self._repo.changelog.descendant(self._rev, other._rev)
611 622
612 623 def walk(self, match):
613 624 fset = set(match.files())
614 625 # for dirstate.walk, files=['.'] means "walk the whole tree".
615 626 # follow that here, too
616 627 fset.discard('.')
617 628
618 629 # avoid the entire walk if we're only looking for specific files
619 630 if fset and not match.anypats():
620 631 if util.all([fn in self for fn in fset]):
621 632 for fn in sorted(fset):
622 633 if match(fn):
623 634 yield fn
624 635 raise StopIteration
625 636
626 637 for fn in self:
627 638 if fn in fset:
628 639 # specified pattern is the exact name
629 640 fset.remove(fn)
630 641 if match(fn):
631 642 yield fn
632 643 for fn in sorted(fset):
633 644 if fn in self._dirs:
634 645 # specified pattern is a directory
635 646 continue
636 647 match.bad(fn, _('no such file in rev %s') % self)
637 648
638 649 def matches(self, match):
639 650 return self.walk(match)
640 651
641 652 class basefilectx(object):
642 653 """A filecontext object represents the common logic for its children:
643 654 filectx: read-only access to a filerevision that is already present
644 655 in the repo,
645 656 workingfilectx: a filecontext that represents files from the working
646 657 directory,
647 658 memfilectx: a filecontext that represents files in-memory."""
648 659 def __new__(cls, repo, path, *args, **kwargs):
649 660 return super(basefilectx, cls).__new__(cls)
650 661
651 662 @propertycache
652 663 def _filelog(self):
653 664 return self._repo.file(self._path)
654 665
655 666 @propertycache
656 667 def _changeid(self):
657 668 if '_changeid' in self.__dict__:
658 669 return self._changeid
659 670 elif '_changectx' in self.__dict__:
660 671 return self._changectx.rev()
661 672 else:
662 673 return self._filelog.linkrev(self._filerev)
663 674
664 675 @propertycache
665 676 def _filenode(self):
666 677 if '_fileid' in self.__dict__:
667 678 return self._filelog.lookup(self._fileid)
668 679 else:
669 680 return self._changectx.filenode(self._path)
670 681
671 682 @propertycache
672 683 def _filerev(self):
673 684 return self._filelog.rev(self._filenode)
674 685
675 686 @propertycache
676 687 def _repopath(self):
677 688 return self._path
678 689
679 690 def __nonzero__(self):
680 691 try:
681 692 self._filenode
682 693 return True
683 694 except error.LookupError:
684 695 # file is missing
685 696 return False
686 697
687 698 def __str__(self):
688 699 return "%s@%s" % (self.path(), self._changectx)
689 700
690 701 def __repr__(self):
691 702 return "<%s %s>" % (type(self).__name__, str(self))
692 703
693 704 def __hash__(self):
694 705 try:
695 706 return hash((self._path, self._filenode))
696 707 except AttributeError:
697 708 return id(self)
698 709
699 710 def __eq__(self, other):
700 711 try:
701 712 return (type(self) == type(other) and self._path == other._path
702 713 and self._filenode == other._filenode)
703 714 except AttributeError:
704 715 return False
705 716
706 717 def __ne__(self, other):
707 718 return not (self == other)
708 719
709 720 def filerev(self):
710 721 return self._filerev
711 722 def filenode(self):
712 723 return self._filenode
713 724 def flags(self):
714 725 return self._changectx.flags(self._path)
715 726 def filelog(self):
716 727 return self._filelog
717 728 def rev(self):
718 729 return self._changeid
719 730 def linkrev(self):
720 731 return self._filelog.linkrev(self._filerev)
721 732 def node(self):
722 733 return self._changectx.node()
723 734 def hex(self):
724 735 return self._changectx.hex()
725 736 def user(self):
726 737 return self._changectx.user()
727 738 def date(self):
728 739 return self._changectx.date()
729 740 def files(self):
730 741 return self._changectx.files()
731 742 def description(self):
732 743 return self._changectx.description()
733 744 def branch(self):
734 745 return self._changectx.branch()
735 746 def extra(self):
736 747 return self._changectx.extra()
737 748 def phase(self):
738 749 return self._changectx.phase()
739 750 def phasestr(self):
740 751 return self._changectx.phasestr()
741 752 def manifest(self):
742 753 return self._changectx.manifest()
743 754 def changectx(self):
744 755 return self._changectx
745 756
746 757 def path(self):
747 758 return self._path
748 759
749 760 def isbinary(self):
750 761 try:
751 762 return util.binary(self.data())
752 763 except IOError:
753 764 return False
754 765 def isexec(self):
755 766 return 'x' in self.flags()
756 767 def islink(self):
757 768 return 'l' in self.flags()
758 769
759 770 def cmp(self, fctx):
760 771 """compare with other file context
761 772
762 773 returns True if different than fctx.
763 774 """
764 775 if (fctx._filerev is None
765 776 and (self._repo._encodefilterpats
766 777 # if file data starts with '\1\n', empty metadata block is
767 778 # prepended, which adds 4 bytes to filelog.size().
768 779 or self.size() - 4 == fctx.size())
769 780 or self.size() == fctx.size()):
770 781 return self._filelog.cmp(self._filenode, fctx.data())
771 782
772 783 return True
773 784
774 785 def introrev(self):
775 786 """return the rev of the changeset which introduced this file revision
776 787
777 788 This method is different from linkrev because it takes into account the
778 789 changeset the filectx was created from. It ensures the returned
779 790 revision is one of its ancestors. This prevents bugs from
780 791 'linkrev-shadowing' when a file revision is used by multiple
781 792 changesets.
782 793 """
783 794 lkr = self.linkrev()
784 795 attrs = vars(self)
785 796 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
786 797 if noctx or self.rev() == lkr:
787 798 return self.linkrev()
788 799 return _adjustlinkrev(self._repo, self._path, self._filelog,
789 800 self._filenode, self.rev(), inclusive=True)
790 801
791 802 def parents(self):
792 803 _path = self._path
793 804 fl = self._filelog
794 805 parents = self._filelog.parents(self._filenode)
795 806 pl = [(_path, node, fl) for node in parents if node != nullid]
796 807
797 808 r = fl.renamed(self._filenode)
798 809 if r:
799 810 # - In the simple rename case, both parents are nullid, pl is empty.
800 811 # - In case of merge, only one of the parents is nullid and should
801 812 # be replaced with the rename information. This parent is -always-
802 813 # the first one.
803 814 #
804 815 # As nullid has always been filtered out in the previous list
805 816 # comprehension, inserting at 0 will always result in replacing the
806 817 # first nullid parent with the rename information.
807 818 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
808 819
809 820 ret = []
810 821 for path, fnode, l in pl:
811 822 if '_changeid' in vars(self) or '_changectx' in vars(self):
812 823 # If self is associated with a changeset (probably explicitly
813 824 # fed), ensure the created filectx is associated with a
814 825 # changeset that is an ancestor of self.changectx.
815 826 rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
816 827 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
817 828 changeid=rev)
818 829 else:
819 830 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
820 831 ret.append(fctx)
821 832 return ret
822 833
823 834 def p1(self):
824 835 return self.parents()[0]
825 836
826 837 def p2(self):
827 838 p = self.parents()
828 839 if len(p) == 2:
829 840 return p[1]
830 841 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
831 842
832 843 def annotate(self, follow=False, linenumber=None, diffopts=None):
833 844 '''returns a list of tuples of (ctx, line) for each line
834 845 in the file, where ctx is the filectx of the node where
835 846 that line was last changed.
836 847 This returns tuples of ((ctx, linenumber), line) for each line,
837 848 if "linenumber" parameter is NOT "None".
838 849 In such tuples, linenumber means one at the first appearance
839 850 in the managed file.
840 851 To reduce annotation cost,
841 852 this returns a fixed value (False is used) as linenumber,
842 853 if the "linenumber" parameter is "False".'''
843 854
844 855 if linenumber is None:
845 856 def decorate(text, rev):
846 857 return ([rev] * len(text.splitlines()), text)
847 858 elif linenumber:
848 859 def decorate(text, rev):
849 860 size = len(text.splitlines())
850 861 return ([(rev, i) for i in xrange(1, size + 1)], text)
851 862 else:
852 863 def decorate(text, rev):
853 864 return ([(rev, False)] * len(text.splitlines()), text)
854 865
855 866 def pair(parent, child):
856 867 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
857 868 refine=True)
858 869 for (a1, a2, b1, b2), t in blocks:
859 870 # Changed blocks ('!') or blocks made only of blank lines ('~')
860 871 # belong to the child.
861 872 if t == '=':
862 873 child[0][b1:b2] = parent[0][a1:a2]
863 874 return child
864 875
865 876 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
866 877
867 878 def parents(f):
868 879 pl = f.parents()
869 880
870 881 # Don't return renamed parents if we aren't following.
871 882 if not follow:
872 883 pl = [p for p in pl if p.path() == f.path()]
873 884
874 885 # renamed filectx won't have a filelog yet, so set it
875 886 # from the cache to save time
876 887 for p in pl:
877 888 if not '_filelog' in p.__dict__:
878 889 p._filelog = getlog(p.path())
879 890
880 891 return pl
881 892
882 893 # use linkrev to find the first changeset where self appeared
883 894 base = self
884 895 introrev = self.introrev()
885 896 if self.rev() != introrev:
886 897 base = filectx(self._repo, self._path, filelog=self.filelog(),
887 898 fileid=self.filenode(), changeid=introrev)
888 899
889 900 # This algorithm would prefer to be recursive, but Python is a
890 901 # bit recursion-hostile. Instead we do an iterative
891 902 # depth-first search.
892 903
893 904 visit = [base]
894 905 hist = {}
895 906 pcache = {}
896 907 needed = {base: 1}
897 908 while visit:
898 909 f = visit[-1]
899 910 pcached = f in pcache
900 911 if not pcached:
901 912 pcache[f] = parents(f)
902 913
903 914 ready = True
904 915 pl = pcache[f]
905 916 for p in pl:
906 917 if p not in hist:
907 918 ready = False
908 919 visit.append(p)
909 920 if not pcached:
910 921 needed[p] = needed.get(p, 0) + 1
911 922 if ready:
912 923 visit.pop()
913 924 reusable = f in hist
914 925 if reusable:
915 926 curr = hist[f]
916 927 else:
917 928 curr = decorate(f.data(), f)
918 929 for p in pl:
919 930 if not reusable:
920 931 curr = pair(hist[p], curr)
921 932 if needed[p] == 1:
922 933 del hist[p]
923 934 del needed[p]
924 935 else:
925 936 needed[p] -= 1
926 937
927 938 hist[f] = curr
928 939 pcache[f] = []
929 940
930 941 return zip(hist[base][0], hist[base][1].splitlines(True))
931 942
932 943 def ancestors(self, followfirst=False):
933 944 visit = {}
934 945 c = self
935 946 cut = followfirst and 1 or None
936 947 while True:
937 948 for parent in c.parents()[:cut]:
938 949 visit[(parent.rev(), parent.node())] = parent
939 950 if not visit:
940 951 break
941 952 c = visit.pop(max(visit))
942 953 yield c
943 954
944 955 class filectx(basefilectx):
945 956 """A filecontext object makes access to data related to a particular
946 957 filerevision convenient."""
947 958 def __init__(self, repo, path, changeid=None, fileid=None,
948 959 filelog=None, changectx=None):
949 960 """changeid can be a changeset revision, node, or tag.
950 961 fileid can be a file revision or node."""
951 962 self._repo = repo
952 963 self._path = path
953 964
954 965 assert (changeid is not None
955 966 or fileid is not None
956 967 or changectx is not None), \
957 968 ("bad args: changeid=%r, fileid=%r, changectx=%r"
958 969 % (changeid, fileid, changectx))
959 970
960 971 if filelog is not None:
961 972 self._filelog = filelog
962 973
963 974 if changeid is not None:
964 975 self._changeid = changeid
965 976 if changectx is not None:
966 977 self._changectx = changectx
967 978 if fileid is not None:
968 979 self._fileid = fileid
969 980
970 981 @propertycache
971 982 def _changectx(self):
972 983 try:
973 984 return changectx(self._repo, self._changeid)
974 985 except error.FilteredRepoLookupError:
975 986 # Linkrev may point to any revision in the repository. When the
976 987 # repository is filtered this may lead to `filectx` trying to build
977 988 # `changectx` for a filtered revision. In such a case we fall back to
978 989 # creating `changectx` on the unfiltered version of the repository.
979 990 # This fallback should not be an issue because `changectx` from
980 991 # `filectx` are not used in complex operations that care about
981 992 # filtering.
982 993 #
983 994 # This fallback is a cheap and dirty fix that prevents several
984 995 # crashes. It does not ensure the behavior is correct. However the
985 996 # behavior was not correct before filtering either and "incorrect
986 997 # behavior" is seen as better than a "crash".
987 998 #
988 999 # Linkrevs have several serious problems with filtering that are
989 1000 # complicated to solve. Proper handling of the issue here should be
990 1001 # considered when solving the linkrev issues is on the table.
991 1002 return changectx(self._repo.unfiltered(), self._changeid)
992 1003
993 1004 def filectx(self, fileid):
994 1005 '''opens an arbitrary revision of the file without
995 1006 opening a new filelog'''
996 1007 return filectx(self._repo, self._path, fileid=fileid,
997 1008 filelog=self._filelog)
998 1009
999 1010 def data(self):
1000 1011 try:
1001 1012 return self._filelog.read(self._filenode)
1002 1013 except error.CensoredNodeError:
1003 1014 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1004 1015 return ""
1005 1016 raise util.Abort(_("censored node: %s") % short(self._filenode),
1006 1017 hint=_("set censor.policy to ignore errors"))
1007 1018
1008 1019 def size(self):
1009 1020 return self._filelog.size(self._filerev)
1010 1021
1011 1022 def renamed(self):
1012 1023 """check if file was actually renamed in this changeset revision
1013 1024
1014 1025 If a rename is logged in the file revision, we report a copy for the changeset only
1015 1026 if the file revision's linkrev points back to the changeset in question
1016 1027 or both changeset parents contain different file revisions.
1017 1028 """
1018 1029
1019 1030 renamed = self._filelog.renamed(self._filenode)
1020 1031 if not renamed:
1021 1032 return renamed
1022 1033
1023 1034 if self.rev() == self.linkrev():
1024 1035 return renamed
1025 1036
1026 1037 name = self.path()
1027 1038 fnode = self._filenode
1028 1039 for p in self._changectx.parents():
1029 1040 try:
1030 1041 if fnode == p.filenode(name):
1031 1042 return None
1032 1043 except error.LookupError:
1033 1044 pass
1034 1045 return renamed
1035 1046
1036 1047 def children(self):
1037 1048 # hard for renames
1038 1049 c = self._filelog.children(self._filenode)
1039 1050 return [filectx(self._repo, self._path, fileid=x,
1040 1051 filelog=self._filelog) for x in c]
1041 1052
1042 1053 class committablectx(basectx):
1043 1054 """A committablectx object provides common functionality for a context that
1044 1055 wants the ability to commit, e.g. workingctx or memctx."""
1045 1056 def __init__(self, repo, text="", user=None, date=None, extra=None,
1046 1057 changes=None):
1047 1058 self._repo = repo
1048 1059 self._rev = None
1049 1060 self._node = None
1050 1061 self._text = text
1051 1062 if date:
1052 1063 self._date = util.parsedate(date)
1053 1064 if user:
1054 1065 self._user = user
1055 1066 if changes:
1056 1067 self._status = changes
1057 1068
1058 1069 self._extra = {}
1059 1070 if extra:
1060 1071 self._extra = extra.copy()
1061 1072 if 'branch' not in self._extra:
1062 1073 try:
1063 1074 branch = encoding.fromlocal(self._repo.dirstate.branch())
1064 1075 except UnicodeDecodeError:
1065 1076 raise util.Abort(_('branch name not in UTF-8!'))
1066 1077 self._extra['branch'] = branch
1067 1078 if self._extra['branch'] == '':
1068 1079 self._extra['branch'] = 'default'
1069 1080
1070 1081 def __str__(self):
1071 1082 return str(self._parents[0]) + "+"
1072 1083
1073 1084 def __nonzero__(self):
1074 1085 return True
1075 1086
1076 1087 def _buildflagfunc(self):
1077 1088 # Create a fallback function for getting file flags when the
1078 1089 # filesystem doesn't support them
1079 1090
1080 1091 copiesget = self._repo.dirstate.copies().get
1081 1092
1082 1093 if len(self._parents) < 2:
1083 1094 # when we have one parent, it's easy: copy from parent
1084 1095 man = self._parents[0].manifest()
1085 1096 def func(f):
1086 1097 f = copiesget(f, f)
1087 1098 return man.flags(f)
1088 1099 else:
1089 1100 # merges are tricky: we try to reconstruct the unstored
1090 1101 # result from the merge (issue1802)
1091 1102 p1, p2 = self._parents
1092 1103 pa = p1.ancestor(p2)
1093 1104 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1094 1105
1095 1106 def func(f):
1096 1107 f = copiesget(f, f) # may be wrong for merges with copies
1097 1108 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1098 1109 if fl1 == fl2:
1099 1110 return fl1
1100 1111 if fl1 == fla:
1101 1112 return fl2
1102 1113 if fl2 == fla:
1103 1114 return fl1
1104 1115 return '' # punt for conflicts
1105 1116
1106 1117 return func
1107 1118
1108 1119 @propertycache
1109 1120 def _flagfunc(self):
1110 1121 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1111 1122
1112 1123 @propertycache
1113 1124 def _manifest(self):
1114 1125 """generate a manifest corresponding to the values in self._status
1115 1126
1116 1127 This reuses the file nodeid from the parent, but we append an extra letter
1117 1128 when modified. Modified files get an extra 'm' while added files get
1118 1129 an extra 'a'. This is used by manifest merge to see that files
1119 1130 are different and by update logic to avoid deleting newly added files.
1120 1131 """
1121 1132
1122 1133 man1 = self._parents[0].manifest()
1123 1134 man = man1.copy()
1124 1135 if len(self._parents) > 1:
1125 1136 man2 = self.p2().manifest()
1126 1137 def getman(f):
1127 1138 if f in man1:
1128 1139 return man1
1129 1140 return man2
1130 1141 else:
1131 1142 getman = lambda f: man1
1132 1143
1133 1144 copied = self._repo.dirstate.copies()
1134 1145 ff = self._flagfunc
1135 1146 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1136 1147 for f in l:
1137 1148 orig = copied.get(f, f)
1138 1149 man[f] = getman(orig).get(orig, nullid) + i
1139 1150 try:
1140 1151 man.setflag(f, ff(f))
1141 1152 except OSError:
1142 1153 pass
1143 1154
1144 1155 for f in self._status.deleted + self._status.removed:
1145 1156 if f in man:
1146 1157 del man[f]
1147 1158
1148 1159 return man
1149 1160
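A toy illustration of the 'extra letter' trick described in the docstring above, using a plain dict in place of a manifest object (nullid is simulated here as 20 NUL bytes):

# Toy sketch: working-copy changes are marked by appending a letter to
# the parent's nodeid so merge/update logic sees the entries as changed.
parent_man = {'a.txt': 'deadbeef', 'b.txt': 'cafebabe'}
added, modified = ['c.txt'], ['b.txt']

man = dict(parent_man)
for letter, files in (('a', added), ('m', modified)):
    for f in files:
        man[f] = parent_man.get(f, '\0' * 20) + letter

print(sorted(man.items()))   # b.txt -> 'cafebabem', c.txt -> 20 NULs + 'a'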
1150 1161 @propertycache
1151 1162 def _status(self):
1152 1163 return self._repo.status()
1153 1164
1154 1165 @propertycache
1155 1166 def _user(self):
1156 1167 return self._repo.ui.username()
1157 1168
1158 1169 @propertycache
1159 1170 def _date(self):
1160 1171 return util.makedate()
1161 1172
1162 1173 def subrev(self, subpath):
1163 1174 return None
1164 1175
1165 1176 def user(self):
1166 1177 return self._user or self._repo.ui.username()
1167 1178 def date(self):
1168 1179 return self._date
1169 1180 def description(self):
1170 1181 return self._text
1171 1182 def files(self):
1172 1183 return sorted(self._status.modified + self._status.added +
1173 1184 self._status.removed)
1174 1185
1175 1186 def modified(self):
1176 1187 return self._status.modified
1177 1188 def added(self):
1178 1189 return self._status.added
1179 1190 def removed(self):
1180 1191 return self._status.removed
1181 1192 def deleted(self):
1182 1193 return self._status.deleted
1183 1194 def branch(self):
1184 1195 return encoding.tolocal(self._extra['branch'])
1185 1196 def closesbranch(self):
1186 1197 return 'close' in self._extra
1187 1198 def extra(self):
1188 1199 return self._extra
1189 1200
1190 1201 def tags(self):
1191 1202 t = []
1192 1203 for p in self.parents():
1193 1204 t.extend(p.tags())
1194 1205 return t
1195 1206
1196 1207 def bookmarks(self):
1197 1208 b = []
1198 1209 for p in self.parents():
1199 1210 b.extend(p.bookmarks())
1200 1211 return b
1201 1212
1202 1213 def phase(self):
1203 1214 phase = phases.draft # default phase to draft
1204 1215 for p in self.parents():
1205 1216 phase = max(phase, p.phase())
1206 1217 return phase
1207 1218
1208 1219 def hidden(self):
1209 1220 return False
1210 1221
1211 1222 def children(self):
1212 1223 return []
1213 1224
1214 1225 def flags(self, path):
1215 1226 if '_manifest' in self.__dict__:
1216 1227 try:
1217 1228 return self._manifest.flags(path)
1218 1229 except KeyError:
1219 1230 return ''
1220 1231
1221 1232 try:
1222 1233 return self._flagfunc(path)
1223 1234 except OSError:
1224 1235 return ''
1225 1236
1226 1237 def ancestor(self, c2):
1227 1238 """return the "best" ancestor context of self and c2"""
1228 1239 return self._parents[0].ancestor(c2) # punt on two parents for now
1229 1240
1230 1241 def walk(self, match):
1231 1242 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1232 1243 True, False))
1233 1244
1234 1245 def matches(self, match):
1235 1246 return sorted(self._repo.dirstate.matches(match))
1236 1247
1237 1248 def ancestors(self):
1238 1249 for p in self._parents:
1239 1250 yield p
1240 1251 for a in self._repo.changelog.ancestors(
1241 1252 [p.rev() for p in self._parents]):
1242 1253 yield changectx(self._repo, a)
1243 1254
1244 1255 def markcommitted(self, node):
1245 1256 """Perform post-commit cleanup necessary after committing this ctx
1246 1257
1247 1258 Specifically, this updates backing stores this working context
1248 1259 wraps to reflect the fact that the changes reflected by this
1249 1260 workingctx have been committed. For example, it marks
1250 1261 modified and added files as normal in the dirstate.
1251 1262
1252 1263 """
1253 1264
1254 1265 self._repo.dirstate.beginparentchange()
1255 1266 for f in self.modified() + self.added():
1256 1267 self._repo.dirstate.normal(f)
1257 1268 for f in self.removed():
1258 1269 self._repo.dirstate.drop(f)
1259 1270 self._repo.dirstate.setparents(node)
1260 1271 self._repo.dirstate.endparentchange()
1261 1272
1262 1273 def dirs(self):
1263 1274 return self._repo.dirstate.dirs()
1264 1275
1265 1276 class workingctx(committablectx):
1266 1277 """A workingctx object makes access to data related to
1267 1278 the current working directory convenient.
1268 1279 date - any valid date string or (unixtime, offset), or None.
1269 1280 user - username string, or None.
1270 1281 extra - a dictionary of extra values, or None.
1271 1282 changes - a list of file lists as returned by localrepo.status()
1272 1283 or None to use the repository status.
1273 1284 """
1274 1285 def __init__(self, repo, text="", user=None, date=None, extra=None,
1275 1286 changes=None):
1276 1287 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1277 1288
1278 1289 def __iter__(self):
1279 1290 d = self._repo.dirstate
1280 1291 for f in d:
1281 1292 if d[f] != 'r':
1282 1293 yield f
1283 1294
1284 1295 def __contains__(self, key):
1285 1296 return self._repo.dirstate[key] not in "?r"
1286 1297
1287 1298 @propertycache
1288 1299 def _parents(self):
1289 1300 p = self._repo.dirstate.parents()
1290 1301 if p[1] == nullid:
1291 1302 p = p[:-1]
1292 1303 return [changectx(self._repo, x) for x in p]
1293 1304
1294 1305 def filectx(self, path, filelog=None):
1295 1306 """get a file context from the working directory"""
1296 1307 return workingfilectx(self._repo, path, workingctx=self,
1297 1308 filelog=filelog)
1298 1309
1299 1310 def dirty(self, missing=False, merge=True, branch=True):
1300 1311 "check whether a working directory is modified"
1301 1312 # check subrepos first
1302 1313 for s in sorted(self.substate):
1303 1314 if self.sub(s).dirty():
1304 1315 return True
1305 1316 # check current working dir
1306 1317 return ((merge and self.p2()) or
1307 1318 (branch and self.branch() != self.p1().branch()) or
1308 1319 self.modified() or self.added() or self.removed() or
1309 1320 (missing and self.deleted()))
1310 1321
1311 1322 def add(self, list, prefix=""):
1312 1323 join = lambda f: os.path.join(prefix, f)
1313 1324 wlock = self._repo.wlock()
1314 1325 ui, ds = self._repo.ui, self._repo.dirstate
1315 1326 try:
1316 1327 rejected = []
1317 1328 lstat = self._repo.wvfs.lstat
1318 1329 for f in list:
1319 1330 scmutil.checkportable(ui, join(f))
1320 1331 try:
1321 1332 st = lstat(f)
1322 1333 except OSError:
1323 1334 ui.warn(_("%s does not exist!\n") % join(f))
1324 1335 rejected.append(f)
1325 1336 continue
1326 1337 if st.st_size > 10000000:
1327 1338 ui.warn(_("%s: up to %d MB of RAM may be required "
1328 1339 "to manage this file\n"
1329 1340 "(use 'hg revert %s' to cancel the "
1330 1341 "pending addition)\n")
1331 1342 % (f, 3 * st.st_size // 1000000, join(f)))
1332 1343 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1333 1344 ui.warn(_("%s not added: only files and symlinks "
1334 1345 "supported currently\n") % join(f))
1335 1346 rejected.append(f)
1336 1347 elif ds[f] in 'amn':
1337 1348 ui.warn(_("%s already tracked!\n") % join(f))
1338 1349 elif ds[f] == 'r':
1339 1350 ds.normallookup(f)
1340 1351 else:
1341 1352 ds.add(f)
1342 1353 return rejected
1343 1354 finally:
1344 1355 wlock.release()
1345 1356
1346 1357 def forget(self, files, prefix=""):
1347 1358 join = lambda f: os.path.join(prefix, f)
1348 1359 wlock = self._repo.wlock()
1349 1360 try:
1350 1361 rejected = []
1351 1362 for f in files:
1352 1363 if f not in self._repo.dirstate:
1353 1364 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1354 1365 rejected.append(f)
1355 1366 elif self._repo.dirstate[f] != 'a':
1356 1367 self._repo.dirstate.remove(f)
1357 1368 else:
1358 1369 self._repo.dirstate.drop(f)
1359 1370 return rejected
1360 1371 finally:
1361 1372 wlock.release()
1362 1373
1363 1374 def undelete(self, list):
1364 1375 pctxs = self.parents()
1365 1376 wlock = self._repo.wlock()
1366 1377 try:
1367 1378 for f in list:
1368 1379 if self._repo.dirstate[f] != 'r':
1369 1380 self._repo.ui.warn(_("%s not removed!\n") % f)
1370 1381 else:
1371 1382 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1372 1383 t = fctx.data()
1373 1384 self._repo.wwrite(f, t, fctx.flags())
1374 1385 self._repo.dirstate.normal(f)
1375 1386 finally:
1376 1387 wlock.release()
1377 1388
1378 1389 def copy(self, source, dest):
1379 1390 try:
1380 1391 st = self._repo.wvfs.lstat(dest)
1381 1392 except OSError, err:
1382 1393 if err.errno != errno.ENOENT:
1383 1394 raise
1384 1395 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1385 1396 return
1386 1397 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1387 1398 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1388 1399 "symbolic link\n") % dest)
1389 1400 else:
1390 1401 wlock = self._repo.wlock()
1391 1402 try:
1392 1403 if self._repo.dirstate[dest] in '?':
1393 1404 self._repo.dirstate.add(dest)
1394 1405 elif self._repo.dirstate[dest] in 'r':
1395 1406 self._repo.dirstate.normallookup(dest)
1396 1407 self._repo.dirstate.copy(source, dest)
1397 1408 finally:
1398 1409 wlock.release()
1399 1410
1400 1411 def _filtersuspectsymlink(self, files):
1401 1412 if not files or self._repo.dirstate._checklink:
1402 1413 return files
1403 1414
1404 1415 # Symlink placeholders may get non-symlink-like contents
1405 1416 # via user error or dereferencing by NFS or Samba servers,
1406 1417 # so we filter out any placeholders that don't look like a
1407 1418 # symlink
1408 1419 sane = []
1409 1420 for f in files:
1410 1421 if self.flags(f) == 'l':
1411 1422 d = self[f].data()
1412 1423 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1413 1424 self._repo.ui.debug('ignoring suspect symlink placeholder'
1414 1425 ' "%s"\n' % f)
1415 1426 continue
1416 1427 sane.append(f)
1417 1428 return sane
1418 1429
1419 1430 def _checklookup(self, files):
1420 1431 # check for any possibly clean files
1421 1432 if not files:
1422 1433 return [], []
1423 1434
1424 1435 modified = []
1425 1436 fixup = []
1426 1437 pctx = self._parents[0]
1427 1438 # do a full compare of any files that might have changed
1428 1439 for f in sorted(files):
1429 1440 if (f not in pctx or self.flags(f) != pctx.flags(f)
1430 1441 or pctx[f].cmp(self[f])):
1431 1442 modified.append(f)
1432 1443 else:
1433 1444 fixup.append(f)
1434 1445
1435 1446 # update dirstate for files that are actually clean
1436 1447 if fixup:
1437 1448 try:
1438 1449 # updating the dirstate is optional
1439 1450 # so we don't wait on the lock
1440 1451 # wlock can invalidate the dirstate, so cache normal _after_
1441 1452 # taking the lock
1442 1453 wlock = self._repo.wlock(False)
1443 1454 normal = self._repo.dirstate.normal
1444 1455 try:
1445 1456 for f in fixup:
1446 1457 normal(f)
1447 1458 finally:
1448 1459 wlock.release()
1449 1460 except error.LockError:
1450 1461 pass
1451 1462 return modified, fixup
1452 1463
1453 1464 def _manifestmatches(self, match, s):
1454 1465 """Slow path for workingctx
1455 1466
1456 1467 The fast path is when we compare the working directory to its parent
1457 1468 which means this function is comparing with a non-parent; therefore we
1458 1469 need to build a manifest and return what matches.
1459 1470 """
1460 1471 mf = self._repo['.']._manifestmatches(match, s)
1461 1472 for f in s.modified + s.added:
1462 1473 mf[f] = _newnode
1463 1474 mf.setflag(f, self.flags(f))
1464 1475 for f in s.removed:
1465 1476 if f in mf:
1466 1477 del mf[f]
1467 1478 return mf
1468 1479
1469 1480 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1470 1481 unknown=False):
1471 1482 '''Gets the status from the dirstate -- internal use only.'''
1472 1483 listignored, listclean, listunknown = ignored, clean, unknown
1473 1484 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1474 1485 subrepos = []
1475 1486 if '.hgsub' in self:
1476 1487 subrepos = sorted(self.substate)
1477 1488 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1478 1489 listclean, listunknown)
1479 1490
1480 1491 # check for any possibly clean files
1481 1492 if cmp:
1482 1493 modified2, fixup = self._checklookup(cmp)
1483 1494 s.modified.extend(modified2)
1484 1495
1485 1496 # update dirstate for files that are actually clean
1486 1497 if fixup and listclean:
1487 1498 s.clean.extend(fixup)
1488 1499
1489 1500 return s
1490 1501
1491 1502 def _buildstatus(self, other, s, match, listignored, listclean,
1492 1503 listunknown):
1493 1504 """build a status with respect to another context
1494 1505
1495 1506 This includes logic for maintaining the fast path of status when
1496 1507 comparing the working directory against its parent, which is to skip
1497 1508 building a new manifest if self (working directory) is not comparing
1498 1509 against its parent (repo['.']).
1499 1510 """
1500 1511 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1501 1512 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1502 1513 # might have accidentally ended up with the entire contents of the file
1503 1514 # they are supposed to be linking to.
1504 1515 s.modified[:] = self._filtersuspectsymlink(s.modified)
1505 1516 if other != self._repo['.']:
1506 1517 s = super(workingctx, self)._buildstatus(other, s, match,
1507 1518 listignored, listclean,
1508 1519 listunknown)
1509 1520 elif match.always():
1510 1521 # cache for performance
1511 1522 if s.unknown or s.ignored or s.clean:
1512 1523 # "_status" is cached with list*=False in the normal route
1513 1524 self._status = scmutil.status(s.modified, s.added, s.removed,
1514 1525 s.deleted, [], [], [])
1515 1526 else:
1516 1527 self._status = s
1517 1528 return s
1518 1529
1519 1530 def _matchstatus(self, other, match):
1520 1531 """override the match method with a filter for directory patterns
1521 1532
1522 1533 We use inheritance to customize the match.bad method only in cases of
1523 1534 workingctx since it belongs only to the working directory when
1524 1535 comparing against the parent changeset.
1525 1536
1526 1537 If we aren't comparing against the working directory's parent, then we
1527 1538 just use the default match object sent to us.
1528 1539 """
1529 1540 superself = super(workingctx, self)
1530 1541 match = superself._matchstatus(other, match)
1531 1542 if other != self._repo['.']:
1532 1543 def bad(f, msg):
1533 1544 # 'f' may be a directory pattern from 'match.files()',
1534 1545 # so 'f not in ctx1' is not enough
1535 1546 if f not in other and f not in other.dirs():
1536 1547 self._repo.ui.warn('%s: %s\n' %
1537 1548 (self._repo.dirstate.pathto(f), msg))
1538 1549 match.bad = bad
1539 1550 return match
1540 1551
1541 1552 class committablefilectx(basefilectx):
1542 1553 """A committablefilectx provides common functionality for a file context
1543 1554 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1544 1555 def __init__(self, repo, path, filelog=None, ctx=None):
1545 1556 self._repo = repo
1546 1557 self._path = path
1547 1558 self._changeid = None
1548 1559 self._filerev = self._filenode = None
1549 1560
1550 1561 if filelog is not None:
1551 1562 self._filelog = filelog
1552 1563 if ctx:
1553 1564 self._changectx = ctx
1554 1565
1555 1566 def __nonzero__(self):
1556 1567 return True
1557 1568
1558 1569 def parents(self):
1559 1570 '''return parent filectxs, following copies if necessary'''
1560 1571 def filenode(ctx, path):
1561 1572 return ctx._manifest.get(path, nullid)
1562 1573
1563 1574 path = self._path
1564 1575 fl = self._filelog
1565 1576 pcl = self._changectx._parents
1566 1577 renamed = self.renamed()
1567 1578
1568 1579 if renamed:
1569 1580 pl = [renamed + (None,)]
1570 1581 else:
1571 1582 pl = [(path, filenode(pcl[0], path), fl)]
1572 1583
1573 1584 for pc in pcl[1:]:
1574 1585 pl.append((path, filenode(pc, path), fl))
1575 1586
1576 1587 return [filectx(self._repo, p, fileid=n, filelog=l)
1577 1588 for p, n, l in pl if n != nullid]
1578 1589
1579 1590 def children(self):
1580 1591 return []
1581 1592
1582 1593 class workingfilectx(committablefilectx):
1583 1594 """A workingfilectx object makes access to data related to a particular
1584 1595 file in the working directory convenient."""
1585 1596 def __init__(self, repo, path, filelog=None, workingctx=None):
1586 1597 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1587 1598
1588 1599 @propertycache
1589 1600 def _changectx(self):
1590 1601 return workingctx(self._repo)
1591 1602
1592 1603 def data(self):
1593 1604 return self._repo.wread(self._path)
1594 1605 def renamed(self):
1595 1606 rp = self._repo.dirstate.copied(self._path)
1596 1607 if not rp:
1597 1608 return None
1598 1609 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1599 1610
1600 1611 def size(self):
1601 1612 return self._repo.wvfs.lstat(self._path).st_size
1602 1613 def date(self):
1603 1614 t, tz = self._changectx.date()
1604 1615 try:
1605 1616 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1606 1617 except OSError, err:
1607 1618 if err.errno != errno.ENOENT:
1608 1619 raise
1609 1620 return (t, tz)
1610 1621
1611 1622 def cmp(self, fctx):
1612 1623 """compare with other file context
1613 1624
1614 1625 returns True if different from fctx.
1615 1626 """
1616 1627 # fctx should be a filectx (not a workingfilectx)
1617 1628 # invert comparison to reuse the same code path
1618 1629 return fctx.cmp(self)
1619 1630
1620 1631 def remove(self, ignoremissing=False):
1621 1632 """wraps unlink for a repo's working directory"""
1622 1633 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1623 1634
1624 1635 def write(self, data, flags):
1625 1636 """wraps repo.wwrite"""
1626 1637 self._repo.wwrite(self._path, data, flags)
1627 1638
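# A small, hypothetical sketch (not part of Mercurial itself) of typical
# workingfilectx use: comparing a tracked file against its first parent.
# 'repo' is assumed to be an open localrepository and 'path' a file that
# exists both in the working directory and in repo['.'].
def _example_inspect_workingfile(repo, path):
    wfctx = repo[None][path]       # workingfilectx for 'path'
    pfctx = repo['.'][path]        # committed filectx to compare against
    changed = wfctx.cmp(pfctx)     # True if the working copy differs
    # size() and date() lstat the file; write()/remove() would wrap
    # repo.wwrite and util.unlinkpath respectively
    return wfctx.size(), wfctx.date(), changed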
1628 1639 class workingcommitctx(workingctx):
1629 1640 """A workingcommitctx object makes access to data related to
1630 1641 the revision being committed convenient.
1631 1642
1632 1643 This hides changes in the working directory, if they aren't
1633 1644 committed in this context.
1634 1645 """
1635 1646 def __init__(self, repo, changes,
1636 1647 text="", user=None, date=None, extra=None):
1637 1648 super(workingctx, self).__init__(repo, text, user, date, extra,
1638 1649 changes)
1639 1650
1640 1651 def _buildstatus(self, other, s, match,
1641 1652 listignored, listclean, listunknown):
1642 1653 """Prevent ``workingctx._buildstatus`` from changing ``self._status``
1643 1654 """
1644 1655 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1645 1656 if other != self._repo['.']:
1646 1657 # workingctx._buildstatus doesn't change self._status in this case
1647 1658 superself = super(workingcommitctx, self)
1648 1659 s = superself._buildstatus(other, s, match,
1649 1660 listignored, listclean, listunknown)
1650 1661 return s
1651 1662
1652 1663 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1653 1664 unknown=False):
1654 1665 """Return matched files only in ``self._status``
1655 1666
1656 1667 Files changed in the working directory but not part of this
1657 1668 commit are reported as "clean" by this context.
1658 1669 """
1659 1670 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1660 1671 if clean:
1661 1672 clean = [f for f in self._manifest if f not in self._changedset]
1662 1673 else:
1663 1674 clean = []
1664 1675 return scmutil.status([f for f in self._status.modified if match(f)],
1665 1676 [f for f in self._status.added if match(f)],
1666 1677 [f for f in self._status.removed if match(f)],
1667 1678 [], [], [], clean)
1668 1679
1669 1680 @propertycache
1670 1681 def _changedset(self):
1671 1682 """Return the set of files changed in this context
1672 1683 """
1673 1684 changed = set(self._status.modified)
1674 1685 changed.update(self._status.added)
1675 1686 changed.update(self._status.removed)
1676 1687 return changed
1677 1688
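# A hypothetical sketch (not part of Mercurial itself) of why
# workingcommitctx takes an explicit status: only the files listed in
# 'changes' are visible to the commit, so unrelated dirty files stay
# hidden (e.g. 'hg commit some-file').  'repo' is assumed to be an open
# localrepository and 'modified' a list of tracked files to commit.
def _example_partial_commit_ctx(repo, modified, text):
    # scmutil.status takes (modified, added, removed, deleted, unknown,
    # ignored, clean); files not listed here are hidden from the commit
    changes = scmutil.status(list(modified), [], [], [], [], [], [])
    return workingcommitctx(repo, changes, text=text)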
1678 1689 class memctx(committablectx):
1679 1690 """Use memctx to perform in-memory commits via localrepo.commitctx().
1680 1691
1681 1692 Revision information is supplied at initialization time, while the
1682 1693 data of the related files is made available through a callback
1683 1694 mechanism. 'repo' is the current localrepo, 'parents' is a
1684 1695 sequence of two parent revision identifiers (pass None for every
1685 1696 missing parent), 'text' is the commit message and 'files' lists the
1686 1697 names of files touched by the revision (normalized and relative to
1687 1698 the repository root).
1688 1699
1689 1700 filectxfn(repo, memctx, path) is a callable receiving the
1690 1701 repository, the current memctx object and the normalized path of
1691 1702 the requested file, relative to the repository root. It is fired by
1692 1703 the commit function for every file in 'files', but the call order
1693 1704 is undefined. If the file is available in the revision being
1694 1705 committed (updated or added), filectxfn returns a memfilectx
1695 1706 object. If the file was removed, filectxfn returns None (Mercurial
1696 1707 <= 3.1 expected it to raise IOError instead). Moved files are
1697 1708 represented by marking the source file removed and the new file
1698 1709 added with copy information (see memfilectx).
1699 1710
1700 1711 'user' is the committer name and defaults to the current
1701 1712 repository username, 'date' is the commit date in any format
1702 1713 supported by util.parsedate() and defaults to the current date,
1703 1714 and 'extra' is a dictionary of metadata or is left empty.
1704 1715 """
1705 1716
1706 1717 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1707 1718 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1708 1719 # this field to determine what to do in filectxfn.
1709 1720 _returnnoneformissingfiles = True
1710 1721
1711 1722 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1712 1723 date=None, extra=None, editor=False):
1713 1724 super(memctx, self).__init__(repo, text, user, date, extra)
1714 1725 self._rev = None
1715 1726 self._node = None
1716 1727 parents = [(p or nullid) for p in parents]
1717 1728 p1, p2 = parents
1718 1729 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1719 1730 files = sorted(set(files))
1720 1731 self._files = files
1721 1732 self.substate = {}
1722 1733
1723 1734 # if filectxfn is not callable (e.g. a dict of filectx), wrap it in a function
1724 1735 if not callable(filectxfn):
1725 1736 def getfilectx(repo, memctx, path):
1726 1737 fctx = filectxfn[path]
1727 1738 # this is weird but apparently we only keep track of one parent
1728 1739 # (why not only store that instead of a tuple?)
1729 1740 copied = fctx.renamed()
1730 1741 if copied:
1731 1742 copied = copied[0]
1732 1743 return memfilectx(repo, path, fctx.data(),
1733 1744 islink=fctx.islink(), isexec=fctx.isexec(),
1734 1745 copied=copied, memctx=memctx)
1735 1746 self._filectxfn = getfilectx
1736 1747 else:
1737 1748 # "util.cachefunc" reduces invocation of possibly expensive
1738 1749 # "filectxfn" for performance (e.g. converting from another VCS)
1739 1750 self._filectxfn = util.cachefunc(filectxfn)
1740 1751
1741 1752 self._extra = extra and extra.copy() or {}
1742 1753 if self._extra.get('branch', '') == '':
1743 1754 self._extra['branch'] = 'default'
1744 1755
1745 1756 if editor:
1746 1757 self._text = editor(self._repo, self, [])
1747 1758 self._repo.savecommitmessage(self._text)
1748 1759
1749 1760 def filectx(self, path, filelog=None):
1750 1761 """get a file context from the working directory
1751 1762
1752 1763 Returns None if file doesn't exist and should be removed."""
1753 1764 return self._filectxfn(self._repo, self, path)
1754 1765
1755 1766 def commit(self):
1756 1767 """commit context to the repo"""
1757 1768 return self._repo.commitctx(self)
1758 1769
1759 1770 @propertycache
1760 1771 def _manifest(self):
1761 1772 """generate a manifest based on the return values of filectxfn"""
1762 1773
1763 1774 # keep this simple for now; just worry about p1
1764 1775 pctx = self._parents[0]
1765 1776 man = pctx.manifest().copy()
1766 1777
1767 1778 for f in self._status.modified:
1768 1779 p1node = nullid
1769 1780 p2node = nullid
1770 1781 p = pctx[f].parents() # if file isn't in pctx, check p2?
1771 1782 if len(p) > 0:
1772 1783 p1node = p[0].node()
1773 1784 if len(p) > 1:
1774 1785 p2node = p[1].node()
1775 1786 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1776 1787
1777 1788 for f in self._status.added:
1778 1789 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1779 1790
1780 1791 for f in self._status.removed:
1781 1792 if f in man:
1782 1793 del man[f]
1783 1794
1784 1795 return man
1785 1796
1786 1797 @propertycache
1787 1798 def _status(self):
1788 1799 """Calculate exact status from ``files`` specified at construction
1789 1800 """
1790 1801 man1 = self.p1().manifest()
1791 1802 p2 = self._parents[1]
1792 1803 # "1 < len(self._parents)" can't be used for checking
1793 1804 # existence of the 2nd parent, because "memctx._parents" is
1794 1805 # explicitly initialized by the list, of which length is 2.
1795 1806 if p2.node() != nullid:
1796 1807 man2 = p2.manifest()
1797 1808 managing = lambda f: f in man1 or f in man2
1798 1809 else:
1799 1810 managing = lambda f: f in man1
1800 1811
1801 1812 modified, added, removed = [], [], []
1802 1813 for f in self._files:
1803 1814 if not managing(f):
1804 1815 added.append(f)
1805 1816 elif self[f]:
1806 1817 modified.append(f)
1807 1818 else:
1808 1819 removed.append(f)
1809 1820
1810 1821 return scmutil.status(modified, added, removed, [], [], [], [])
1811 1822
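# A minimal, hypothetical sketch (not part of Mercurial itself) of an
# in-memory commit of a single file via memctx and memfilectx.  'repo' is
# assumed to be an open localrepository; locking and error handling are
# omitted, and the helper name is invented for illustration.
def _example_memctx_commit(repo, path, data, text, user=None):
    def filectxfn(repo, mctx, fpath):
        # files present in the commit get a memfilectx; returning None
        # instead would mark the file as removed (Mercurial <= 3.1
        # expected an IOError for that case)
        return memfilectx(repo, fpath, data, islink=False, isexec=False,
                          copied=None, memctx=mctx)
    parents = (repo['.'].node(), None)   # None stands in for a missing p2
    mctx = memctx(repo, parents, text, [path], filectxfn, user=user)
    return repo.commitctx(mctx)          # same as mctx.commit()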
1812 1823 class memfilectx(committablefilectx):
1813 1824 """memfilectx represents an in-memory file to commit.
1814 1825
1815 1826 See memctx and committablefilectx for more details.
1816 1827 """
1817 1828 def __init__(self, repo, path, data, islink=False,
1818 1829 isexec=False, copied=None, memctx=None):
1819 1830 """
1820 1831 path is the normalized file path relative to repository root.
1821 1832 data is the file content as a string.
1822 1833 islink is True if the file is a symbolic link.
1823 1834 isexec is True if the file is executable.
1824 1835 copied is the source file path if current file was copied in the
1825 1836 revision being committed, or None."""
1826 1837 super(memfilectx, self).__init__(repo, path, None, memctx)
1827 1838 self._data = data
1828 1839 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1829 1840 self._copied = None
1830 1841 if copied:
1831 1842 self._copied = (copied, nullid)
1832 1843
1833 1844 def data(self):
1834 1845 return self._data
1835 1846 def size(self):
1836 1847 return len(self.data())
1837 1848 def flags(self):
1838 1849 return self._flags
1839 1850 def renamed(self):
1840 1851 return self._copied
1841 1852
1842 1853 def remove(self, ignoremissing=False):
1843 1854 """wraps unlink for a repo's working directory"""
1844 1855 # need to figure out what to do here
1845 1856 del self._changectx[self._path]
1846 1857
1847 1858 def write(self, data, flags):
1848 1859 """wraps repo.wwrite"""
1849 1860 self._data = data
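# A short, hypothetical sketch (not part of Mercurial itself) showing what
# memfilectx records for an executable file added as a copy of 'src'.
# 'repo' and 'mctx' are assumed to exist; the path and data are invented.
def _example_copied_exec_memfilectx(repo, mctx, src):
    mfctx = memfilectx(repo, 'bin/tool', '#!/bin/sh\necho hi\n',
                       islink=False, isexec=True, copied=src, memctx=mctx)
    assert mfctx.flags() == 'x'              # 'l' for links, 'x' for exec
    assert mfctx.renamed() == (src, nullid)  # copy source plus null node
    return mfctx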