##// END OF EJS Templates
manifest: add matches() method...
Martin von Zweigbergk -
r23305:0cc283f4 default
parent child Browse files
Show More
@@ -1,1688 +1,1676 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through is the identity operation:
        # basectx(repo, ctx) returns ctx itself, unmodified.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare equal only when of the same concrete type and
        # pointing at the same revision; anything else is unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # "path in ctx" tests manifest membership
        return key in self._manifest

    def __getitem__(self, key):
        # "ctx[path]" yields a file context for that path
        return self.filectx(key)

    def __iter__(self):
        # iterate tracked file names in sorted order
        for f in sorted(self._manifest):
            yield f

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        # All filtering (including the fast paths for always-matching and
        # exact-file matchers) now lives in manifest.matches(); the removed
        # open-coded copy/intersect logic was folded into that method.
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added, clean = [], [], []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        # files carrying flags ('x'/'l') in either manifest; a flag change
        # alone is enough to report a file as modified
        withflags = mf1.withflags() | mf2.withflags()
        for fn, mf2node in mf2.iteritems():
            if fn in mf1:
                if (fn not in deletedset and
                    ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                     (mf1[fn] != mf2node and
                      (mf2node or self[fn].cmp(other[fn]))))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                # consume the entry so mf1 ends up holding only removals
                del mf1[fn]
            elif fn not in deletedset:
                added.append(fn)
        removed = mf1.keys()
        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # subrepo path -> (source, node, kind) state mapping
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        # no second parent: return the null context
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising ManifestLookupError
        when path is not tracked in this changeset"""
        if '_manifest' in self.__dict__:
            # full manifest already loaded: cheap lookup
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # fall back to the (smaller) manifest delta when available
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # untracked files have no flags
            return ''

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def match(self, pats=[], include=None, exclude=None, default='glob'):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r
332 320
333 321
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build a memctx whose file contents are served from *store*.

    The returned in-memory context uses a closure over *store* to lazily
    materialize memfilectx objects; a None data value from the store means
    the file was removed.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            # file deleted in this commit
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    extra = {}
    if branch:
        extra = {'branch': encoding.fromlocal(branch)}
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
349 337
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        # empty changeid means the working directory's first parent
        if changeid == '':
            changeid = '.'
        self._repo = repo

        # The cascade below tries cheaper/unambiguous interpretations of
        # changeid first (int rev, '.', 'null', 'tip', binary node), then
        # string forms (decimal rev, hex node), then names (bookmarks, tags,
        # branches), and finally unique hex prefixes. Filtered-access errors
        # are deliberately re-raised so the outer handler can produce the
        # "hidden revision" / "filtered revision" messages.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == '.':
                self._node = repo.dirstate.p1()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            # a 20-byte string is assumed to be a binary node
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # a decimal string that round-trips through int() is a revision
            # number; negative values count from the end of the changelog
            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # a 40-char string is assumed to be a full hex node
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # named lookups: bookmark, then tag, then branch head
            if changeid in repo._bookmarks:
                self._node = repo._bookmarks[changeid]
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid in repo._tagscache.tags:
                self._node = repo._tagscache.tags[changeid]
                self._rev = repo.changelog.rev(self._node)
                return
            try:
                self._node = repo.branchtip(changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: unique hex-prefix match on the unfiltered repo
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make the failing changeid printable if it was a binary node
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # the revision exists but is filtered out of the current view
            if repo.filtername == 'visible':
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # changelog entry tuple: (manifest node, user, date, files, desc, extra)
        return self._repo.changelog.read(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        # drop a null second parent so single-parent changesets yield one ctx
        if p[1] == nullrev:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def changeset(self):
        return self._changeset
    def manifestnode(self):
        return self._changeset[0]

    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset[5]
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # ambiguous: honor merge.preferancestor, else revlog's choice
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # avoid the entire walk if we're only looking for specific files
        if fset and not match.anypats():
            if util.all([fn in self for fn in fset]):
                for fn in sorted(fset):
                    if match(fn):
                        yield fn
                raise StopIteration

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn
        # anything left in fset was requested but not found in the manifest
        for fn in sorted(fset):
            if fn in self._dirs:
                # specified pattern is a directory
                continue
            match.bad(fn, _('no such file in rev %s') % self)

    def matches(self, match):
        return self.walk(match)
614 602
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # prefer an explicitly supplied changeid, then the changectx's
        # revision, and only fall back to the (possibly wrong on renames)
        # linkrev as a last resort
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equal when same concrete type, path and file node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # Only pay for a full content comparison when a cheap size check
        # cannot rule out equality.
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        # sizes differ: contents must differ
        return True

    def parents(self):
        _path = self._path
        fl = self._filelog
        pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]

        # a rename replaces the first parent with the copy source; its
        # filelog is unknown here, hence None
        r = self._filelog.renamed(self._filenode)
        if r:
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # synthesize a null-revision filectx when there is no second parent
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        # choose the per-line decoration once up front, based on whether
        # line numbers are wanted (and whether they must be accurate)
        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            # propagate annotations from parent to child across unchanged
            # ('=') diff blocks
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filenode())
        else:
            base = self

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # hist maps each visited filectx to its computed annotation;
        # needed refcounts outstanding users so entries can be freed as
        # soon as every child has consumed them
        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    # drop the parent's annotation once its last consumer
                    # (this node) has used it, to bound memory use
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))

    def ancestors(self, followfirst=False):
        # walk file ancestors in decreasing (rev, node) order; cut=1
        # restricts the walk to first parents only
        visit = {}
        c = self
        cut = followfirst and 1 or None
        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.rev(), parent.node())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning down the revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # pre-populate the propertycaches for whichever pieces the caller
        # already has, so they are not recomputed lazily
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.RepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content is replaced by an empty string when the
            # censor.policy configuration allows it; otherwise abort
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # linkrev points elsewhere: only report the copy when neither
        # changeset parent already contains this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
977 965
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # Only set the optional attributes when given so the propertycache
        # fallbacks (_date, _user, _status) stay usable otherwise.
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        # a "+" suffix marks an uncommitted context
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        """Flag lookup, preferring the dirstate's native support."""
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status"""

        man = self._parents[0].manifest().copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man:
                    return man
                return man2
        else:
            getman = lambda f: man

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        # added/modified entries get the parent nodeid suffixed with a
        # marker ("a"/"m") as a placeholder nodeid
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Default status: working directory vs. its parent."""
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # uncommitted contexts have no recorded subrepo revision
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        """Sorted list of files touched (modified+added+removed)."""
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    # thin accessors over the cached status tuple
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def unknown(self):
        return self._status.unknown
    def ignored(self):
        return self._status.ignored
    def clean(self):
        return self._status.clean
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        """Union of the parents' tags."""
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        """Union of the parents' bookmarks."""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        """Highest (most private) phase among the parents."""
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        # an uncommitted context can have no children
        return []

    def flags(self, path):
        """Return the flags ('l'/'x'/'') for path, '' on any lookup error."""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        """Walk the dirstate with match, yielding sorted file names."""
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        """Sorted list of tracked files matching match."""
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        """Yield changectxs for all ancestors of the parents."""
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()
1197 1185
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # yield tracked files, skipping removed ('r') entries
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # tracked means: not unknown ('?') and not removed ('r')
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        """Parent changectxs; a null second parent is dropped."""
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule files for addition; return the rejected subset.

        NOTE(review): the 'list' parameter shadows the builtin; renaming
        would be an interface change, so only noted here.
        """
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but do not reject, very large files
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        """Stop tracking files without deleting them; return rejects."""
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # scheduled-for-add files are simply dropped
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        """Restore removed files from a parent revision's content."""
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    # prefer p1's copy, fall back to p2
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        """Record that dest was copied from source in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?r':
                    self._repo.dirstate.add(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        """Drop files flagged as symlinks whose content can't be a link."""
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """Split possibly-clean files into (modified, fixup) lists.

        'fixup' files proved clean on full comparison and are marked
        normal in the dirstate (best-effort, without waiting on the lock).
        """
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = None
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        self._status = s
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1464 1452
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        # no revision exists yet, so changeid/filerev/filenode stay None
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # only pre-populate the lazy attributes when values were supplied
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (source path, source filenode, no filelog yet)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # skip parents where the file does not exist (null filenode)
        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file revision has no children
        return []
1505 1493
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        """Default changectx: the working directory context."""
        return workingctx(self._repo)

    def data(self):
        """Read the file content from the working directory."""
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (source, source filenode in p1) or None if not copied."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """File mtime with the changectx's timezone; fall back to the
        changectx date when the file is missing."""
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1551 1539
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing (None) parents default to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        # all given files are treated as "modified"; other buckets empty
        self._status = scmutil.status(files, [], [], [], [], [], [])
        self._filectxfn = filectxfn
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f, fnode in man.iteritems():
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
            if len(p) > 1:
                p2node = p[1].node()
            # rehash with this context's data so changed files get new nodes
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        return man
1649 1637
1650 1638
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # encode the two booleans as the usual flag string
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        """Return the in-memory content."""
        return self._data

    def size(self):
        """Return the content length in bytes."""
        return len(self._data)

    def flags(self):
        """Return the flag string ('l' and/or 'x')."""
        return self._flags

    def renamed(self):
        """Return (source path, nullid) if copied, else None."""
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
@@ -1,268 +1,284 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import mdiff, parsers, error, revlog, util
10 10 import array, struct
11 11
class manifestdict(dict):
    """A manifest: a mapping of file path -> nodeid, plus a side table
    (_flags) of per-file flag strings ('l' symlink, 'x' executable).
    """
    def __init__(self, mapping=None, flags=None):
        # mapping: initial path -> nodeid entries; flags: path -> flags
        if mapping is None:
            mapping = {}
        if flags is None:
            flags = {}
        dict.__init__(self, mapping)
        self._flags = flags
    def flags(self, f):
        """Return the flag string for path f ('' if none recorded)."""
        return self._flags.get(f, "")
    def withflags(self):
        """Return the set of paths that have a flag entry."""
        return set(self._flags.keys())
    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._flags[f] = flags
    def copy(self):
        # copy the flags dict too, so the copy is fully independent
        return manifestdict(self, dict.copy(self._flags))
    def intersectfiles(self, files):
        '''make a new manifestdict with the intersection of self with files

        The algorithm assumes that files is much smaller than self.'''
        ret = manifestdict()
        for fn in files:
            if fn in self:
                ret[fn] = self[fn]
                flags = self._flags.get(fn, None)
                if flags:
                    ret._flags[fn] = flags
        return ret

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            # matcher accepts everything; hand back a full copy
            return self.copy()

        files = match.files()
        if (match.matchfn == match.exact or
            (not match.anypats() and util.all(fn in self for fn in files))):
            # an exact matcher (or a pattern-free one whose listed files
            # all exist here) can only select the listed files, so the
            # cheaper intersection path applies
            return self.intersectfiles(files)

        # general case: copy, then drop every entry the matcher rejects
        mf = self.copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    def diff(self, m2):
        '''Finds changes between the current manifest and m2. The result is
        returned as a dict with filename as key and values of the form
        ((n1,fl1),(n2,fl2)), where n1/n2 is the nodeid in the current/other
        manifest and fl1/fl2 is the flag in the current/other manifest. Where
        the file does not exist, the nodeid will be None and the flags will be
        the empty string.'''
        diff = {}

        # pass 1: files present here -- modified or removed relative to m2
        for fn, n1 in self.iteritems():
            fl1 = self._flags.get(fn, '')
            n2 = m2.get(fn, None)
            fl2 = m2._flags.get(fn, '')
            if n2 is None:
                fl2 = ''
            if n1 != n2 or fl1 != fl2:
                diff[fn] = ((n1, fl1), (n2, fl2))

        # pass 2: files only in m2 -- added relative to self
        for fn, n2 in m2.iteritems():
            if fn not in self:
                fl2 = m2._flags.get(fn, '')
                diff[fn] = ((None, ''), (n2, fl2))

        return diff

    def text(self):
        """Get the full data of this manifest as a bytestring."""
        fl = sorted(self)
        _checkforbidden(fl)

        hex, flags = revlog.hex, self.flags
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)

    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.

        changes is a sorted list of (filename, todelete) pairs; returns
        (arraytext, deltatext): the patched text as an array and the
        binary delta for addrevision.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        # start with a readonly loop that finds the offset of
        # each line and creates the deltas
        for f, todelete in changes:
            # bs will either be the index of the item or the insert point
            start, end = _msearch(addbuf, f, start)
            if not todelete:
                l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
            else:
                if start == end:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                        _("failed to remove %s from manifest") % f)
                l = ""
            if dstart is not None and dstart <= start and dend >= start:
                # change touches/adjoins the pending hunk: extend it
                if dend < end:
                    dend = end
                if l:
                    dline.append(l)
            else:
                # flush the pending hunk and start a new one
                if dstart is not None:
                    delta.append([dstart, dend, "".join(dline)])
                dstart = start
                dend = end
                dline = [l]

        if dstart is not None:
            delta.append([dstart, dend, "".join(dline)])
        # apply the delta to the base, and get a delta for addrevision
        deltatext, arraytext = _addlistdelta(base, delta)
        return arraytext, deltatext
119 135
120 136 def _msearch(m, s, lo=0, hi=None):
121 137 '''return a tuple (start, end) that says where to find s within m.
122 138
123 139 If the string is found m[start:end] are the line containing
124 140 that string. If start == end the string was not found and
125 141 they indicate the proper sorted insertion point.
126 142
127 143 m should be a buffer or a string
128 144 s is a string'''
129 145 def advance(i, c):
130 146 while i < lenm and m[i] != c:
131 147 i += 1
132 148 return i
133 149 if not s:
134 150 return (lo, lo)
135 151 lenm = len(m)
136 152 if not hi:
137 153 hi = lenm
138 154 while lo < hi:
139 155 mid = (lo + hi) // 2
140 156 start = mid
141 157 while start > 0 and m[start - 1] != '\n':
142 158 start -= 1
143 159 end = advance(start, '\0')
144 160 if m[start:end] < s:
145 161 # we know that after the null there are 40 bytes of sha1
146 162 # this translates to the bisect lo = mid + 1
147 163 lo = advance(end + 40, '\n') + 1
148 164 else:
149 165 # this translates to the bisect hi = mid
150 166 hi = start
151 167 end = advance(lo, '\0')
152 168 found = m[lo:end]
153 169 if s == found:
154 170 # we know that after the null there are 40 bytes of sha1
155 171 end = advance(end + 40, '\n')
156 172 return (lo, end + 1)
157 173 else:
158 174 return (lo, lo)
159 175
160 176 def _checkforbidden(l):
161 177 """Check filenames for illegal characters."""
162 178 for f in l:
163 179 if '\n' in f or '\r' in f:
164 180 raise error.RevlogError(
165 181 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
166 182
167 183
def _addlistdelta(addlist, x):
    """Apply the hunks collected during the bisect loop to addlist.

    x is a list of (start, end, content) hunks. Returns (deltatext,
    newaddlist): a binary delta suitable for addrevision, and the
    patched text as a fresh array.
    """
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    pos = 0
    patched = array.array('c')
    for start, end, content in x:
        # copy the untouched span, then splice in the replacement text
        patched += addlist[pos:start]
        if content:
            patched += array.array('c', content)
        pos = end
    patched += addlist[pos:]

    # delta wire format: a big-endian (start, end, length) header
    # followed by the replacement content, per hunk
    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, patched
188 204
def _parse(lines):
    """Parse manifest text into a manifestdict.

    Delegates to parsers.parse_manifest, which fills both the dict and
    its _flags side table in a single pass over the text.
    """
    mfdict = manifestdict()
    parsers.parse_manifest(mfdict, mfdict._flags, lines)
    return mfdict
193 209
class manifest(revlog.revlog):
    """A revlog storing manifest texts, with a small cache of parsed
    manifests keyed by node."""
    def __init__(self, opener):
        # we expect to deal with not more than four revs at a time,
        # during a commit --amend
        self._mancache = util.lrucachedict(4)
        revlog.revlog.__init__(self, opener, "00manifest.i")

    def readdelta(self, node):
        """Parse only the lines that differ from node's delta parent.

        NOTE(review): this yields a partial manifestdict containing just
        the changed entries, not the full manifest -- confirm callers
        expect that.
        """
        r = self.rev(node)
        return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))

    def readfast(self, node):
        '''use the faster of readdelta or read'''
        r = self.rev(node)
        deltaparent = self.deltaparent(r)
        # the stored delta is only meaningful when its base is one of
        # node's actual parents
        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
            return self.readdelta(node)
        return self.read(node)

    def read(self, node):
        """Return the full parsed manifestdict for node, with caching."""
        if node == revlog.nullid:
            return manifestdict() # don't upset local cache
        if node in self._mancache:
            return self._mancache[node][0]
        text = self.revision(node)
        # cache the raw text as an array alongside the parsed mapping so
        # add() can later compute a fast delta against it
        arraytext = array.array('c', text)
        mapping = _parse(text)
        self._mancache[node] = (mapping, arraytext)
        return mapping

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        if node in self._mancache:
            mapping = self._mancache[node][0]
            return mapping.get(f), mapping.flags(f)
        # not cached: binary-search the raw manifest text for f's line
        text = self.revision(node)
        start, end = _msearch(text, f)
        if start == end:
            return None, None
        l = text[start:end]
        f, n = l.split('\0')
        # n is 40 hex sha1 digits, then the flag chars, then '\n'
        return revlog.bin(n[:40]), n[40:-1]

    def add(self, map, transaction, link, p1, p2, added, removed):
        """Store manifestdict map as a new revision; return its node.

        added/removed are sorted lists of filenames changed relative
        to p1."""
        if p1 in self._mancache:
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one list for sorting
            work = [(x, False) for x in added]
            work.extend((x, True) for x in removed)
            # this could use heapq.merge() (from Python 2.6+) or equivalent
            # since the lists are already sorted
            work.sort()

            arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            text = map.text()
            arraytext = array.array('c', text)
            cachedelta = None

        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self._mancache[n] = (map, arraytext)

        return n
General Comments 0
You need to be logged in to leave comments. Login now