context: simplify call to icase matcher in 'match()'...
Pierre-Yves David - r31464:0e7a6279 default
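The change itself is a small refactoring of workingctx.match(): instead of two return statements that each spell out the full matcher argument list (one for matchmod.icasefsmatcher on case-insensitive filesystems, one for matchmod.match), the method now selects the matcher factory first and makes a single call. Below is a minimal sketch of that pattern; plain_matcher, icase_matcher and build_matcher are hypothetical stand-ins rather than the real Mercurial APIs, and the real code passes many more arguments (include, exclude, auditor, ctx, and so on).

# Hypothetical sketch of the refactoring: two factories with the same
# signature (stand-ins for matchmod.match and matchmod.icasefsmatcher).
def plain_matcher(root, pats):
    return ('match', root, pats)

def icase_matcher(root, pats):
    return ('icasefsmatcher', root, pats)

def build_matcher(root, pats, case_sensitive):
    # Before: two call sites, each repeating the full argument list.
    # After: pick the factory first, then make a single call.
    matcherfunc = plain_matcher
    if not case_sensitive:
        matcherfunc = icase_matcher
    return matcherfunc(root, pats)

print(build_matcher('/repo', ['glob:*.py'], case_sensitive=False))

Both branches produce the same kind of matcher object; only the factory differs, which is what lets the duplicated argument list collapse into a single call in the changed hunk below.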
@@ -1,2116 +1,2114 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 revlog,
39 39 scmutil,
40 40 subrepo,
41 41 util,
42 42 )
43 43
44 44 propertycache = util.propertycache
45 45
46 46 nonascii = re.compile(r'[^\x21-\x7f]').search
47 47
48 48 class basectx(object):
49 49 """A basectx object represents the common logic for its children:
50 50 changectx: read-only context that is already present in the repo,
51 51 workingctx: a context that represents the working directory and can
52 52 be committed,
53 53 memctx: a context that represents changes in-memory and can also
54 54 be committed."""
55 55 def __new__(cls, repo, changeid='', *args, **kwargs):
56 56 if isinstance(changeid, basectx):
57 57 return changeid
58 58
59 59 o = super(basectx, cls).__new__(cls)
60 60
61 61 o._repo = repo
62 62 o._rev = nullrev
63 63 o._node = nullid
64 64
65 65 return o
66 66
67 67 def __str__(self):
68 68 r = short(self.node())
69 69 if pycompat.ispy3:
70 70 return r.decode('ascii')
71 71 return r
72 72
73 73 def __bytes__(self):
74 74 return short(self.node())
75 75
76 76 def __int__(self):
77 77 return self.rev()
78 78
79 79 def __repr__(self):
80 80 return "<%s %s>" % (type(self).__name__, str(self))
81 81
82 82 def __eq__(self, other):
83 83 try:
84 84 return type(self) == type(other) and self._rev == other._rev
85 85 except AttributeError:
86 86 return False
87 87
88 88 def __ne__(self, other):
89 89 return not (self == other)
90 90
91 91 def __contains__(self, key):
92 92 return key in self._manifest
93 93
94 94 def __getitem__(self, key):
95 95 return self.filectx(key)
96 96
97 97 def __iter__(self):
98 98 return iter(self._manifest)
99 99
100 100 def _buildstatusmanifest(self, status):
101 101 """Builds a manifest that includes the given status results, if this is
102 102 a working copy context. For non-working copy contexts, it just returns
103 103 the normal manifest."""
104 104 return self.manifest()
105 105
106 106 def _matchstatus(self, other, match):
107 107 """return match.always if match is None
108 108
109 109 This internal method provides a way for child objects to override the
110 110 match operator.
111 111 """
112 112 return match or matchmod.always(self._repo.root, self._repo.getcwd())
113 113
114 114 def _buildstatus(self, other, s, match, listignored, listclean,
115 115 listunknown):
116 116 """build a status with respect to another context"""
117 117 # Load earliest manifest first for caching reasons. More specifically,
118 118 # if you have revisions 1000 and 1001, 1001 is probably stored as a
119 119 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
120 120 # 1000 and cache it so that when you read 1001, we just need to apply a
121 121 # delta to what's in the cache. So that's one full reconstruction + one
122 122 # delta application.
123 123 mf2 = None
124 124 if self.rev() is not None and self.rev() < other.rev():
125 125 mf2 = self._buildstatusmanifest(s)
126 126 mf1 = other._buildstatusmanifest(s)
127 127 if mf2 is None:
128 128 mf2 = self._buildstatusmanifest(s)
129 129
130 130 modified, added = [], []
131 131 removed = []
132 132 clean = []
133 133 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
134 134 deletedset = set(deleted)
135 135 d = mf1.diff(mf2, match=match, clean=listclean)
136 136 for fn, value in d.iteritems():
137 137 if fn in deletedset:
138 138 continue
139 139 if value is None:
140 140 clean.append(fn)
141 141 continue
142 142 (node1, flag1), (node2, flag2) = value
143 143 if node1 is None:
144 144 added.append(fn)
145 145 elif node2 is None:
146 146 removed.append(fn)
147 147 elif flag1 != flag2:
148 148 modified.append(fn)
149 149 elif node2 not in wdirnodes:
150 150 # When comparing files between two commits, we save time by
151 151 # not comparing the file contents when the nodeids differ.
152 152 # Note that this means we incorrectly report a reverted change
153 153 # to a file as a modification.
154 154 modified.append(fn)
155 155 elif self[fn].cmp(other[fn]):
156 156 modified.append(fn)
157 157 else:
158 158 clean.append(fn)
159 159
160 160 if removed:
161 161 # need to filter files if they are already reported as removed
162 162 unknown = [fn for fn in unknown if fn not in mf1 and
163 163 (not match or match(fn))]
164 164 ignored = [fn for fn in ignored if fn not in mf1 and
165 165 (not match or match(fn))]
166 166 # if they're deleted, don't report them as removed
167 167 removed = [fn for fn in removed if fn not in deletedset]
168 168
169 169 return scmutil.status(modified, added, removed, deleted, unknown,
170 170 ignored, clean)
171 171
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepo.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181 def node(self):
182 182 return self._node
183 183 def hex(self):
184 184 return hex(self.node())
185 185 def manifest(self):
186 186 return self._manifest
187 187 def manifestctx(self):
188 188 return self._manifestctx
189 189 def repo(self):
190 190 return self._repo
191 191 def phasestr(self):
192 192 return phases.phasenames[self.phase()]
193 193 def mutable(self):
194 194 return self.phase() > phases.public
195 195
196 196 def getfileset(self, expr):
197 197 return fileset.getfileset(self, expr)
198 198
199 199 def obsolete(self):
200 200 """True if the changeset is obsolete"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202 202
203 203 def extinct(self):
204 204 """True if the changeset is extinct"""
205 205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206 206
207 207 def unstable(self):
208 208 """True if the changeset is not obsolete but its ancestors are"""
209 209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
210 210
211 211 def bumped(self):
212 212 """True if the changeset tries to be a successor of a public changeset
213 213
214 214 Only non-public and non-obsolete changesets may be bumped.
215 215 """
216 216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
217 217
218 218 def divergent(self):
219 219 """Is a successor of a changeset with multiple possible successors set
220 220
221 221 Only non-public and non-obsolete changesets may be divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
224 224
225 225 def troubled(self):
226 226 """True if the changeset is either unstable, bumped or divergent"""
227 227 return self.unstable() or self.bumped() or self.divergent()
228 228
229 229 def troubles(self):
230 230 """return the list of troubles affecting this changeset.
231 231
232 232 Troubles are returned as strings. Possible values are:
233 233 - unstable,
234 234 - bumped,
235 235 - divergent.
236 236 """
237 237 troubles = []
238 238 if self.unstable():
239 239 troubles.append('unstable')
240 240 if self.bumped():
241 241 troubles.append('bumped')
242 242 if self.divergent():
243 243 troubles.append('divergent')
244 244 return troubles
245 245
246 246 def parents(self):
247 247 """return contexts for each parent changeset"""
248 248 return self._parents
249 249
250 250 def p1(self):
251 251 return self._parents[0]
252 252
253 253 def p2(self):
254 254 parents = self._parents
255 255 if len(parents) == 2:
256 256 return parents[1]
257 257 return changectx(self._repo, nullrev)
258 258
259 259 def _fileinfo(self, path):
260 260 if '_manifest' in self.__dict__:
261 261 try:
262 262 return self._manifest[path], self._manifest.flags(path)
263 263 except KeyError:
264 264 raise error.ManifestLookupError(self._node, path,
265 265 _('not found in manifest'))
266 266 if '_manifestdelta' in self.__dict__ or path in self.files():
267 267 if path in self._manifestdelta:
268 268 return (self._manifestdelta[path],
269 269 self._manifestdelta.flags(path))
270 270 mfl = self._repo.manifestlog
271 271 try:
272 272 node, flag = mfl[self._changeset.manifest].find(path)
273 273 except KeyError:
274 274 raise error.ManifestLookupError(self._node, path,
275 275 _('not found in manifest'))
276 276
277 277 return node, flag
278 278
279 279 def filenode(self, path):
280 280 return self._fileinfo(path)[0]
281 281
282 282 def flags(self, path):
283 283 try:
284 284 return self._fileinfo(path)[1]
285 285 except error.LookupError:
286 286 return ''
287 287
288 288 def sub(self, path, allowcreate=True):
289 289 '''return a subrepo for the stored revision of path, never wdir()'''
290 290 return subrepo.subrepo(self, path, allowcreate=allowcreate)
291 291
292 292 def nullsub(self, path, pctx):
293 293 return subrepo.nullsubrepo(self, path, pctx)
294 294
295 295 def workingsub(self, path):
296 296 '''return a subrepo for the stored revision, or wdir if this is a wdir
297 297 context.
298 298 '''
299 299 return subrepo.subrepo(self, path, allowwdir=True)
300 300
301 301 def match(self, pats=None, include=None, exclude=None, default='glob',
302 302 listsubrepos=False, badfn=None):
303 303 if pats is None:
304 304 pats = []
305 305 r = self._repo
306 306 return matchmod.match(r.root, r.getcwd(), pats,
307 307 include, exclude, default,
308 308 auditor=r.nofsauditor, ctx=self,
309 309 listsubrepos=listsubrepos, badfn=badfn)
310 310
311 311 def diff(self, ctx2=None, match=None, **opts):
312 312 """Returns a diff generator for the given contexts and matcher"""
313 313 if ctx2 is None:
314 314 ctx2 = self.p1()
315 315 if ctx2 is not None:
316 316 ctx2 = self._repo[ctx2]
317 317 diffopts = patch.diffopts(self._repo.ui, opts)
318 318 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
319 319
320 320 def dirs(self):
321 321 return self._manifest.dirs()
322 322
323 323 def hasdir(self, dir):
324 324 return self._manifest.hasdir(dir)
325 325
326 326 def dirty(self, missing=False, merge=True, branch=True):
327 327 return False
328 328
329 329 def status(self, other=None, match=None, listignored=False,
330 330 listclean=False, listunknown=False, listsubrepos=False):
331 331 """return status of files between two nodes or node and working
332 332 directory.
333 333
334 334 If other is None, compare this node with working directory.
335 335
336 336 returns (modified, added, removed, deleted, unknown, ignored, clean)
337 337 """
338 338
339 339 ctx1 = self
340 340 ctx2 = self._repo[other]
341 341
342 342 # This next code block is, admittedly, fragile logic that tests for
343 343 # reversing the contexts and wouldn't need to exist if it weren't for
344 344 # the fast (and common) code path of comparing the working directory
345 345 # with its first parent.
346 346 #
347 347 # What we're aiming for here is the ability to call:
348 348 #
349 349 # workingctx.status(parentctx)
350 350 #
351 351 # If we always built the manifest for each context and compared those,
352 352 # then we'd be done. But the special case of the above call means we
353 353 # just copy the manifest of the parent.
354 354 reversed = False
355 355 if (not isinstance(ctx1, changectx)
356 356 and isinstance(ctx2, changectx)):
357 357 reversed = True
358 358 ctx1, ctx2 = ctx2, ctx1
359 359
360 360 match = ctx2._matchstatus(ctx1, match)
361 361 r = scmutil.status([], [], [], [], [], [], [])
362 362 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
363 363 listunknown)
364 364
365 365 if reversed:
366 366 # Reverse added and removed. Clear deleted, unknown and ignored as
367 367 # these make no sense to reverse.
368 368 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
369 369 r.clean)
370 370
371 371 if listsubrepos:
372 372 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
373 373 try:
374 374 rev2 = ctx2.subrev(subpath)
375 375 except KeyError:
376 376 # A subrepo that existed in node1 was deleted between
377 377 # node1 and node2 (inclusive). Thus, ctx2's substate
378 378 # won't contain that subpath. The best we can do is ignore it.
379 379 rev2 = None
380 380 submatch = matchmod.subdirmatcher(subpath, match)
381 381 s = sub.status(rev2, match=submatch, ignored=listignored,
382 382 clean=listclean, unknown=listunknown,
383 383 listsubrepos=True)
384 384 for rfiles, sfiles in zip(r, s):
385 385 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
386 386
387 387 for l in r:
388 388 l.sort()
389 389
390 390 return r
391 391
392 392
393 393 def makememctx(repo, parents, text, user, date, branch, files, store,
394 394 editor=None, extra=None):
395 395 def getfilectx(repo, memctx, path):
396 396 data, mode, copied = store.getfile(path)
397 397 if data is None:
398 398 return None
399 399 islink, isexec = mode
400 400 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
401 401 copied=copied, memctx=memctx)
402 402 if extra is None:
403 403 extra = {}
404 404 if branch:
405 405 extra['branch'] = encoding.fromlocal(branch)
406 406 ctx = memctx(repo, parents, text, files, getfilectx, user,
407 407 date, extra, editor)
408 408 return ctx
409 409
410 410 class changectx(basectx):
411 411 """A changecontext object makes access to data related to a particular
412 412 changeset convenient. It represents a read-only context already present in
413 413 the repo."""
414 414 def __init__(self, repo, changeid=''):
415 415 """changeid is a revision number, node, or tag"""
416 416
417 417 # since basectx.__new__ already took care of copying the object, we
418 418 # don't need to do anything in __init__, so we just exit here
419 419 if isinstance(changeid, basectx):
420 420 return
421 421
422 422 if changeid == '':
423 423 changeid = '.'
424 424 self._repo = repo
425 425
426 426 try:
427 427 if isinstance(changeid, int):
428 428 self._node = repo.changelog.node(changeid)
429 429 self._rev = changeid
430 430 return
431 431 if not pycompat.ispy3 and isinstance(changeid, long):
432 432 changeid = str(changeid)
433 433 if changeid == 'null':
434 434 self._node = nullid
435 435 self._rev = nullrev
436 436 return
437 437 if changeid == 'tip':
438 438 self._node = repo.changelog.tip()
439 439 self._rev = repo.changelog.rev(self._node)
440 440 return
441 441 if changeid == '.' or changeid == repo.dirstate.p1():
442 442 # this is a hack to delay/avoid loading obsmarkers
443 443 # when we know that '.' won't be hidden
444 444 self._node = repo.dirstate.p1()
445 445 self._rev = repo.unfiltered().changelog.rev(self._node)
446 446 return
447 447 if len(changeid) == 20:
448 448 try:
449 449 self._node = changeid
450 450 self._rev = repo.changelog.rev(changeid)
451 451 return
452 452 except error.FilteredRepoLookupError:
453 453 raise
454 454 except LookupError:
455 455 pass
456 456
457 457 try:
458 458 r = int(changeid)
459 459 if '%d' % r != changeid:
460 460 raise ValueError
461 461 l = len(repo.changelog)
462 462 if r < 0:
463 463 r += l
464 464 if r < 0 or r >= l:
465 465 raise ValueError
466 466 self._rev = r
467 467 self._node = repo.changelog.node(r)
468 468 return
469 469 except error.FilteredIndexError:
470 470 raise
471 471 except (ValueError, OverflowError, IndexError):
472 472 pass
473 473
474 474 if len(changeid) == 40:
475 475 try:
476 476 self._node = bin(changeid)
477 477 self._rev = repo.changelog.rev(self._node)
478 478 return
479 479 except error.FilteredLookupError:
480 480 raise
481 481 except (TypeError, LookupError):
482 482 pass
483 483
484 484 # lookup bookmarks through the name interface
485 485 try:
486 486 self._node = repo.names.singlenode(repo, changeid)
487 487 self._rev = repo.changelog.rev(self._node)
488 488 return
489 489 except KeyError:
490 490 pass
491 491 except error.FilteredRepoLookupError:
492 492 raise
493 493 except error.RepoLookupError:
494 494 pass
495 495
496 496 self._node = repo.unfiltered().changelog._partialmatch(changeid)
497 497 if self._node is not None:
498 498 self._rev = repo.changelog.rev(self._node)
499 499 return
500 500
501 501 # lookup failed
502 502 # check if it might have come from damaged dirstate
503 503 #
504 504 # XXX we could avoid the unfiltered if we had a recognizable
505 505 # exception for filtered changeset access
506 506 if changeid in repo.unfiltered().dirstate.parents():
507 507 msg = _("working directory has unknown parent '%s'!")
508 508 raise error.Abort(msg % short(changeid))
509 509 try:
510 510 if len(changeid) == 20 and nonascii(changeid):
511 511 changeid = hex(changeid)
512 512 except TypeError:
513 513 pass
514 514 except (error.FilteredIndexError, error.FilteredLookupError,
515 515 error.FilteredRepoLookupError):
516 516 if repo.filtername.startswith('visible'):
517 517 msg = _("hidden revision '%s'") % changeid
518 518 hint = _('use --hidden to access hidden revisions')
519 519 raise error.FilteredRepoLookupError(msg, hint=hint)
520 520 msg = _("filtered revision '%s' (not in '%s' subset)")
521 521 msg %= (changeid, repo.filtername)
522 522 raise error.FilteredRepoLookupError(msg)
523 523 except IndexError:
524 524 pass
525 525 raise error.RepoLookupError(
526 526 _("unknown revision '%s'") % changeid)
527 527
528 528 def __hash__(self):
529 529 try:
530 530 return hash(self._rev)
531 531 except AttributeError:
532 532 return id(self)
533 533
534 534 def __nonzero__(self):
535 535 return self._rev != nullrev
536 536
537 537 @propertycache
538 538 def _changeset(self):
539 539 return self._repo.changelog.changelogrevision(self.rev())
540 540
541 541 @propertycache
542 542 def _manifest(self):
543 543 return self._manifestctx.read()
544 544
545 545 @propertycache
546 546 def _manifestctx(self):
547 547 return self._repo.manifestlog[self._changeset.manifest]
548 548
549 549 @propertycache
550 550 def _manifestdelta(self):
551 551 return self._manifestctx.readdelta()
552 552
553 553 @propertycache
554 554 def _parents(self):
555 555 repo = self._repo
556 556 p1, p2 = repo.changelog.parentrevs(self._rev)
557 557 if p2 == nullrev:
558 558 return [changectx(repo, p1)]
559 559 return [changectx(repo, p1), changectx(repo, p2)]
560 560
561 561 def changeset(self):
562 562 c = self._changeset
563 563 return (
564 564 c.manifest,
565 565 c.user,
566 566 c.date,
567 567 c.files,
568 568 c.description,
569 569 c.extra,
570 570 )
571 571 def manifestnode(self):
572 572 return self._changeset.manifest
573 573
574 574 def user(self):
575 575 return self._changeset.user
576 576 def date(self):
577 577 return self._changeset.date
578 578 def files(self):
579 579 return self._changeset.files
580 580 def description(self):
581 581 return self._changeset.description
582 582 def branch(self):
583 583 return encoding.tolocal(self._changeset.extra.get("branch"))
584 584 def closesbranch(self):
585 585 return 'close' in self._changeset.extra
586 586 def extra(self):
587 587 return self._changeset.extra
588 588 def tags(self):
589 589 return self._repo.nodetags(self._node)
590 590 def bookmarks(self):
591 591 return self._repo.nodebookmarks(self._node)
592 592 def phase(self):
593 593 return self._repo._phasecache.phase(self._repo, self._rev)
594 594 def hidden(self):
595 595 return self._rev in repoview.filterrevs(self._repo, 'visible')
596 596
597 597 def children(self):
598 598 """return contexts for each child changeset"""
599 599 c = self._repo.changelog.children(self._node)
600 600 return [changectx(self._repo, x) for x in c]
601 601
602 602 def ancestors(self):
603 603 for a in self._repo.changelog.ancestors([self._rev]):
604 604 yield changectx(self._repo, a)
605 605
606 606 def descendants(self):
607 607 for d in self._repo.changelog.descendants([self._rev]):
608 608 yield changectx(self._repo, d)
609 609
610 610 def filectx(self, path, fileid=None, filelog=None):
611 611 """get a file context from this changeset"""
612 612 if fileid is None:
613 613 fileid = self.filenode(path)
614 614 return filectx(self._repo, path, fileid=fileid,
615 615 changectx=self, filelog=filelog)
616 616
617 617 def ancestor(self, c2, warn=False):
618 618 """return the "best" ancestor context of self and c2
619 619
620 620 If there are multiple candidates, it will show a message and check
621 621 merge.preferancestor configuration before falling back to the
622 622 revlog ancestor."""
623 623 # deal with workingctxs
624 624 n2 = c2._node
625 625 if n2 is None:
626 626 n2 = c2._parents[0]._node
627 627 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
628 628 if not cahs:
629 629 anc = nullid
630 630 elif len(cahs) == 1:
631 631 anc = cahs[0]
632 632 else:
633 633 # experimental config: merge.preferancestor
634 634 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
635 635 try:
636 636 ctx = changectx(self._repo, r)
637 637 except error.RepoLookupError:
638 638 continue
639 639 anc = ctx.node()
640 640 if anc in cahs:
641 641 break
642 642 else:
643 643 anc = self._repo.changelog.ancestor(self._node, n2)
644 644 if warn:
645 645 self._repo.ui.status(
646 646 (_("note: using %s as ancestor of %s and %s\n") %
647 647 (short(anc), short(self._node), short(n2))) +
648 648 ''.join(_(" alternatively, use --config "
649 649 "merge.preferancestor=%s\n") %
650 650 short(n) for n in sorted(cahs) if n != anc))
651 651 return changectx(self._repo, anc)
652 652
653 653 def descendant(self, other):
654 654 """True if other is descendant of this changeset"""
655 655 return self._repo.changelog.descendant(self._rev, other._rev)
656 656
657 657 def walk(self, match):
658 658 '''Generates matching file names.'''
659 659
660 660 # Wrap match.bad method to have message with nodeid
661 661 def bad(fn, msg):
662 662 # The manifest doesn't know about subrepos, so don't complain about
663 663 # paths into valid subrepos.
664 664 if any(fn == s or fn.startswith(s + '/')
665 665 for s in self.substate):
666 666 return
667 667 match.bad(fn, _('no such file in rev %s') % self)
668 668
669 669 m = matchmod.badmatch(match, bad)
670 670 return self._manifest.walk(m)
671 671
672 672 def matches(self, match):
673 673 return self.walk(match)
674 674
675 675 class basefilectx(object):
676 676 """A filecontext object represents the common logic for its children:
677 677 filectx: read-only access to a filerevision that is already present
678 678 in the repo,
679 679 workingfilectx: a filecontext that represents files from the working
680 680 directory,
681 681 memfilectx: a filecontext that represents files in-memory."""
682 682 def __new__(cls, repo, path, *args, **kwargs):
683 683 return super(basefilectx, cls).__new__(cls)
684 684
685 685 @propertycache
686 686 def _filelog(self):
687 687 return self._repo.file(self._path)
688 688
689 689 @propertycache
690 690 def _changeid(self):
691 691 if '_changeid' in self.__dict__:
692 692 return self._changeid
693 693 elif '_changectx' in self.__dict__:
694 694 return self._changectx.rev()
695 695 elif '_descendantrev' in self.__dict__:
696 696 # this file context was created from a revision with a known
697 697 # descendant, we can (lazily) correct for linkrev aliases
698 698 return self._adjustlinkrev(self._descendantrev)
699 699 else:
700 700 return self._filelog.linkrev(self._filerev)
701 701
702 702 @propertycache
703 703 def _filenode(self):
704 704 if '_fileid' in self.__dict__:
705 705 return self._filelog.lookup(self._fileid)
706 706 else:
707 707 return self._changectx.filenode(self._path)
708 708
709 709 @propertycache
710 710 def _filerev(self):
711 711 return self._filelog.rev(self._filenode)
712 712
713 713 @propertycache
714 714 def _repopath(self):
715 715 return self._path
716 716
717 717 def __nonzero__(self):
718 718 try:
719 719 self._filenode
720 720 return True
721 721 except error.LookupError:
722 722 # file is missing
723 723 return False
724 724
725 725 def __str__(self):
726 726 try:
727 727 return "%s@%s" % (self.path(), self._changectx)
728 728 except error.LookupError:
729 729 return "%s@???" % self.path()
730 730
731 731 def __repr__(self):
732 732 return "<%s %s>" % (type(self).__name__, str(self))
733 733
734 734 def __hash__(self):
735 735 try:
736 736 return hash((self._path, self._filenode))
737 737 except AttributeError:
738 738 return id(self)
739 739
740 740 def __eq__(self, other):
741 741 try:
742 742 return (type(self) == type(other) and self._path == other._path
743 743 and self._filenode == other._filenode)
744 744 except AttributeError:
745 745 return False
746 746
747 747 def __ne__(self, other):
748 748 return not (self == other)
749 749
750 750 def filerev(self):
751 751 return self._filerev
752 752 def filenode(self):
753 753 return self._filenode
754 754 def flags(self):
755 755 return self._changectx.flags(self._path)
756 756 def filelog(self):
757 757 return self._filelog
758 758 def rev(self):
759 759 return self._changeid
760 760 def linkrev(self):
761 761 return self._filelog.linkrev(self._filerev)
762 762 def node(self):
763 763 return self._changectx.node()
764 764 def hex(self):
765 765 return self._changectx.hex()
766 766 def user(self):
767 767 return self._changectx.user()
768 768 def date(self):
769 769 return self._changectx.date()
770 770 def files(self):
771 771 return self._changectx.files()
772 772 def description(self):
773 773 return self._changectx.description()
774 774 def branch(self):
775 775 return self._changectx.branch()
776 776 def extra(self):
777 777 return self._changectx.extra()
778 778 def phase(self):
779 779 return self._changectx.phase()
780 780 def phasestr(self):
781 781 return self._changectx.phasestr()
782 782 def manifest(self):
783 783 return self._changectx.manifest()
784 784 def changectx(self):
785 785 return self._changectx
786 786 def repo(self):
787 787 return self._repo
788 788
789 789 def path(self):
790 790 return self._path
791 791
792 792 def isbinary(self):
793 793 try:
794 794 return util.binary(self.data())
795 795 except IOError:
796 796 return False
797 797 def isexec(self):
798 798 return 'x' in self.flags()
799 799 def islink(self):
800 800 return 'l' in self.flags()
801 801
802 802 def isabsent(self):
803 803 """whether this filectx represents a file not in self._changectx
804 804
805 805 This is mainly for merge code to detect change/delete conflicts. This is
806 806 expected to be True for all subclasses of basectx."""
807 807 return False
808 808
809 809 _customcmp = False
810 810 def cmp(self, fctx):
811 811 """compare with other file context
812 812
813 813 returns True if different than fctx.
814 814 """
815 815 if fctx._customcmp:
816 816 return fctx.cmp(self)
817 817
818 818 if (fctx._filenode is None
819 819 and (self._repo._encodefilterpats
820 820 # if file data starts with '\1\n', empty metadata block is
821 821 # prepended, which adds 4 bytes to filelog.size().
822 822 or self.size() - 4 == fctx.size())
823 823 or self.size() == fctx.size()):
824 824 return self._filelog.cmp(self._filenode, fctx.data())
825 825
826 826 return True
827 827
828 828 def _adjustlinkrev(self, srcrev, inclusive=False):
829 829 """return the first ancestor of <srcrev> introducing <fnode>
830 830
831 831 If the linkrev of the file revision does not point to an ancestor of
832 832 srcrev, we'll walk down the ancestors until we find one introducing
833 833 this file revision.
834 834
835 835 :srcrev: the changeset revision we search ancestors from
836 836 :inclusive: if true, the src revision will also be checked
837 837 """
838 838 repo = self._repo
839 839 cl = repo.unfiltered().changelog
840 840 mfl = repo.manifestlog
841 841 # fetch the linkrev
842 842 lkr = self.linkrev()
843 843 # hack to reuse ancestor computation when searching for renames
844 844 memberanc = getattr(self, '_ancestrycontext', None)
845 845 iteranc = None
846 846 if srcrev is None:
847 847 # wctx case, used by workingfilectx during mergecopy
848 848 revs = [p.rev() for p in self._repo[None].parents()]
849 849 inclusive = True # we skipped the real (revless) source
850 850 else:
851 851 revs = [srcrev]
852 852 if memberanc is None:
853 853 memberanc = iteranc = cl.ancestors(revs, lkr,
854 854 inclusive=inclusive)
855 855 # check if this linkrev is an ancestor of srcrev
856 856 if lkr not in memberanc:
857 857 if iteranc is None:
858 858 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
859 859 fnode = self._filenode
860 860 path = self._path
861 861 for a in iteranc:
862 862 ac = cl.read(a) # get changeset data (we avoid object creation)
863 863 if path in ac[3]: # checking the 'files' field.
864 864 # The file has been touched, check if the content is
865 865 # similar to the one we search for.
866 866 if fnode == mfl[ac[0]].readfast().get(path):
867 867 return a
868 868 # In theory, we should never get out of that loop without a result.
869 869 # But if the manifest uses a buggy file revision (not a child of the
870 870 # one it replaces) we could. Such a buggy situation will likely
871 871 # result in a crash somewhere else at some point.
872 872 return lkr
873 873
874 874 def introrev(self):
875 875 """return the rev of the changeset which introduced this file revision
876 876
877 877 This method is different from linkrev because it takes into account the
878 878 changeset the filectx was created from. It ensures the returned
879 879 revision is one of its ancestors. This prevents bugs from
880 880 'linkrev-shadowing' when a file revision is used by multiple
881 881 changesets.
882 882 """
883 883 lkr = self.linkrev()
884 884 attrs = vars(self)
885 885 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
886 886 if noctx or self.rev() == lkr:
887 887 return self.linkrev()
888 888 return self._adjustlinkrev(self.rev(), inclusive=True)
889 889
890 890 def _parentfilectx(self, path, fileid, filelog):
891 891 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
892 892 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
893 893 if '_changeid' in vars(self) or '_changectx' in vars(self):
894 894 # If self is associated with a changeset (probably explicitly
895 895 # fed), ensure the created filectx is associated with a
896 896 # changeset that is an ancestor of self.changectx.
897 897 # This lets us later use _adjustlinkrev to get a correct link.
898 898 fctx._descendantrev = self.rev()
899 899 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
900 900 elif '_descendantrev' in vars(self):
901 901 # Otherwise propagate _descendantrev if we have one associated.
902 902 fctx._descendantrev = self._descendantrev
903 903 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
904 904 return fctx
905 905
906 906 def parents(self):
907 907 _path = self._path
908 908 fl = self._filelog
909 909 parents = self._filelog.parents(self._filenode)
910 910 pl = [(_path, node, fl) for node in parents if node != nullid]
911 911
912 912 r = fl.renamed(self._filenode)
913 913 if r:
914 914 # - In the simple rename case, both parents are nullid, pl is empty.
915 915 # - In case of merge, only one of the parents is nullid and should
916 916 # be replaced with the rename information. This parent is -always-
917 917 # the first one.
918 918 #
919 919 # As nullid parents have always been filtered out in the previous
920 920 # list comprehension, inserting at 0 will always result in replacing
921 921 # the first nullid parent with the rename information.
922 922 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
923 923
924 924 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
925 925
926 926 def p1(self):
927 927 return self.parents()[0]
928 928
929 929 def p2(self):
930 930 p = self.parents()
931 931 if len(p) == 2:
932 932 return p[1]
933 933 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
934 934
935 935 def annotate(self, follow=False, linenumber=False, diffopts=None):
936 936 '''returns a list of tuples of ((ctx, number), line) for each line
937 937 in the file, where ctx is the filectx of the node where
938 938 that line was last changed; if linenumber parameter is true, number is
939 939 the line number at the first appearance in the managed file, otherwise,
940 940 number has a fixed value of False.
941 941 '''
942 942
943 943 def lines(text):
944 944 if text.endswith("\n"):
945 945 return text.count("\n")
946 946 return text.count("\n") + int(bool(text))
947 947
948 948 if linenumber:
949 949 def decorate(text, rev):
950 950 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
951 951 else:
952 952 def decorate(text, rev):
953 953 return ([(rev, False)] * lines(text), text)
954 954
955 955 def pair(parent, child):
956 956 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
957 957 for (a1, a2, b1, b2), t in blocks:
958 958 # Changed blocks ('!') or blocks made only of blank lines ('~')
959 959 # belong to the child.
960 960 if t == '=':
961 961 child[0][b1:b2] = parent[0][a1:a2]
962 962 return child
963 963
964 964 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
965 965
966 966 def parents(f):
967 967 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
968 968 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
969 969 # from the topmost introrev (= srcrev) down to p.linkrev() if it
970 970 # isn't an ancestor of the srcrev.
971 971 f._changeid
972 972 pl = f.parents()
973 973
974 974 # Don't return renamed parents if we aren't following.
975 975 if not follow:
976 976 pl = [p for p in pl if p.path() == f.path()]
977 977
978 978 # renamed filectx won't have a filelog yet, so set it
979 979 # from the cache to save time
980 980 for p in pl:
981 981 if not '_filelog' in p.__dict__:
982 982 p._filelog = getlog(p.path())
983 983
984 984 return pl
985 985
986 986 # use linkrev to find the first changeset where self appeared
987 987 base = self
988 988 introrev = self.introrev()
989 989 if self.rev() != introrev:
990 990 base = self.filectx(self.filenode(), changeid=introrev)
991 991 if getattr(base, '_ancestrycontext', None) is None:
992 992 cl = self._repo.changelog
993 993 if introrev is None:
994 994 # wctx is not inclusive, but works because _ancestrycontext
995 995 # is used to test filelog revisions
996 996 ac = cl.ancestors([p.rev() for p in base.parents()],
997 997 inclusive=True)
998 998 else:
999 999 ac = cl.ancestors([introrev], inclusive=True)
1000 1000 base._ancestrycontext = ac
1001 1001
1002 1002 # This algorithm would prefer to be recursive, but Python is a
1003 1003 # bit recursion-hostile. Instead we do an iterative
1004 1004 # depth-first search.
1005 1005
1006 1006 # 1st DFS pre-calculates pcache and needed
1007 1007 visit = [base]
1008 1008 pcache = {}
1009 1009 needed = {base: 1}
1010 1010 while visit:
1011 1011 f = visit.pop()
1012 1012 if f in pcache:
1013 1013 continue
1014 1014 pl = parents(f)
1015 1015 pcache[f] = pl
1016 1016 for p in pl:
1017 1017 needed[p] = needed.get(p, 0) + 1
1018 1018 if p not in pcache:
1019 1019 visit.append(p)
1020 1020
1021 1021 # 2nd DFS does the actual annotate
1022 1022 visit[:] = [base]
1023 1023 hist = {}
1024 1024 while visit:
1025 1025 f = visit[-1]
1026 1026 if f in hist:
1027 1027 visit.pop()
1028 1028 continue
1029 1029
1030 1030 ready = True
1031 1031 pl = pcache[f]
1032 1032 for p in pl:
1033 1033 if p not in hist:
1034 1034 ready = False
1035 1035 visit.append(p)
1036 1036 if ready:
1037 1037 visit.pop()
1038 1038 curr = decorate(f.data(), f)
1039 1039 for p in pl:
1040 1040 curr = pair(hist[p], curr)
1041 1041 if needed[p] == 1:
1042 1042 del hist[p]
1043 1043 del needed[p]
1044 1044 else:
1045 1045 needed[p] -= 1
1046 1046
1047 1047 hist[f] = curr
1048 1048 del pcache[f]
1049 1049
1050 1050 return zip(hist[base][0], hist[base][1].splitlines(True))
1051 1051
1052 1052 def ancestors(self, followfirst=False):
1053 1053 visit = {}
1054 1054 c = self
1055 1055 if followfirst:
1056 1056 cut = 1
1057 1057 else:
1058 1058 cut = None
1059 1059
1060 1060 while True:
1061 1061 for parent in c.parents()[:cut]:
1062 1062 visit[(parent.linkrev(), parent.filenode())] = parent
1063 1063 if not visit:
1064 1064 break
1065 1065 c = visit.pop(max(visit))
1066 1066 yield c
1067 1067
1068 1068 class filectx(basefilectx):
1069 1069 """A filecontext object makes access to data related to a particular
1070 1070 filerevision convenient."""
1071 1071 def __init__(self, repo, path, changeid=None, fileid=None,
1072 1072 filelog=None, changectx=None):
1073 1073 """changeid can be a changeset revision, node, or tag.
1074 1074 fileid can be a file revision or node."""
1075 1075 self._repo = repo
1076 1076 self._path = path
1077 1077
1078 1078 assert (changeid is not None
1079 1079 or fileid is not None
1080 1080 or changectx is not None), \
1081 1081 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1082 1082 % (changeid, fileid, changectx))
1083 1083
1084 1084 if filelog is not None:
1085 1085 self._filelog = filelog
1086 1086
1087 1087 if changeid is not None:
1088 1088 self._changeid = changeid
1089 1089 if changectx is not None:
1090 1090 self._changectx = changectx
1091 1091 if fileid is not None:
1092 1092 self._fileid = fileid
1093 1093
1094 1094 @propertycache
1095 1095 def _changectx(self):
1096 1096 try:
1097 1097 return changectx(self._repo, self._changeid)
1098 1098 except error.FilteredRepoLookupError:
1099 1099 # Linkrev may point to any revision in the repository. When the
1100 1100 # repository is filtered this may lead to `filectx` trying to build
1101 1101 # `changectx` for filtered revision. In such case we fallback to
1102 1102 # creating `changectx` on the unfiltered version of the repository.
1103 1103 # This fallback should not be an issue because `changectx` from
1104 1104 # `filectx` are not used in complex operations that care about
1105 1105 # filtering.
1106 1106 #
1107 1107 # This fallback is a cheap and dirty fix that prevents several
1108 1108 # crashes. It does not ensure the behavior is correct. However the
1109 1109 # behavior was not correct before filtering either, and "incorrect
1110 1110 # behavior" is seen as better than a "crash".
1111 1111 #
1112 1112 # Linkrevs have several serious troubles with filtering that are
1113 1113 # complicated to solve. Proper handling of the issue here should be
1114 1114 # considered when solving the linkrev issues is on the table.
1115 1115 return changectx(self._repo.unfiltered(), self._changeid)
1116 1116
1117 1117 def filectx(self, fileid, changeid=None):
1118 1118 '''opens an arbitrary revision of the file without
1119 1119 opening a new filelog'''
1120 1120 return filectx(self._repo, self._path, fileid=fileid,
1121 1121 filelog=self._filelog, changeid=changeid)
1122 1122
1123 1123 def rawdata(self):
1124 1124 return self._filelog.revision(self._filenode, raw=True)
1125 1125
1126 1126 def data(self):
1127 1127 try:
1128 1128 return self._filelog.read(self._filenode)
1129 1129 except error.CensoredNodeError:
1130 1130 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1131 1131 return ""
1132 1132 raise error.Abort(_("censored node: %s") % short(self._filenode),
1133 1133 hint=_("set censor.policy to ignore errors"))
1134 1134
1135 1135 def size(self):
1136 1136 return self._filelog.size(self._filerev)
1137 1137
1138 1138 def renamed(self):
1139 1139 """check if file was actually renamed in this changeset revision
1140 1140
1141 1141 If a rename is logged in the file revision, we report the copy for the
1142 1142 changeset only if the file revision's linkrev points back to the changeset
1143 1143 in question or both changeset parents contain different file revisions.
1144 1144 """
1145 1145
1146 1146 renamed = self._filelog.renamed(self._filenode)
1147 1147 if not renamed:
1148 1148 return renamed
1149 1149
1150 1150 if self.rev() == self.linkrev():
1151 1151 return renamed
1152 1152
1153 1153 name = self.path()
1154 1154 fnode = self._filenode
1155 1155 for p in self._changectx.parents():
1156 1156 try:
1157 1157 if fnode == p.filenode(name):
1158 1158 return None
1159 1159 except error.LookupError:
1160 1160 pass
1161 1161 return renamed
1162 1162
1163 1163 def children(self):
1164 1164 # hard for renames
1165 1165 c = self._filelog.children(self._filenode)
1166 1166 return [filectx(self._repo, self._path, fileid=x,
1167 1167 filelog=self._filelog) for x in c]
1168 1168
1169 1169 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1170 1170 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1171 1171 if diff from fctx2 to fctx1 has changes in linerange2 and
1172 1172 `linerange1` is the new line range for fctx1.
1173 1173 """
1174 1174 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1175 1175 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1176 1176 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1177 1177 return diffinrange, linerange1
1178 1178
1179 1179 def blockancestors(fctx, fromline, toline, followfirst=False):
1180 1180 """Yield ancestors of `fctx` with respect to the block of lines within
1181 1181 `fromline`-`toline` range.
1182 1182 """
1183 1183 diffopts = patch.diffopts(fctx._repo.ui)
1184 1184 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1185 1185 while visit:
1186 1186 c, linerange2 = visit.pop(max(visit))
1187 1187 pl = c.parents()
1188 1188 if followfirst:
1189 1189 pl = pl[:1]
1190 1190 if not pl:
1191 1191 # The block originates from the initial revision.
1192 1192 yield c, linerange2
1193 1193 continue
1194 1194 inrange = False
1195 1195 for p in pl:
1196 1196 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1197 1197 inrange = inrange or inrangep
1198 1198 if linerange1[0] == linerange1[1]:
1199 1199 # Parent's linerange is empty, meaning that the block got
1200 1200 # introduced in this revision; no need to go further in this
1201 1201 # branch.
1202 1202 continue
1203 1203 visit[p.linkrev(), p.filenode()] = p, linerange1
1204 1204 if inrange:
1205 1205 yield c, linerange2
1206 1206
1207 1207 class committablectx(basectx):
1208 1208 """A committablectx object provides common functionality for a context that
1209 1209 wants the ability to commit, e.g. workingctx or memctx."""
1210 1210 def __init__(self, repo, text="", user=None, date=None, extra=None,
1211 1211 changes=None):
1212 1212 self._repo = repo
1213 1213 self._rev = None
1214 1214 self._node = None
1215 1215 self._text = text
1216 1216 if date:
1217 1217 self._date = util.parsedate(date)
1218 1218 if user:
1219 1219 self._user = user
1220 1220 if changes:
1221 1221 self._status = changes
1222 1222
1223 1223 self._extra = {}
1224 1224 if extra:
1225 1225 self._extra = extra.copy()
1226 1226 if 'branch' not in self._extra:
1227 1227 try:
1228 1228 branch = encoding.fromlocal(self._repo.dirstate.branch())
1229 1229 except UnicodeDecodeError:
1230 1230 raise error.Abort(_('branch name not in UTF-8!'))
1231 1231 self._extra['branch'] = branch
1232 1232 if self._extra['branch'] == '':
1233 1233 self._extra['branch'] = 'default'
1234 1234
1235 1235 def __str__(self):
1236 1236 return str(self._parents[0]) + "+"
1237 1237
1238 1238 def __nonzero__(self):
1239 1239 return True
1240 1240
1241 1241 def _buildflagfunc(self):
1242 1242 # Create a fallback function for getting file flags when the
1243 1243 # filesystem doesn't support them
1244 1244
1245 1245 copiesget = self._repo.dirstate.copies().get
1246 1246 parents = self.parents()
1247 1247 if len(parents) < 2:
1248 1248 # when we have one parent, it's easy: copy from parent
1249 1249 man = parents[0].manifest()
1250 1250 def func(f):
1251 1251 f = copiesget(f, f)
1252 1252 return man.flags(f)
1253 1253 else:
1254 1254 # merges are tricky: we try to reconstruct the unstored
1255 1255 # result from the merge (issue1802)
1256 1256 p1, p2 = parents
1257 1257 pa = p1.ancestor(p2)
1258 1258 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1259 1259
1260 1260 def func(f):
1261 1261 f = copiesget(f, f) # may be wrong for merges with copies
1262 1262 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1263 1263 if fl1 == fl2:
1264 1264 return fl1
1265 1265 if fl1 == fla:
1266 1266 return fl2
1267 1267 if fl2 == fla:
1268 1268 return fl1
1269 1269 return '' # punt for conflicts
1270 1270
1271 1271 return func
1272 1272
1273 1273 @propertycache
1274 1274 def _flagfunc(self):
1275 1275 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1276 1276
1277 1277 @propertycache
1278 1278 def _status(self):
1279 1279 return self._repo.status()
1280 1280
1281 1281 @propertycache
1282 1282 def _user(self):
1283 1283 return self._repo.ui.username()
1284 1284
1285 1285 @propertycache
1286 1286 def _date(self):
1287 1287 return util.makedate()
1288 1288
1289 1289 def subrev(self, subpath):
1290 1290 return None
1291 1291
1292 1292 def manifestnode(self):
1293 1293 return None
1294 1294 def user(self):
1295 1295 return self._user or self._repo.ui.username()
1296 1296 def date(self):
1297 1297 return self._date
1298 1298 def description(self):
1299 1299 return self._text
1300 1300 def files(self):
1301 1301 return sorted(self._status.modified + self._status.added +
1302 1302 self._status.removed)
1303 1303
1304 1304 def modified(self):
1305 1305 return self._status.modified
1306 1306 def added(self):
1307 1307 return self._status.added
1308 1308 def removed(self):
1309 1309 return self._status.removed
1310 1310 def deleted(self):
1311 1311 return self._status.deleted
1312 1312 def branch(self):
1313 1313 return encoding.tolocal(self._extra['branch'])
1314 1314 def closesbranch(self):
1315 1315 return 'close' in self._extra
1316 1316 def extra(self):
1317 1317 return self._extra
1318 1318
1319 1319 def tags(self):
1320 1320 return []
1321 1321
1322 1322 def bookmarks(self):
1323 1323 b = []
1324 1324 for p in self.parents():
1325 1325 b.extend(p.bookmarks())
1326 1326 return b
1327 1327
1328 1328 def phase(self):
1329 1329 phase = phases.draft # default phase to draft
1330 1330 for p in self.parents():
1331 1331 phase = max(phase, p.phase())
1332 1332 return phase
1333 1333
1334 1334 def hidden(self):
1335 1335 return False
1336 1336
1337 1337 def children(self):
1338 1338 return []
1339 1339
1340 1340 def flags(self, path):
1341 1341 if '_manifest' in self.__dict__:
1342 1342 try:
1343 1343 return self._manifest.flags(path)
1344 1344 except KeyError:
1345 1345 return ''
1346 1346
1347 1347 try:
1348 1348 return self._flagfunc(path)
1349 1349 except OSError:
1350 1350 return ''
1351 1351
1352 1352 def ancestor(self, c2):
1353 1353 """return the "best" ancestor context of self and c2"""
1354 1354 return self._parents[0].ancestor(c2) # punt on two parents for now
1355 1355
1356 1356 def walk(self, match):
1357 1357 '''Generates matching file names.'''
1358 1358 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1359 1359 True, False))
1360 1360
1361 1361 def matches(self, match):
1362 1362 return sorted(self._repo.dirstate.matches(match))
1363 1363
1364 1364 def ancestors(self):
1365 1365 for p in self._parents:
1366 1366 yield p
1367 1367 for a in self._repo.changelog.ancestors(
1368 1368 [p.rev() for p in self._parents]):
1369 1369 yield changectx(self._repo, a)
1370 1370
1371 1371 def markcommitted(self, node):
1372 1372 """Perform post-commit cleanup necessary after committing this ctx
1373 1373
1374 1374 Specifically, this updates backing stores this working context
1375 1375 wraps to reflect the fact that the changes reflected by this
1376 1376 workingctx have been committed. For example, it marks
1377 1377 modified and added files as normal in the dirstate.
1378 1378
1379 1379 """
1380 1380
1381 1381 self._repo.dirstate.beginparentchange()
1382 1382 for f in self.modified() + self.added():
1383 1383 self._repo.dirstate.normal(f)
1384 1384 for f in self.removed():
1385 1385 self._repo.dirstate.drop(f)
1386 1386 self._repo.dirstate.setparents(node)
1387 1387 self._repo.dirstate.endparentchange()
1388 1388
1389 1389 # write changes out explicitly, because nesting wlock at
1390 1390 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1391 1391 # from immediately doing so for subsequent changing files
1392 1392 self._repo.dirstate.write(self._repo.currenttransaction())
1393 1393
1394 1394 class workingctx(committablectx):
1395 1395 """A workingctx object makes access to data related to
1396 1396 the current working directory convenient.
1397 1397 date - any valid date string or (unixtime, offset), or None.
1398 1398 user - username string, or None.
1399 1399 extra - a dictionary of extra values, or None.
1400 1400 changes - a list of file lists as returned by localrepo.status()
1401 1401 or None to use the repository status.
1402 1402 """
1403 1403 def __init__(self, repo, text="", user=None, date=None, extra=None,
1404 1404 changes=None):
1405 1405 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1406 1406
1407 1407 def __iter__(self):
1408 1408 d = self._repo.dirstate
1409 1409 for f in d:
1410 1410 if d[f] != 'r':
1411 1411 yield f
1412 1412
1413 1413 def __contains__(self, key):
1414 1414 return self._repo.dirstate[key] not in "?r"
1415 1415
1416 1416 def hex(self):
1417 1417 return hex(wdirid)
1418 1418
1419 1419 @propertycache
1420 1420 def _parents(self):
1421 1421 p = self._repo.dirstate.parents()
1422 1422 if p[1] == nullid:
1423 1423 p = p[:-1]
1424 1424 return [changectx(self._repo, x) for x in p]
1425 1425
1426 1426 def filectx(self, path, filelog=None):
1427 1427 """get a file context from the working directory"""
1428 1428 return workingfilectx(self._repo, path, workingctx=self,
1429 1429 filelog=filelog)
1430 1430
1431 1431 def dirty(self, missing=False, merge=True, branch=True):
1432 1432 "check whether a working directory is modified"
1433 1433 # check subrepos first
1434 1434 for s in sorted(self.substate):
1435 1435 if self.sub(s).dirty():
1436 1436 return True
1437 1437 # check current working dir
1438 1438 return ((merge and self.p2()) or
1439 1439 (branch and self.branch() != self.p1().branch()) or
1440 1440 self.modified() or self.added() or self.removed() or
1441 1441 (missing and self.deleted()))
1442 1442
1443 1443 def add(self, list, prefix=""):
1444 1444 join = lambda f: os.path.join(prefix, f)
1445 1445 with self._repo.wlock():
1446 1446 ui, ds = self._repo.ui, self._repo.dirstate
1447 1447 rejected = []
1448 1448 lstat = self._repo.wvfs.lstat
1449 1449 for f in list:
1450 1450 scmutil.checkportable(ui, join(f))
1451 1451 try:
1452 1452 st = lstat(f)
1453 1453 except OSError:
1454 1454 ui.warn(_("%s does not exist!\n") % join(f))
1455 1455 rejected.append(f)
1456 1456 continue
1457 1457 if st.st_size > 10000000:
1458 1458 ui.warn(_("%s: up to %d MB of RAM may be required "
1459 1459 "to manage this file\n"
1460 1460 "(use 'hg revert %s' to cancel the "
1461 1461 "pending addition)\n")
1462 1462 % (f, 3 * st.st_size // 1000000, join(f)))
1463 1463 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1464 1464 ui.warn(_("%s not added: only files and symlinks "
1465 1465 "supported currently\n") % join(f))
1466 1466 rejected.append(f)
1467 1467 elif ds[f] in 'amn':
1468 1468 ui.warn(_("%s already tracked!\n") % join(f))
1469 1469 elif ds[f] == 'r':
1470 1470 ds.normallookup(f)
1471 1471 else:
1472 1472 ds.add(f)
1473 1473 return rejected
1474 1474
1475 1475 def forget(self, files, prefix=""):
1476 1476 join = lambda f: os.path.join(prefix, f)
1477 1477 with self._repo.wlock():
1478 1478 rejected = []
1479 1479 for f in files:
1480 1480 if f not in self._repo.dirstate:
1481 1481 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1482 1482 rejected.append(f)
1483 1483 elif self._repo.dirstate[f] != 'a':
1484 1484 self._repo.dirstate.remove(f)
1485 1485 else:
1486 1486 self._repo.dirstate.drop(f)
1487 1487 return rejected
1488 1488
1489 1489 def undelete(self, list):
1490 1490 pctxs = self.parents()
1491 1491 with self._repo.wlock():
1492 1492 for f in list:
1493 1493 if self._repo.dirstate[f] != 'r':
1494 1494 self._repo.ui.warn(_("%s not removed!\n") % f)
1495 1495 else:
1496 1496 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1497 1497 t = fctx.data()
1498 1498 self._repo.wwrite(f, t, fctx.flags())
1499 1499 self._repo.dirstate.normal(f)
1500 1500
1501 1501 def copy(self, source, dest):
1502 1502 try:
1503 1503 st = self._repo.wvfs.lstat(dest)
1504 1504 except OSError as err:
1505 1505 if err.errno != errno.ENOENT:
1506 1506 raise
1507 1507 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1508 1508 return
1509 1509 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1510 1510 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1511 1511 "symbolic link\n") % dest)
1512 1512 else:
1513 1513 with self._repo.wlock():
1514 1514 if self._repo.dirstate[dest] in '?':
1515 1515 self._repo.dirstate.add(dest)
1516 1516 elif self._repo.dirstate[dest] in 'r':
1517 1517 self._repo.dirstate.normallookup(dest)
1518 1518 self._repo.dirstate.copy(source, dest)
1519 1519
1520 1520 def match(self, pats=None, include=None, exclude=None, default='glob',
1521 1521 listsubrepos=False, badfn=None):
1522 1522 if pats is None:
1523 1523 pats = []
1524 1524 r = self._repo
1525 1525
1526 1526 # Only a case insensitive filesystem needs magic to translate user input
1527 1527 # to actual case in the filesystem.
1528 matcherfunc = matchmod.match
1528 1529 if not util.fscasesensitive(r.root):
1529 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats,
1530 include, exclude, default, r.auditor,
1531 self, listsubrepos=listsubrepos,
1532 badfn=badfn)
1533 return matchmod.match(r.root, r.getcwd(), pats,
1530 matcherfunc = matchmod.icasefsmatcher
1531 return matcherfunc(r.root, r.getcwd(), pats,
1534 1532 include, exclude, default,
1535 1533 auditor=r.auditor, ctx=self,
1536 1534 listsubrepos=listsubrepos, badfn=badfn)
1537 1535
1538 1536 def _filtersuspectsymlink(self, files):
1539 1537 if not files or self._repo.dirstate._checklink:
1540 1538 return files
1541 1539
1542 1540 # Symlink placeholders may get non-symlink-like contents
1543 1541 # via user error or dereferencing by NFS or Samba servers,
1544 1542 # so we filter out any placeholders that don't look like a
1545 1543 # symlink
1546 1544 sane = []
1547 1545 for f in files:
1548 1546 if self.flags(f) == 'l':
1549 1547 d = self[f].data()
1550 1548 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1551 1549 self._repo.ui.debug('ignoring suspect symlink placeholder'
1552 1550 ' "%s"\n' % f)
1553 1551 continue
1554 1552 sane.append(f)
1555 1553 return sane
1556 1554
1557 1555 def _checklookup(self, files):
1558 1556 # check for any possibly clean files
1559 1557 if not files:
1560 1558 return [], []
1561 1559
1562 1560 modified = []
1563 1561 fixup = []
1564 1562 pctx = self._parents[0]
1565 1563 # do a full compare of any files that might have changed
1566 1564 for f in sorted(files):
1567 1565 if (f not in pctx or self.flags(f) != pctx.flags(f)
1568 1566 or pctx[f].cmp(self[f])):
1569 1567 modified.append(f)
1570 1568 else:
1571 1569 fixup.append(f)
1572 1570
1573 1571 # update dirstate for files that are actually clean
1574 1572 if fixup:
1575 1573 try:
1576 1574 # updating the dirstate is optional
1577 1575 # so we don't wait on the lock
1578 1576 # wlock can invalidate the dirstate, so cache normal _after_
1579 1577 # taking the lock
1580 1578 with self._repo.wlock(False):
1581 1579 normal = self._repo.dirstate.normal
1582 1580 for f in fixup:
1583 1581 normal(f)
1584 1582 # write changes out explicitly, because nesting
1585 1583 # wlock at runtime may prevent 'wlock.release()'
1586 1584 # after this block from doing so for subsequent
1587 1585 # changing files
1588 1586 self._repo.dirstate.write(self._repo.currenttransaction())
1589 1587 except error.LockError:
1590 1588 pass
1591 1589 return modified, fixup
1592 1590
1593 1591 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1594 1592 unknown=False):
1595 1593 '''Gets the status from the dirstate -- internal use only.'''
1596 1594 listignored, listclean, listunknown = ignored, clean, unknown
1597 1595 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1598 1596 subrepos = []
1599 1597 if '.hgsub' in self:
1600 1598 subrepos = sorted(self.substate)
1601 1599 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1602 1600 listclean, listunknown)
1603 1601
1604 1602 # check for any possibly clean files
1605 1603 if cmp:
1606 1604 modified2, fixup = self._checklookup(cmp)
1607 1605 s.modified.extend(modified2)
1608 1606
1609 1607 # update dirstate for files that are actually clean
1610 1608 if fixup and listclean:
1611 1609 s.clean.extend(fixup)
1612 1610
1613 1611 if match.always():
1614 1612 # cache for performance
1615 1613 if s.unknown or s.ignored or s.clean:
1616 1614 # "_status" is cached with list*=False in the normal route
1617 1615 self._status = scmutil.status(s.modified, s.added, s.removed,
1618 1616 s.deleted, [], [], [])
1619 1617 else:
1620 1618 self._status = s
1621 1619
1622 1620 return s
1623 1621
1624 1622 @propertycache
1625 1623 def _manifest(self):
1626 1624 """generate a manifest corresponding to the values in self._status
1627 1625
1628 1626 This reuses the file nodeid from the parent, but we use special node
1629 1627 identifiers for added and modified files. This is used by manifest
1630 1628 merging to see that files are different and by update logic to avoid
1631 1629 deleting newly added files.
1632 1630 """
1633 1631 return self._buildstatusmanifest(self._status)
1634 1632
1635 1633 def _buildstatusmanifest(self, status):
1636 1634 """Builds a manifest that includes the given status results."""
1637 1635 parents = self.parents()
1638 1636
1639 1637 man = parents[0].manifest().copy()
1640 1638
1641 1639 ff = self._flagfunc
1642 1640 for i, l in ((addednodeid, status.added),
1643 1641 (modifiednodeid, status.modified)):
1644 1642 for f in l:
1645 1643 man[f] = i
1646 1644 try:
1647 1645 man.setflag(f, ff(f))
1648 1646 except OSError:
1649 1647 pass
1650 1648
1651 1649 for f in status.deleted + status.removed:
1652 1650 if f in man:
1653 1651 del man[f]
1654 1652
1655 1653 return man
1656 1654
1657 1655 def _buildstatus(self, other, s, match, listignored, listclean,
1658 1656 listunknown):
1659 1657 """build a status with respect to another context
1660 1658
1661 1659 This includes logic for maintaining the fast path of status when
1662 1660 comparing the working directory against its parent: building a new
1663 1661 manifest is skipped when the other context is the working directory's
1664 1662 parent (repo['.']).
1665 1663 """
1666 1664 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1667 1665 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1668 1666 # might have accidentally ended up with the entire contents of the file
1669 1667 # they are supposed to be linking to.
1670 1668 s.modified[:] = self._filtersuspectsymlink(s.modified)
1671 1669 if other != self._repo['.']:
1672 1670 s = super(workingctx, self)._buildstatus(other, s, match,
1673 1671 listignored, listclean,
1674 1672 listunknown)
1675 1673 return s
1676 1674
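A short sketch of the fast path described above (illustrative only, not part of context.py; assumes a loaded `repo` with at least one revision):

    wctx = repo[None]
    wctx.status()    # default 'other' is '.', the working directory's parent:
                     # the dirstate status is returned directly (fast path)
    wctx.status(0)   # any other revision falls back to the manifest comparison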
1677 1675 def _matchstatus(self, other, match):
1678 1676 """override the match method with a filter for directory patterns
1679 1677
1680 1678 We use inheritance to customize the match.bad method only in cases of
1681 1679 workingctx, since it only applies to the working directory when
1682 1680 comparing against a changeset other than its parent.
1683 1681
1684 1682 When comparing against the working directory's parent, we just use the
1685 1683 default match object sent to us.
1686 1684 """
1687 1685 superself = super(workingctx, self)
1688 1686 match = superself._matchstatus(other, match)
1689 1687 if other != self._repo['.']:
1690 1688 def bad(f, msg):
1691 1689 # 'f' may be a directory pattern from 'match.files()',
1692 1690 # so 'f not in ctx1' is not enough
1693 1691 if f not in other and not other.hasdir(f):
1694 1692 self._repo.ui.warn('%s: %s\n' %
1695 1693 (self._repo.dirstate.pathto(f), msg))
1696 1694 match.bad = bad
1697 1695 return match
1698 1696
1699 1697 class committablefilectx(basefilectx):
1700 1698 """A committablefilectx provides common functionality for a file context
1701 1699 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1702 1700 def __init__(self, repo, path, filelog=None, ctx=None):
1703 1701 self._repo = repo
1704 1702 self._path = path
1705 1703 self._changeid = None
1706 1704 self._filerev = self._filenode = None
1707 1705
1708 1706 if filelog is not None:
1709 1707 self._filelog = filelog
1710 1708 if ctx:
1711 1709 self._changectx = ctx
1712 1710
1713 1711 def __nonzero__(self):
1714 1712 return True
1715 1713
1716 1714 def linkrev(self):
1717 1715 # linked to self._changectx no matter if file is modified or not
1718 1716 return self.rev()
1719 1717
1720 1718 def parents(self):
1721 1719 '''return parent filectxs, following copies if necessary'''
1722 1720 def filenode(ctx, path):
1723 1721 return ctx._manifest.get(path, nullid)
1724 1722
1725 1723 path = self._path
1726 1724 fl = self._filelog
1727 1725 pcl = self._changectx._parents
1728 1726 renamed = self.renamed()
1729 1727
1730 1728 if renamed:
1731 1729 pl = [renamed + (None,)]
1732 1730 else:
1733 1731 pl = [(path, filenode(pcl[0], path), fl)]
1734 1732
1735 1733 for pc in pcl[1:]:
1736 1734 pl.append((path, filenode(pc, path), fl))
1737 1735
1738 1736 return [self._parentfilectx(p, fileid=n, filelog=l)
1739 1737 for p, n, l in pl if n != nullid]
1740 1738
1741 1739 def children(self):
1742 1740 return []
1743 1741
1744 1742 class workingfilectx(committablefilectx):
1745 1743 """A workingfilectx object makes access to data related to a particular
1746 1744 file in the working directory convenient."""
1747 1745 def __init__(self, repo, path, filelog=None, workingctx=None):
1748 1746 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1749 1747
1750 1748 @propertycache
1751 1749 def _changectx(self):
1752 1750 return workingctx(self._repo)
1753 1751
1754 1752 def data(self):
1755 1753 return self._repo.wread(self._path)
1756 1754 def renamed(self):
1757 1755 rp = self._repo.dirstate.copied(self._path)
1758 1756 if not rp:
1759 1757 return None
1760 1758 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1761 1759
1762 1760 def size(self):
1763 1761 return self._repo.wvfs.lstat(self._path).st_size
1764 1762 def date(self):
1765 1763 t, tz = self._changectx.date()
1766 1764 try:
1767 1765 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1768 1766 except OSError as err:
1769 1767 if err.errno != errno.ENOENT:
1770 1768 raise
1771 1769 return (t, tz)
1772 1770
1773 1771 def cmp(self, fctx):
1774 1772 """compare with other file context
1775 1773
1776 1774 returns True if different than fctx.
1777 1775 """
1778 1776 # fctx should be a filectx (not a workingfilectx)
1779 1777 # invert comparison to reuse the same code path
1780 1778 return fctx.cmp(self)
1781 1779
1782 1780 def remove(self, ignoremissing=False):
1783 1781 """wraps unlink for a repo's working directory"""
1784 1782 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1785 1783
1786 1784 def write(self, data, flags):
1787 1785 """wraps repo.wwrite"""
1788 1786 self._repo.wwrite(self._path, data, flags)
1789 1787
1790 1788 class workingcommitctx(workingctx):
1791 1789 """A workingcommitctx object makes access to data related to
1792 1790 the revision being committed convenient.
1793 1791
1794 1792 This hides changes in the working directory, if they aren't
1795 1793 committed in this context.
1796 1794 """
1797 1795 def __init__(self, repo, changes,
1798 1796 text="", user=None, date=None, extra=None):
1799 1797 super(workingctx, self).__init__(repo, text, user, date, extra,
1800 1798 changes)
1801 1799
1802 1800 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1803 1801 unknown=False):
1804 1802 """Return matched files only in ``self._status``
1805 1803
1806 1804 Uncommitted files appear "clean" via this context, even if
1807 1805 they aren't actually so in the working directory.
1808 1806 """
1809 1807 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1810 1808 if clean:
1811 1809 clean = [f for f in self._manifest if f not in self._changedset]
1812 1810 else:
1813 1811 clean = []
1814 1812 return scmutil.status([f for f in self._status.modified if match(f)],
1815 1813 [f for f in self._status.added if match(f)],
1816 1814 [f for f in self._status.removed if match(f)],
1817 1815 [], [], [], clean)
1818 1816
1819 1817 @propertycache
1820 1818 def _changedset(self):
1821 1819 """Return the set of files changed in this context
1822 1820 """
1823 1821 changed = set(self._status.modified)
1824 1822 changed.update(self._status.added)
1825 1823 changed.update(self._status.removed)
1826 1824 return changed
1827 1825
1828 1826 def makecachingfilectxfn(func):
1829 1827 """Create a filectxfn that caches based on the path.
1830 1828
1831 1829 We can't use util.cachefunc because it uses all arguments as the cache
1832 1830 key and this creates a cycle since the arguments include the repo and
1833 1831 memctx.
1834 1832 """
1835 1833 cache = {}
1836 1834
1837 1835 def getfilectx(repo, memctx, path):
1838 1836 if path not in cache:
1839 1837 cache[path] = func(repo, memctx, path)
1840 1838 return cache[path]
1841 1839
1842 1840 return getfilectx
1843 1841
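A tiny illustration of the path-keyed cache (illustrative only, not part of context.py; the `load` callback and the None stand-ins for the repo and memctx arguments are made up for the example):

    calls = []
    def load(repo, memctx, path):
        calls.append(path)
        return 'fctx for %s' % path        # stand-in for a memfilectx

    getfilectx = makecachingfilectxfn(load)
    getfilectx(None, None, 'a.txt')
    getfilectx(None, None, 'a.txt')        # second lookup served from the cache
    assert calls == ['a.txt']

Keying on the path alone is what avoids the reference cycle mentioned above: the repo and memctx arguments never become part of the cache key.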
1844 1842 class memctx(committablectx):
1845 1843 """Use memctx to perform in-memory commits via localrepo.commitctx().
1846 1844
1847 1845 Revision information is supplied at initialization time, while
1848 1846 related file data is made available through a callback
1849 1847 mechanism. 'repo' is the current localrepo, 'parents' is a
1850 1848 sequence of two parent revision identifiers (pass None for every
1851 1849 missing parent), 'text' is the commit message and 'files' lists
1852 1850 names of files touched by the revision (normalized and relative to
1853 1851 repository root).
1854 1852
1855 1853 filectxfn(repo, memctx, path) is a callable receiving the
1856 1854 repository, the current memctx object and the normalized path of
1857 1855 requested file, relative to repository root. It is fired by the
1858 1856 commit function for every file in 'files', but the call order is
1859 1857 undefined. If the file is available in the revision being
1860 1858 committed (updated or added), filectxfn returns a memfilectx
1861 1859 object. If the file was removed, filectxfn raises an
1862 1860 IOError. Moved files are represented by marking the source file
1863 1861 removed and the new file added with copy information (see
1864 1862 memfilectx).
1865 1863
1866 1864 user receives the committer name and defaults to current
1867 1865 repository username, date is the commit date in any format
1868 1866 supported by util.parsedate() and defaults to current date, extra
1869 1867 is a dictionary of metadata or is left empty.
1870 1868 """
1871 1869
1872 1870 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1873 1871 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1874 1872 # this field to determine what to do in filectxfn.
1875 1873 _returnnoneformissingfiles = True
1876 1874
1877 1875 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1878 1876 date=None, extra=None, editor=False):
1879 1877 super(memctx, self).__init__(repo, text, user, date, extra)
1880 1878 self._rev = None
1881 1879 self._node = None
1882 1880 parents = [(p or nullid) for p in parents]
1883 1881 p1, p2 = parents
1884 1882 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1885 1883 files = sorted(set(files))
1886 1884 self._files = files
1887 1885 self.substate = {}
1888 1886
1889 1887 # if store is not callable, wrap it in a function
1890 1888 if not callable(filectxfn):
1891 1889 def getfilectx(repo, memctx, path):
1892 1890 fctx = filectxfn[path]
1893 1891 # this is weird but apparently we only keep track of one parent
1894 1892 # (why not only store that instead of a tuple?)
1895 1893 copied = fctx.renamed()
1896 1894 if copied:
1897 1895 copied = copied[0]
1898 1896 return memfilectx(repo, path, fctx.data(),
1899 1897 islink=fctx.islink(), isexec=fctx.isexec(),
1900 1898 copied=copied, memctx=memctx)
1901 1899 self._filectxfn = getfilectx
1902 1900 else:
1903 1901 # memoizing increases performance for e.g. vcs convert scenarios.
1904 1902 self._filectxfn = makecachingfilectxfn(filectxfn)
1905 1903
1906 1904 if extra:
1907 1905 self._extra = extra.copy()
1908 1906 else:
1909 1907 self._extra = {}
1910 1908
1911 1909 if self._extra.get('branch', '') == '':
1912 1910 self._extra['branch'] = 'default'
1913 1911
1914 1912 if editor:
1915 1913 self._text = editor(self._repo, self, [])
1916 1914 self._repo.savecommitmessage(self._text)
1917 1915
1918 1916 def filectx(self, path, filelog=None):
1919 1917 """get a file context from the working directory
1920 1918
1921 1919 Returns None if file doesn't exist and should be removed."""
1922 1920 return self._filectxfn(self._repo, self, path)
1923 1921
1924 1922 def commit(self):
1925 1923 """commit context to the repo"""
1926 1924 return self._repo.commitctx(self)
1927 1925
1928 1926 @propertycache
1929 1927 def _manifest(self):
1930 1928 """generate a manifest based on the return values of filectxfn"""
1931 1929
1932 1930 # keep this simple for now; just worry about p1
1933 1931 pctx = self._parents[0]
1934 1932 man = pctx.manifest().copy()
1935 1933
1936 1934 for f in self._status.modified:
1937 1935 p1node = nullid
1938 1936 p2node = nullid
1939 1937 p = pctx[f].parents() # if file isn't in pctx, check p2?
1940 1938 if len(p) > 0:
1941 1939 p1node = p[0].filenode()
1942 1940 if len(p) > 1:
1943 1941 p2node = p[1].filenode()
1944 1942 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1945 1943
1946 1944 for f in self._status.added:
1947 1945 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1948 1946
1949 1947 for f in self._status.removed:
1950 1948 if f in man:
1951 1949 del man[f]
1952 1950
1953 1951 return man
1954 1952
1955 1953 @propertycache
1956 1954 def _status(self):
1957 1955 """Calculate exact status from ``files`` specified at construction
1958 1956 """
1959 1957 man1 = self.p1().manifest()
1960 1958 p2 = self._parents[1]
1961 1959 # "1 < len(self._parents)" can't be used for checking
1962 1960 # existence of the 2nd parent, because "memctx._parents" is
1963 1961 # explicitly initialized as a list whose length is 2.
1964 1962 if p2.node() != nullid:
1965 1963 man2 = p2.manifest()
1966 1964 managing = lambda f: f in man1 or f in man2
1967 1965 else:
1968 1966 managing = lambda f: f in man1
1969 1967
1970 1968 modified, added, removed = [], [], []
1971 1969 for f in self._files:
1972 1970 if not managing(f):
1973 1971 added.append(f)
1974 1972 elif self[f]:
1975 1973 modified.append(f)
1976 1974 else:
1977 1975 removed.append(f)
1978 1976
1979 1977 return scmutil.status(modified, added, removed, [], [], [], [])
1980 1978
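A hedged sketch of the callback mechanism the memctx docstring describes (illustrative only, not part of context.py; it assumes a loaded `repo`, and the file name, message and user are made up). filectxfn returns a memfilectx (defined just below) for paths present in the new revision and None for removed ones:

    def getfilectx(repo, memctx, path):
        if path == 'hello.txt':
            return memfilectx(repo, path, 'hello\n', memctx=memctx)
        return None                  # any other requested path is treated as removed

    mctx = memctx(repo,
                  parents=(repo['.'].node(), None),
                  text='add hello.txt (sketch)',
                  files=['hello.txt'],
                  filectxfn=getfilectx,
                  user='an example <user@example.org>')
    newnode = mctx.commit()          # delegates to repo.commitctx(mctx)

Passing a plain dict of {path: filectx} instead of a callable also works: as shown in __init__ above, a non-callable filectxfn is wrapped in a getfilectx function that indexes it by path.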
1981 1979 class memfilectx(committablefilectx):
1982 1980 """memfilectx represents an in-memory file to commit.
1983 1981
1984 1982 See memctx and committablefilectx for more details.
1985 1983 """
1986 1984 def __init__(self, repo, path, data, islink=False,
1987 1985 isexec=False, copied=None, memctx=None):
1988 1986 """
1989 1987 path is the normalized file path relative to repository root.
1990 1988 data is the file content as a string.
1991 1989 islink is True if the file is a symbolic link.
1992 1990 isexec is True if the file is executable.
1993 1991 copied is the source file path if current file was copied in the
1994 1992 revision being committed, or None."""
1995 1993 super(memfilectx, self).__init__(repo, path, None, memctx)
1996 1994 self._data = data
1997 1995 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1998 1996 self._copied = None
1999 1997 if copied:
2000 1998 self._copied = (copied, nullid)
2001 1999
2002 2000 def data(self):
2003 2001 return self._data
2004 2002 def size(self):
2005 2003 return len(self.data())
2006 2004 def flags(self):
2007 2005 return self._flags
2008 2006 def renamed(self):
2009 2007 return self._copied
2010 2008
2011 2009 def remove(self, ignoremissing=False):
2012 2010 """wraps unlink for a repo's working directory"""
2013 2011 # need to figure out what to do here
2014 2012 del self._changectx[self._path]
2015 2013
2016 2014 def write(self, data, flags):
2017 2015 """wraps repo.wwrite"""
2018 2016 self._data = data
2019 2017
2020 2018 class metadataonlyctx(committablectx):
2021 2019 """Like memctx but it's reusing the manifest of different commit.
2022 2020 Intended to be used by lightweight operations that are creating
2023 2021 metadata-only changes.
2024 2022
2025 2023 Revision information is supplied at initialization time. 'repo' is the
2026 2024 current localrepo, 'originalctx' is the revision whose manifest we're reusing,
2027 2025 'parents' is a sequence of two parent revision identifiers (pass None for
2028 2026 every missing parent), 'text' is the commit message.
2029 2027
2030 2028 user receives the committer name and defaults to current repository
2031 2029 username, date is the commit date in any format supported by
2032 2030 util.parsedate() and defaults to current date, extra is a dictionary of
2033 2031 metadata or is left empty.
2034 2032 """
2035 2033 def __new__(cls, repo, originalctx, *args, **kwargs):
2036 2034 return super(metadataonlyctx, cls).__new__(cls, repo)
2037 2035
2038 2036 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2039 2037 extra=None, editor=False):
2040 2038 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2041 2039 self._rev = None
2042 2040 self._node = None
2043 2041 self._originalctx = originalctx
2044 2042 self._manifestnode = originalctx.manifestnode()
2045 2043 parents = [(p or nullid) for p in parents]
2046 2044 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2047 2045
2048 2046 # sanity check to ensure that the reused manifest parents are
2049 2047 # manifests of our commit parents
2050 2048 mp1, mp2 = self.manifestctx().parents
2051 2049 if p1 != nullid and p1.manifestctx().node() != mp1:
2052 2050 raise RuntimeError('can\'t reuse the manifest: '
2053 2051 'its p1 doesn\'t match the new ctx p1')
2054 2052 if p2 != nullid and p2.manifestctx().node() != mp2:
2055 2053 raise RuntimeError('can\'t reuse the manifest: '
2056 2054 'its p2 doesn\'t match the new ctx p2')
2057 2055
2058 2056 self._files = originalctx.files()
2059 2057 self.substate = {}
2060 2058
2061 2059 if extra:
2062 2060 self._extra = extra.copy()
2063 2061 else:
2064 2062 self._extra = {}
2065 2063
2066 2064 if self._extra.get('branch', '') == '':
2067 2065 self._extra['branch'] = 'default'
2068 2066
2069 2067 if editor:
2070 2068 self._text = editor(self._repo, self, [])
2071 2069 self._repo.savecommitmessage(self._text)
2072 2070
2073 2071 def manifestnode(self):
2074 2072 return self._manifestnode
2075 2073
2076 2074 @propertycache
2077 2075 def _manifestctx(self):
2078 2076 return self._repo.manifestlog[self._manifestnode]
2079 2077
2080 2078 def filectx(self, path, filelog=None):
2081 2079 return self._originalctx.filectx(path, filelog=filelog)
2082 2080
2083 2081 def commit(self):
2084 2082 """commit context to the repo"""
2085 2083 return self._repo.commitctx(self)
2086 2084
2087 2085 @property
2088 2086 def _manifest(self):
2089 2087 return self._originalctx.manifest()
2090 2088
2091 2089 @propertycache
2092 2090 def _status(self):
2093 2091 """Calculate exact status from ``files`` specified in the ``origctx``
2094 2092 and parents manifests.
2095 2093 """
2096 2094 man1 = self.p1().manifest()
2097 2095 p2 = self._parents[1]
2098 2096 # "1 < len(self._parents)" can't be used for checking
2099 2097 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2100 2098 # explicitly initialized as a list whose length is 2.
2101 2099 if p2.node() != nullid:
2102 2100 man2 = p2.manifest()
2103 2101 managing = lambda f: f in man1 or f in man2
2104 2102 else:
2105 2103 managing = lambda f: f in man1
2106 2104
2107 2105 modified, added, removed = [], [], []
2108 2106 for f in self._files:
2109 2107 if not managing(f):
2110 2108 added.append(f)
2111 2109 elif self[f]:
2112 2110 modified.append(f)
2113 2111 else:
2114 2112 removed.append(f)
2115 2113
2116 2114 return scmutil.status(modified, added, removed, [], [], [], [])
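A hedged sketch of metadataonlyctx in use (illustrative only, not part of context.py; it assumes a loaded `repo` and a made-up replacement message). The original changeset's manifest and file list are reused, so only the changelog metadata changes; the new parents must carry the same manifests as the original's parents or the sanity check above raises RuntimeError:

    old = repo['.']
    new = metadataonlyctx(repo, old,
                          parents=(old.p1().node(), old.p2().node()),
                          text='reworded commit message (sketch)',
                          user=old.user(),
                          date=old.date(),
                          extra=old.extra())
    newnode = new.commit()           # delegates to repo.commitctx(new)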