context: work around `long` not existing on Python 3...
Augie Fackler
r31343:ff2f9050 default
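The change itself is small: it adds a `pycompat` import and guards the Python 2-only `isinstance(changeid, long)` check behind `not pycompat.ispy3`, so the name `long` is never evaluated on Python 3, where the builtin no longer exists, while `long` revision numbers on Python 2 are still converted to strings. A minimal standalone sketch of the same short-circuit pattern (the `ispy3` flag and `normalize_changeid` helper here are illustrative stand-ins, not Mercurial's API):

import sys

# True on Python 3, where the builtin ``long`` type no longer exists.
ispy3 = sys.version_info[0] >= 3

def normalize_changeid(changeid):
    # Hypothetical helper mirroring the guarded check added by this commit:
    # ``not ispy3`` short-circuits on Python 3, so the name ``long`` is
    # never looked up there and no NameError can be raised.
    if not ispy3 and isinstance(changeid, long):  # noqa: F821 (Python 2 only)
        changeid = str(changeid)
    return changeid

Because `and` short-circuits, the module stays importable and runnable under both interpreters; only Python 2 ever evaluates the `long` name.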
@@ -1,2105 +1,2106
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 pycompat,
36 37 repoview,
37 38 revlog,
38 39 scmutil,
39 40 subrepo,
40 41 util,
41 42 )
42 43
43 44 propertycache = util.propertycache
44 45
45 46 nonascii = re.compile(r'[^\x21-\x7f]').search
46 47
47 48 class basectx(object):
48 49 """A basectx object represents the common logic for its children:
49 50 changectx: read-only context that is already present in the repo,
50 51 workingctx: a context that represents the working directory and can
51 52 be committed,
52 53 memctx: a context that represents changes in-memory and can also
53 54 be committed."""
54 55 def __new__(cls, repo, changeid='', *args, **kwargs):
55 56 if isinstance(changeid, basectx):
56 57 return changeid
57 58
58 59 o = super(basectx, cls).__new__(cls)
59 60
60 61 o._repo = repo
61 62 o._rev = nullrev
62 63 o._node = nullid
63 64
64 65 return o
65 66
66 67 def __str__(self):
67 68 return short(self.node())
68 69
69 70 def __int__(self):
70 71 return self.rev()
71 72
72 73 def __repr__(self):
73 74 return "<%s %s>" % (type(self).__name__, str(self))
74 75
75 76 def __eq__(self, other):
76 77 try:
77 78 return type(self) == type(other) and self._rev == other._rev
78 79 except AttributeError:
79 80 return False
80 81
81 82 def __ne__(self, other):
82 83 return not (self == other)
83 84
84 85 def __contains__(self, key):
85 86 return key in self._manifest
86 87
87 88 def __getitem__(self, key):
88 89 return self.filectx(key)
89 90
90 91 def __iter__(self):
91 92 return iter(self._manifest)
92 93
93 94 def _buildstatusmanifest(self, status):
94 95 """Builds a manifest that includes the given status results, if this is
95 96 a working copy context. For non-working copy contexts, it just returns
96 97 the normal manifest."""
97 98 return self.manifest()
98 99
99 100 def _matchstatus(self, other, match):
100 101 """return match.always if match is none
101 102
102 103 This internal method provides a way for child objects to override the
103 104 match operator.
104 105 """
105 106 return match or matchmod.always(self._repo.root, self._repo.getcwd())
106 107
107 108 def _buildstatus(self, other, s, match, listignored, listclean,
108 109 listunknown):
109 110 """build a status with respect to another context"""
110 111 # Load earliest manifest first for caching reasons. More specifically,
111 112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 114 # 1000 and cache it so that when you read 1001, we just need to apply a
114 115 # delta to what's in the cache. So that's one full reconstruction + one
115 116 # delta application.
116 117 mf2 = None
117 118 if self.rev() is not None and self.rev() < other.rev():
118 119 mf2 = self._buildstatusmanifest(s)
119 120 mf1 = other._buildstatusmanifest(s)
120 121 if mf2 is None:
121 122 mf2 = self._buildstatusmanifest(s)
122 123
123 124 modified, added = [], []
124 125 removed = []
125 126 clean = []
126 127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 128 deletedset = set(deleted)
128 129 d = mf1.diff(mf2, match=match, clean=listclean)
129 130 for fn, value in d.iteritems():
130 131 if fn in deletedset:
131 132 continue
132 133 if value is None:
133 134 clean.append(fn)
134 135 continue
135 136 (node1, flag1), (node2, flag2) = value
136 137 if node1 is None:
137 138 added.append(fn)
138 139 elif node2 is None:
139 140 removed.append(fn)
140 141 elif flag1 != flag2:
141 142 modified.append(fn)
142 143 elif node2 not in wdirnodes:
143 144 # When comparing files between two commits, we save time by
144 145 # not comparing the file contents when the nodeids differ.
145 146 # Note that this means we incorrectly report a reverted change
146 147 # to a file as a modification.
147 148 modified.append(fn)
148 149 elif self[fn].cmp(other[fn]):
149 150 modified.append(fn)
150 151 else:
151 152 clean.append(fn)
152 153
153 154 if removed:
154 155 # need to filter files if they are already reported as removed
155 156 unknown = [fn for fn in unknown if fn not in mf1 and
156 157 (not match or match(fn))]
157 158 ignored = [fn for fn in ignored if fn not in mf1 and
158 159 (not match or match(fn))]
159 160 # if they're deleted, don't report them as removed
160 161 removed = [fn for fn in removed if fn not in deletedset]
161 162
162 163 return scmutil.status(modified, added, removed, deleted, unknown,
163 164 ignored, clean)
164 165
165 166 @propertycache
166 167 def substate(self):
167 168 return subrepo.state(self, self._repo.ui)
168 169
169 170 def subrev(self, subpath):
170 171 return self.substate[subpath][1]
171 172
172 173 def rev(self):
173 174 return self._rev
174 175 def node(self):
175 176 return self._node
176 177 def hex(self):
177 178 return hex(self.node())
178 179 def manifest(self):
179 180 return self._manifest
180 181 def manifestctx(self):
181 182 return self._manifestctx
182 183 def repo(self):
183 184 return self._repo
184 185 def phasestr(self):
185 186 return phases.phasenames[self.phase()]
186 187 def mutable(self):
187 188 return self.phase() > phases.public
188 189
189 190 def getfileset(self, expr):
190 191 return fileset.getfileset(self, expr)
191 192
192 193 def obsolete(self):
193 194 """True if the changeset is obsolete"""
194 195 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
195 196
196 197 def extinct(self):
197 198 """True if the changeset is extinct"""
198 199 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
199 200
200 201 def unstable(self):
201 202 """True if the changeset is not obsolete but it's ancestor are"""
202 203 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
203 204
204 205 def bumped(self):
205 206 """True if the changeset try to be a successor of a public changeset
206 207
207 208 Only non-public and non-obsolete changesets may be bumped.
208 209 """
209 210 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
210 211
211 212 def divergent(self):
212 213 """Is a successors of a changeset with multiple possible successors set
213 214
214 215 Only non-public and non-obsolete changesets may be divergent.
215 216 """
216 217 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
217 218
218 219 def troubled(self):
219 220 """True if the changeset is either unstable, bumped or divergent"""
220 221 return self.unstable() or self.bumped() or self.divergent()
221 222
222 223 def troubles(self):
223 224 """return the list of troubles affecting this changesets.
224 225
225 226 Troubles are returned as strings. possible values are:
226 227 - unstable,
227 228 - bumped,
228 229 - divergent.
229 230 """
230 231 troubles = []
231 232 if self.unstable():
232 233 troubles.append('unstable')
233 234 if self.bumped():
234 235 troubles.append('bumped')
235 236 if self.divergent():
236 237 troubles.append('divergent')
237 238 return troubles
238 239
239 240 def parents(self):
240 241 """return contexts for each parent changeset"""
241 242 return self._parents
242 243
243 244 def p1(self):
244 245 return self._parents[0]
245 246
246 247 def p2(self):
247 248 parents = self._parents
248 249 if len(parents) == 2:
249 250 return parents[1]
250 251 return changectx(self._repo, nullrev)
251 252
252 253 def _fileinfo(self, path):
253 254 if '_manifest' in self.__dict__:
254 255 try:
255 256 return self._manifest[path], self._manifest.flags(path)
256 257 except KeyError:
257 258 raise error.ManifestLookupError(self._node, path,
258 259 _('not found in manifest'))
259 260 if '_manifestdelta' in self.__dict__ or path in self.files():
260 261 if path in self._manifestdelta:
261 262 return (self._manifestdelta[path],
262 263 self._manifestdelta.flags(path))
263 264 mfl = self._repo.manifestlog
264 265 try:
265 266 node, flag = mfl[self._changeset.manifest].find(path)
266 267 except KeyError:
267 268 raise error.ManifestLookupError(self._node, path,
268 269 _('not found in manifest'))
269 270
270 271 return node, flag
271 272
272 273 def filenode(self, path):
273 274 return self._fileinfo(path)[0]
274 275
275 276 def flags(self, path):
276 277 try:
277 278 return self._fileinfo(path)[1]
278 279 except error.LookupError:
279 280 return ''
280 281
281 282 def sub(self, path, allowcreate=True):
282 283 '''return a subrepo for the stored revision of path, never wdir()'''
283 284 return subrepo.subrepo(self, path, allowcreate=allowcreate)
284 285
285 286 def nullsub(self, path, pctx):
286 287 return subrepo.nullsubrepo(self, path, pctx)
287 288
288 289 def workingsub(self, path):
289 290 '''return a subrepo for the stored revision, or wdir if this is a wdir
290 291 context.
291 292 '''
292 293 return subrepo.subrepo(self, path, allowwdir=True)
293 294
294 295 def match(self, pats=[], include=None, exclude=None, default='glob',
295 296 listsubrepos=False, badfn=None):
296 297 r = self._repo
297 298 return matchmod.match(r.root, r.getcwd(), pats,
298 299 include, exclude, default,
299 300 auditor=r.nofsauditor, ctx=self,
300 301 listsubrepos=listsubrepos, badfn=badfn)
301 302
302 303 def diff(self, ctx2=None, match=None, **opts):
303 304 """Returns a diff generator for the given contexts and matcher"""
304 305 if ctx2 is None:
305 306 ctx2 = self.p1()
306 307 if ctx2 is not None:
307 308 ctx2 = self._repo[ctx2]
308 309 diffopts = patch.diffopts(self._repo.ui, opts)
309 310 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
310 311
311 312 def dirs(self):
312 313 return self._manifest.dirs()
313 314
314 315 def hasdir(self, dir):
315 316 return self._manifest.hasdir(dir)
316 317
317 318 def dirty(self, missing=False, merge=True, branch=True):
318 319 return False
319 320
320 321 def status(self, other=None, match=None, listignored=False,
321 322 listclean=False, listunknown=False, listsubrepos=False):
322 323 """return status of files between two nodes or node and working
323 324 directory.
324 325
325 326 If other is None, compare this node with working directory.
326 327
327 328 returns (modified, added, removed, deleted, unknown, ignored, clean)
328 329 """
329 330
330 331 ctx1 = self
331 332 ctx2 = self._repo[other]
332 333
333 334 # This next code block is, admittedly, fragile logic that tests for
334 335 # reversing the contexts and wouldn't need to exist if it weren't for
335 336 # the fast (and common) code path of comparing the working directory
336 337 # with its first parent.
337 338 #
338 339 # What we're aiming for here is the ability to call:
339 340 #
340 341 # workingctx.status(parentctx)
341 342 #
342 343 # If we always built the manifest for each context and compared those,
343 344 # then we'd be done. But the special case of the above call means we
344 345 # just copy the manifest of the parent.
345 346 reversed = False
346 347 if (not isinstance(ctx1, changectx)
347 348 and isinstance(ctx2, changectx)):
348 349 reversed = True
349 350 ctx1, ctx2 = ctx2, ctx1
350 351
351 352 match = ctx2._matchstatus(ctx1, match)
352 353 r = scmutil.status([], [], [], [], [], [], [])
353 354 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
354 355 listunknown)
355 356
356 357 if reversed:
357 358 # Reverse added and removed. Clear deleted, unknown and ignored as
358 359 # these make no sense to reverse.
359 360 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
360 361 r.clean)
361 362
362 363 if listsubrepos:
363 364 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
364 365 try:
365 366 rev2 = ctx2.subrev(subpath)
366 367 except KeyError:
367 368 # A subrepo that existed in node1 was deleted between
368 369 # node1 and node2 (inclusive). Thus, ctx2's substate
369 370 # won't contain that subpath. The best we can do is ignore it.
370 371 rev2 = None
371 372 submatch = matchmod.subdirmatcher(subpath, match)
372 373 s = sub.status(rev2, match=submatch, ignored=listignored,
373 374 clean=listclean, unknown=listunknown,
374 375 listsubrepos=True)
375 376 for rfiles, sfiles in zip(r, s):
376 377 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
377 378
378 379 for l in r:
379 380 l.sort()
380 381
381 382 return r
382 383
383 384
384 385 def makememctx(repo, parents, text, user, date, branch, files, store,
385 386 editor=None, extra=None):
386 387 def getfilectx(repo, memctx, path):
387 388 data, mode, copied = store.getfile(path)
388 389 if data is None:
389 390 return None
390 391 islink, isexec = mode
391 392 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
392 393 copied=copied, memctx=memctx)
393 394 if extra is None:
394 395 extra = {}
395 396 if branch:
396 397 extra['branch'] = encoding.fromlocal(branch)
397 398 ctx = memctx(repo, parents, text, files, getfilectx, user,
398 399 date, extra, editor)
399 400 return ctx
400 401
401 402 class changectx(basectx):
402 403 """A changecontext object makes access to data related to a particular
403 404 changeset convenient. It represents a read-only context already present in
404 405 the repo."""
405 406 def __init__(self, repo, changeid=''):
406 407 """changeid is a revision number, node, or tag"""
407 408
408 409 # since basectx.__new__ already took care of copying the object, we
409 410 # don't need to do anything in __init__, so we just exit here
410 411 if isinstance(changeid, basectx):
411 412 return
412 413
413 414 if changeid == '':
414 415 changeid = '.'
415 416 self._repo = repo
416 417
417 418 try:
418 419 if isinstance(changeid, int):
419 420 self._node = repo.changelog.node(changeid)
420 421 self._rev = changeid
421 422 return
422 if isinstance(changeid, long):
423 if not pycompat.ispy3 and isinstance(changeid, long):
423 424 changeid = str(changeid)
424 425 if changeid == 'null':
425 426 self._node = nullid
426 427 self._rev = nullrev
427 428 return
428 429 if changeid == 'tip':
429 430 self._node = repo.changelog.tip()
430 431 self._rev = repo.changelog.rev(self._node)
431 432 return
432 433 if changeid == '.' or changeid == repo.dirstate.p1():
433 434 # this is a hack to delay/avoid loading obsmarkers
434 435 # when we know that '.' won't be hidden
435 436 self._node = repo.dirstate.p1()
436 437 self._rev = repo.unfiltered().changelog.rev(self._node)
437 438 return
438 439 if len(changeid) == 20:
439 440 try:
440 441 self._node = changeid
441 442 self._rev = repo.changelog.rev(changeid)
442 443 return
443 444 except error.FilteredRepoLookupError:
444 445 raise
445 446 except LookupError:
446 447 pass
447 448
448 449 try:
449 450 r = int(changeid)
450 451 if str(r) != changeid:
451 452 raise ValueError
452 453 l = len(repo.changelog)
453 454 if r < 0:
454 455 r += l
455 456 if r < 0 or r >= l:
456 457 raise ValueError
457 458 self._rev = r
458 459 self._node = repo.changelog.node(r)
459 460 return
460 461 except error.FilteredIndexError:
461 462 raise
462 463 except (ValueError, OverflowError, IndexError):
463 464 pass
464 465
465 466 if len(changeid) == 40:
466 467 try:
467 468 self._node = bin(changeid)
468 469 self._rev = repo.changelog.rev(self._node)
469 470 return
470 471 except error.FilteredLookupError:
471 472 raise
472 473 except (TypeError, LookupError):
473 474 pass
474 475
475 476 # lookup bookmarks through the name interface
476 477 try:
477 478 self._node = repo.names.singlenode(repo, changeid)
478 479 self._rev = repo.changelog.rev(self._node)
479 480 return
480 481 except KeyError:
481 482 pass
482 483 except error.FilteredRepoLookupError:
483 484 raise
484 485 except error.RepoLookupError:
485 486 pass
486 487
487 488 self._node = repo.unfiltered().changelog._partialmatch(changeid)
488 489 if self._node is not None:
489 490 self._rev = repo.changelog.rev(self._node)
490 491 return
491 492
492 493 # lookup failed
493 494 # check if it might have come from damaged dirstate
494 495 #
495 496 # XXX we could avoid the unfiltered if we had a recognizable
496 497 # exception for filtered changeset access
497 498 if changeid in repo.unfiltered().dirstate.parents():
498 499 msg = _("working directory has unknown parent '%s'!")
499 500 raise error.Abort(msg % short(changeid))
500 501 try:
501 502 if len(changeid) == 20 and nonascii(changeid):
502 503 changeid = hex(changeid)
503 504 except TypeError:
504 505 pass
505 506 except (error.FilteredIndexError, error.FilteredLookupError,
506 507 error.FilteredRepoLookupError):
507 508 if repo.filtername.startswith('visible'):
508 509 msg = _("hidden revision '%s'") % changeid
509 510 hint = _('use --hidden to access hidden revisions')
510 511 raise error.FilteredRepoLookupError(msg, hint=hint)
511 512 msg = _("filtered revision '%s' (not in '%s' subset)")
512 513 msg %= (changeid, repo.filtername)
513 514 raise error.FilteredRepoLookupError(msg)
514 515 except IndexError:
515 516 pass
516 517 raise error.RepoLookupError(
517 518 _("unknown revision '%s'") % changeid)
518 519
519 520 def __hash__(self):
520 521 try:
521 522 return hash(self._rev)
522 523 except AttributeError:
523 524 return id(self)
524 525
525 526 def __nonzero__(self):
526 527 return self._rev != nullrev
527 528
528 529 @propertycache
529 530 def _changeset(self):
530 531 return self._repo.changelog.changelogrevision(self.rev())
531 532
532 533 @propertycache
533 534 def _manifest(self):
534 535 return self._manifestctx.read()
535 536
536 537 @propertycache
537 538 def _manifestctx(self):
538 539 return self._repo.manifestlog[self._changeset.manifest]
539 540
540 541 @propertycache
541 542 def _manifestdelta(self):
542 543 return self._manifestctx.readdelta()
543 544
544 545 @propertycache
545 546 def _parents(self):
546 547 repo = self._repo
547 548 p1, p2 = repo.changelog.parentrevs(self._rev)
548 549 if p2 == nullrev:
549 550 return [changectx(repo, p1)]
550 551 return [changectx(repo, p1), changectx(repo, p2)]
551 552
552 553 def changeset(self):
553 554 c = self._changeset
554 555 return (
555 556 c.manifest,
556 557 c.user,
557 558 c.date,
558 559 c.files,
559 560 c.description,
560 561 c.extra,
561 562 )
562 563 def manifestnode(self):
563 564 return self._changeset.manifest
564 565
565 566 def user(self):
566 567 return self._changeset.user
567 568 def date(self):
568 569 return self._changeset.date
569 570 def files(self):
570 571 return self._changeset.files
571 572 def description(self):
572 573 return self._changeset.description
573 574 def branch(self):
574 575 return encoding.tolocal(self._changeset.extra.get("branch"))
575 576 def closesbranch(self):
576 577 return 'close' in self._changeset.extra
577 578 def extra(self):
578 579 return self._changeset.extra
579 580 def tags(self):
580 581 return self._repo.nodetags(self._node)
581 582 def bookmarks(self):
582 583 return self._repo.nodebookmarks(self._node)
583 584 def phase(self):
584 585 return self._repo._phasecache.phase(self._repo, self._rev)
585 586 def hidden(self):
586 587 return self._rev in repoview.filterrevs(self._repo, 'visible')
587 588
588 589 def children(self):
589 590 """return contexts for each child changeset"""
590 591 c = self._repo.changelog.children(self._node)
591 592 return [changectx(self._repo, x) for x in c]
592 593
593 594 def ancestors(self):
594 595 for a in self._repo.changelog.ancestors([self._rev]):
595 596 yield changectx(self._repo, a)
596 597
597 598 def descendants(self):
598 599 for d in self._repo.changelog.descendants([self._rev]):
599 600 yield changectx(self._repo, d)
600 601
601 602 def filectx(self, path, fileid=None, filelog=None):
602 603 """get a file context from this changeset"""
603 604 if fileid is None:
604 605 fileid = self.filenode(path)
605 606 return filectx(self._repo, path, fileid=fileid,
606 607 changectx=self, filelog=filelog)
607 608
608 609 def ancestor(self, c2, warn=False):
609 610 """return the "best" ancestor context of self and c2
610 611
611 612 If there are multiple candidates, it will show a message and check
612 613 merge.preferancestor configuration before falling back to the
613 614 revlog ancestor."""
614 615 # deal with workingctxs
615 616 n2 = c2._node
616 617 if n2 is None:
617 618 n2 = c2._parents[0]._node
618 619 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
619 620 if not cahs:
620 621 anc = nullid
621 622 elif len(cahs) == 1:
622 623 anc = cahs[0]
623 624 else:
624 625 # experimental config: merge.preferancestor
625 626 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
626 627 try:
627 628 ctx = changectx(self._repo, r)
628 629 except error.RepoLookupError:
629 630 continue
630 631 anc = ctx.node()
631 632 if anc in cahs:
632 633 break
633 634 else:
634 635 anc = self._repo.changelog.ancestor(self._node, n2)
635 636 if warn:
636 637 self._repo.ui.status(
637 638 (_("note: using %s as ancestor of %s and %s\n") %
638 639 (short(anc), short(self._node), short(n2))) +
639 640 ''.join(_(" alternatively, use --config "
640 641 "merge.preferancestor=%s\n") %
641 642 short(n) for n in sorted(cahs) if n != anc))
642 643 return changectx(self._repo, anc)
643 644
644 645 def descendant(self, other):
645 646 """True if other is descendant of this changeset"""
646 647 return self._repo.changelog.descendant(self._rev, other._rev)
647 648
648 649 def walk(self, match):
649 650 '''Generates matching file names.'''
650 651
651 652 # Wrap match.bad method to have message with nodeid
652 653 def bad(fn, msg):
653 654 # The manifest doesn't know about subrepos, so don't complain about
654 655 # paths into valid subrepos.
655 656 if any(fn == s or fn.startswith(s + '/')
656 657 for s in self.substate):
657 658 return
658 659 match.bad(fn, _('no such file in rev %s') % self)
659 660
660 661 m = matchmod.badmatch(match, bad)
661 662 return self._manifest.walk(m)
662 663
663 664 def matches(self, match):
664 665 return self.walk(match)
665 666
666 667 class basefilectx(object):
667 668 """A filecontext object represents the common logic for its children:
668 669 filectx: read-only access to a filerevision that is already present
669 670 in the repo,
670 671 workingfilectx: a filecontext that represents files from the working
671 672 directory,
672 673 memfilectx: a filecontext that represents files in-memory."""
673 674 def __new__(cls, repo, path, *args, **kwargs):
674 675 return super(basefilectx, cls).__new__(cls)
675 676
676 677 @propertycache
677 678 def _filelog(self):
678 679 return self._repo.file(self._path)
679 680
680 681 @propertycache
681 682 def _changeid(self):
682 683 if '_changeid' in self.__dict__:
683 684 return self._changeid
684 685 elif '_changectx' in self.__dict__:
685 686 return self._changectx.rev()
686 687 elif '_descendantrev' in self.__dict__:
687 688 # this file context was created from a revision with a known
688 689 # descendant, we can (lazily) correct for linkrev aliases
689 690 return self._adjustlinkrev(self._descendantrev)
690 691 else:
691 692 return self._filelog.linkrev(self._filerev)
692 693
693 694 @propertycache
694 695 def _filenode(self):
695 696 if '_fileid' in self.__dict__:
696 697 return self._filelog.lookup(self._fileid)
697 698 else:
698 699 return self._changectx.filenode(self._path)
699 700
700 701 @propertycache
701 702 def _filerev(self):
702 703 return self._filelog.rev(self._filenode)
703 704
704 705 @propertycache
705 706 def _repopath(self):
706 707 return self._path
707 708
708 709 def __nonzero__(self):
709 710 try:
710 711 self._filenode
711 712 return True
712 713 except error.LookupError:
713 714 # file is missing
714 715 return False
715 716
716 717 def __str__(self):
717 718 try:
718 719 return "%s@%s" % (self.path(), self._changectx)
719 720 except error.LookupError:
720 721 return "%s@???" % self.path()
721 722
722 723 def __repr__(self):
723 724 return "<%s %s>" % (type(self).__name__, str(self))
724 725
725 726 def __hash__(self):
726 727 try:
727 728 return hash((self._path, self._filenode))
728 729 except AttributeError:
729 730 return id(self)
730 731
731 732 def __eq__(self, other):
732 733 try:
733 734 return (type(self) == type(other) and self._path == other._path
734 735 and self._filenode == other._filenode)
735 736 except AttributeError:
736 737 return False
737 738
738 739 def __ne__(self, other):
739 740 return not (self == other)
740 741
741 742 def filerev(self):
742 743 return self._filerev
743 744 def filenode(self):
744 745 return self._filenode
745 746 def flags(self):
746 747 return self._changectx.flags(self._path)
747 748 def filelog(self):
748 749 return self._filelog
749 750 def rev(self):
750 751 return self._changeid
751 752 def linkrev(self):
752 753 return self._filelog.linkrev(self._filerev)
753 754 def node(self):
754 755 return self._changectx.node()
755 756 def hex(self):
756 757 return self._changectx.hex()
757 758 def user(self):
758 759 return self._changectx.user()
759 760 def date(self):
760 761 return self._changectx.date()
761 762 def files(self):
762 763 return self._changectx.files()
763 764 def description(self):
764 765 return self._changectx.description()
765 766 def branch(self):
766 767 return self._changectx.branch()
767 768 def extra(self):
768 769 return self._changectx.extra()
769 770 def phase(self):
770 771 return self._changectx.phase()
771 772 def phasestr(self):
772 773 return self._changectx.phasestr()
773 774 def manifest(self):
774 775 return self._changectx.manifest()
775 776 def changectx(self):
776 777 return self._changectx
777 778 def repo(self):
778 779 return self._repo
779 780
780 781 def path(self):
781 782 return self._path
782 783
783 784 def isbinary(self):
784 785 try:
785 786 return util.binary(self.data())
786 787 except IOError:
787 788 return False
788 789 def isexec(self):
789 790 return 'x' in self.flags()
790 791 def islink(self):
791 792 return 'l' in self.flags()
792 793
793 794 def isabsent(self):
794 795 """whether this filectx represents a file not in self._changectx
795 796
796 797 This is mainly for merge code to detect change/delete conflicts. This is
797 798 expected to be True for all subclasses of basectx."""
798 799 return False
799 800
800 801 _customcmp = False
801 802 def cmp(self, fctx):
802 803 """compare with other file context
803 804
804 805 returns True if different than fctx.
805 806 """
806 807 if fctx._customcmp:
807 808 return fctx.cmp(self)
808 809
809 810 if (fctx._filenode is None
810 811 and (self._repo._encodefilterpats
811 812 # if file data starts with '\1\n', empty metadata block is
812 813 # prepended, which adds 4 bytes to filelog.size().
813 814 or self.size() - 4 == fctx.size())
814 815 or self.size() == fctx.size()):
815 816 return self._filelog.cmp(self._filenode, fctx.data())
816 817
817 818 return True
818 819
819 820 def _adjustlinkrev(self, srcrev, inclusive=False):
820 821 """return the first ancestor of <srcrev> introducing <fnode>
821 822
822 823 If the linkrev of the file revision does not point to an ancestor of
823 824 srcrev, we'll walk down the ancestors until we find one introducing
824 825 this file revision.
825 826
826 827 :srcrev: the changeset revision we search ancestors from
827 828 :inclusive: if true, the src revision will also be checked
828 829 """
829 830 repo = self._repo
830 831 cl = repo.unfiltered().changelog
831 832 mfl = repo.manifestlog
832 833 # fetch the linkrev
833 834 lkr = self.linkrev()
834 835 # hack to reuse ancestor computation when searching for renames
835 836 memberanc = getattr(self, '_ancestrycontext', None)
836 837 iteranc = None
837 838 if srcrev is None:
838 839 # wctx case, used by workingfilectx during mergecopy
839 840 revs = [p.rev() for p in self._repo[None].parents()]
840 841 inclusive = True # we skipped the real (revless) source
841 842 else:
842 843 revs = [srcrev]
843 844 if memberanc is None:
844 845 memberanc = iteranc = cl.ancestors(revs, lkr,
845 846 inclusive=inclusive)
846 847 # check if this linkrev is an ancestor of srcrev
847 848 if lkr not in memberanc:
848 849 if iteranc is None:
849 850 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
850 851 fnode = self._filenode
851 852 path = self._path
852 853 for a in iteranc:
853 854 ac = cl.read(a) # get changeset data (we avoid object creation)
854 855 if path in ac[3]: # checking the 'files' field.
855 856 # The file has been touched, check if the content is
856 857 # similar to the one we search for.
857 858 if fnode == mfl[ac[0]].readfast().get(path):
858 859 return a
859 860 # In theory, we should never get out of that loop without a result.
860 861 # But if the manifest uses a buggy file revision (not a child of the
861 862 # one it replaces) we could. Such a buggy situation will likely
862 863 # result in a crash somewhere else at some point.
863 864 return lkr
864 865
865 866 def introrev(self):
866 867 """return the rev of the changeset which introduced this file revision
867 868
868 869 This method is different from linkrev because it takes into account the
869 870 changeset the filectx was created from. It ensures the returned
870 871 revision is one of its ancestors. This prevents bugs from
871 872 'linkrev-shadowing' when a file revision is used by multiple
872 873 changesets.
873 874 """
874 875 lkr = self.linkrev()
875 876 attrs = vars(self)
876 877 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
877 878 if noctx or self.rev() == lkr:
878 879 return self.linkrev()
879 880 return self._adjustlinkrev(self.rev(), inclusive=True)
880 881
881 882 def _parentfilectx(self, path, fileid, filelog):
882 883 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
883 884 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
884 885 if '_changeid' in vars(self) or '_changectx' in vars(self):
885 886 # If self is associated with a changeset (probably explicitly
886 887 # fed), ensure the created filectx is associated with a
887 888 # changeset that is an ancestor of self.changectx.
888 889 # This lets us later use _adjustlinkrev to get a correct link.
889 890 fctx._descendantrev = self.rev()
890 891 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 892 elif '_descendantrev' in vars(self):
892 893 # Otherwise propagate _descendantrev if we have one associated.
893 894 fctx._descendantrev = self._descendantrev
894 895 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
895 896 return fctx
896 897
897 898 def parents(self):
898 899 _path = self._path
899 900 fl = self._filelog
900 901 parents = self._filelog.parents(self._filenode)
901 902 pl = [(_path, node, fl) for node in parents if node != nullid]
902 903
903 904 r = fl.renamed(self._filenode)
904 905 if r:
905 906 # - In the simple rename case, both parents are nullid, pl is empty.
906 907 # - In case of merge, only one of the parents is nullid and should
907 908 # be replaced with the rename information. This parent is -always-
908 909 # the first one.
909 910 #
910 911 # As nullid has always been filtered out in the previous list
911 912 # comprehension, inserting at 0 will always result in replacing the
912 913 # first nullid parent with the rename information.
913 914 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
914 915
915 916 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
916 917
917 918 def p1(self):
918 919 return self.parents()[0]
919 920
920 921 def p2(self):
921 922 p = self.parents()
922 923 if len(p) == 2:
923 924 return p[1]
924 925 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
925 926
926 927 def annotate(self, follow=False, linenumber=False, diffopts=None):
927 928 '''returns a list of tuples of ((ctx, number), line) for each line
928 929 in the file, where ctx is the filectx of the node where
929 930 that line was last changed; if linenumber parameter is true, number is
930 931 the line number at the first appearance in the managed file, otherwise,
931 932 number has a fixed value of False.
932 933 '''
933 934
934 935 def lines(text):
935 936 if text.endswith("\n"):
936 937 return text.count("\n")
937 938 return text.count("\n") + int(bool(text))
938 939
939 940 if linenumber:
940 941 def decorate(text, rev):
941 942 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
942 943 else:
943 944 def decorate(text, rev):
944 945 return ([(rev, False)] * lines(text), text)
945 946
946 947 def pair(parent, child):
947 948 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
948 949 for (a1, a2, b1, b2), t in blocks:
949 950 # Changed blocks ('!') or blocks made only of blank lines ('~')
950 951 # belong to the child.
951 952 if t == '=':
952 953 child[0][b1:b2] = parent[0][a1:a2]
953 954 return child
954 955
955 956 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
956 957
957 958 def parents(f):
958 959 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
959 960 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
960 961 # from the topmost introrev (= srcrev) down to p.linkrev() if it
961 962 # isn't an ancestor of the srcrev.
962 963 f._changeid
963 964 pl = f.parents()
964 965
965 966 # Don't return renamed parents if we aren't following.
966 967 if not follow:
967 968 pl = [p for p in pl if p.path() == f.path()]
968 969
969 970 # renamed filectx won't have a filelog yet, so set it
970 971 # from the cache to save time
971 972 for p in pl:
972 973 if not '_filelog' in p.__dict__:
973 974 p._filelog = getlog(p.path())
974 975
975 976 return pl
976 977
977 978 # use linkrev to find the first changeset where self appeared
978 979 base = self
979 980 introrev = self.introrev()
980 981 if self.rev() != introrev:
981 982 base = self.filectx(self.filenode(), changeid=introrev)
982 983 if getattr(base, '_ancestrycontext', None) is None:
983 984 cl = self._repo.changelog
984 985 if introrev is None:
985 986 # wctx is not inclusive, but works because _ancestrycontext
986 987 # is used to test filelog revisions
987 988 ac = cl.ancestors([p.rev() for p in base.parents()],
988 989 inclusive=True)
989 990 else:
990 991 ac = cl.ancestors([introrev], inclusive=True)
991 992 base._ancestrycontext = ac
992 993
993 994 # This algorithm would prefer to be recursive, but Python is a
994 995 # bit recursion-hostile. Instead we do an iterative
995 996 # depth-first search.
996 997
997 998 # 1st DFS pre-calculates pcache and needed
998 999 visit = [base]
999 1000 pcache = {}
1000 1001 needed = {base: 1}
1001 1002 while visit:
1002 1003 f = visit.pop()
1003 1004 if f in pcache:
1004 1005 continue
1005 1006 pl = parents(f)
1006 1007 pcache[f] = pl
1007 1008 for p in pl:
1008 1009 needed[p] = needed.get(p, 0) + 1
1009 1010 if p not in pcache:
1010 1011 visit.append(p)
1011 1012
1012 1013 # 2nd DFS does the actual annotate
1013 1014 visit[:] = [base]
1014 1015 hist = {}
1015 1016 while visit:
1016 1017 f = visit[-1]
1017 1018 if f in hist:
1018 1019 visit.pop()
1019 1020 continue
1020 1021
1021 1022 ready = True
1022 1023 pl = pcache[f]
1023 1024 for p in pl:
1024 1025 if p not in hist:
1025 1026 ready = False
1026 1027 visit.append(p)
1027 1028 if ready:
1028 1029 visit.pop()
1029 1030 curr = decorate(f.data(), f)
1030 1031 for p in pl:
1031 1032 curr = pair(hist[p], curr)
1032 1033 if needed[p] == 1:
1033 1034 del hist[p]
1034 1035 del needed[p]
1035 1036 else:
1036 1037 needed[p] -= 1
1037 1038
1038 1039 hist[f] = curr
1039 1040 del pcache[f]
1040 1041
1041 1042 return zip(hist[base][0], hist[base][1].splitlines(True))
1042 1043
1043 1044 def ancestors(self, followfirst=False):
1044 1045 visit = {}
1045 1046 c = self
1046 1047 if followfirst:
1047 1048 cut = 1
1048 1049 else:
1049 1050 cut = None
1050 1051
1051 1052 while True:
1052 1053 for parent in c.parents()[:cut]:
1053 1054 visit[(parent.linkrev(), parent.filenode())] = parent
1054 1055 if not visit:
1055 1056 break
1056 1057 c = visit.pop(max(visit))
1057 1058 yield c
1058 1059
1059 1060 class filectx(basefilectx):
1060 1061 """A filecontext object makes access to data related to a particular
1061 1062 filerevision convenient."""
1062 1063 def __init__(self, repo, path, changeid=None, fileid=None,
1063 1064 filelog=None, changectx=None):
1064 1065 """changeid can be a changeset revision, node, or tag.
1065 1066 fileid can be a file revision or node."""
1066 1067 self._repo = repo
1067 1068 self._path = path
1068 1069
1069 1070 assert (changeid is not None
1070 1071 or fileid is not None
1071 1072 or changectx is not None), \
1072 1073 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1073 1074 % (changeid, fileid, changectx))
1074 1075
1075 1076 if filelog is not None:
1076 1077 self._filelog = filelog
1077 1078
1078 1079 if changeid is not None:
1079 1080 self._changeid = changeid
1080 1081 if changectx is not None:
1081 1082 self._changectx = changectx
1082 1083 if fileid is not None:
1083 1084 self._fileid = fileid
1084 1085
1085 1086 @propertycache
1086 1087 def _changectx(self):
1087 1088 try:
1088 1089 return changectx(self._repo, self._changeid)
1089 1090 except error.FilteredRepoLookupError:
1090 1091 # Linkrev may point to any revision in the repository. When the
1091 1092 # repository is filtered this may lead to `filectx` trying to build
1092 1093 # `changectx` for a filtered revision. In such a case we fall back to
1093 1094 # creating `changectx` on the unfiltered version of the repository.
1094 1095 # This fallback should not be an issue because `changectx` from
1095 1096 # `filectx` are not used in complex operations that care about
1096 1097 # filtering.
1097 1098 #
1098 1099 # This fallback is a cheap and dirty fix that prevents several
1099 1100 # crashes. It does not ensure the behavior is correct. However the
1100 1101 # behavior was not correct before filtering either and "incorrect
1101 1102 # behavior" is seen as better than "crash".
1102 1103 #
1103 1104 # Linkrevs have several serious troubles with filtering that are
1104 1105 # complicated to solve. Proper handling of the issue here should be
1105 1106 # considered when solving the linkrev issues is on the table.
1106 1107 return changectx(self._repo.unfiltered(), self._changeid)
1107 1108
1108 1109 def filectx(self, fileid, changeid=None):
1109 1110 '''opens an arbitrary revision of the file without
1110 1111 opening a new filelog'''
1111 1112 return filectx(self._repo, self._path, fileid=fileid,
1112 1113 filelog=self._filelog, changeid=changeid)
1113 1114
1114 1115 def rawdata(self):
1115 1116 return self._filelog.revision(self._filenode, raw=True)
1116 1117
1117 1118 def data(self):
1118 1119 try:
1119 1120 return self._filelog.read(self._filenode)
1120 1121 except error.CensoredNodeError:
1121 1122 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1122 1123 return ""
1123 1124 raise error.Abort(_("censored node: %s") % short(self._filenode),
1124 1125 hint=_("set censor.policy to ignore errors"))
1125 1126
1126 1127 def size(self):
1127 1128 return self._filelog.size(self._filerev)
1128 1129
1129 1130 def renamed(self):
1130 1131 """check if file was actually renamed in this changeset revision
1131 1132
1132 1133 If the rename is logged in the file revision, we report the copy only
1133 1134 if the file revision's linkrev points back to the changeset in
1134 1135 question or both changeset parents contain different file revisions.
1135 1136 """
1136 1137
1137 1138 renamed = self._filelog.renamed(self._filenode)
1138 1139 if not renamed:
1139 1140 return renamed
1140 1141
1141 1142 if self.rev() == self.linkrev():
1142 1143 return renamed
1143 1144
1144 1145 name = self.path()
1145 1146 fnode = self._filenode
1146 1147 for p in self._changectx.parents():
1147 1148 try:
1148 1149 if fnode == p.filenode(name):
1149 1150 return None
1150 1151 except error.LookupError:
1151 1152 pass
1152 1153 return renamed
1153 1154
1154 1155 def children(self):
1155 1156 # hard for renames
1156 1157 c = self._filelog.children(self._filenode)
1157 1158 return [filectx(self._repo, self._path, fileid=x,
1158 1159 filelog=self._filelog) for x in c]
1159 1160
1160 1161 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1161 1162 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1162 1163 if diff from fctx2 to fctx1 has changes in linerange2 and
1163 1164 `linerange1` is the new line range for fctx1.
1164 1165 """
1165 1166 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1166 1167 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1167 1168 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1168 1169 return diffinrange, linerange1
1169 1170
1170 1171 def blockancestors(fctx, fromline, toline, followfirst=False):
1171 1172 """Yield ancestors of `fctx` with respect to the block of lines within
1172 1173 `fromline`-`toline` range.
1173 1174 """
1174 1175 diffopts = patch.diffopts(fctx._repo.ui)
1175 1176 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1176 1177 while visit:
1177 1178 c, linerange2 = visit.pop(max(visit))
1178 1179 pl = c.parents()
1179 1180 if followfirst:
1180 1181 pl = pl[:1]
1181 1182 if not pl:
1182 1183 # The block originates from the initial revision.
1183 1184 yield c, linerange2
1184 1185 continue
1185 1186 inrange = False
1186 1187 for p in pl:
1187 1188 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1188 1189 inrange = inrange or inrangep
1189 1190 if linerange1[0] == linerange1[1]:
1190 1191 # Parent's linerange is empty, meaning that the block got
1191 1192 # introduced in this revision; no need to go further in this
1192 1193 # branch.
1193 1194 continue
1194 1195 visit[p.linkrev(), p.filenode()] = p, linerange1
1195 1196 if inrange:
1196 1197 yield c, linerange2
1197 1198
1198 1199 class committablectx(basectx):
1199 1200 """A committablectx object provides common functionality for a context that
1200 1201 wants the ability to commit, e.g. workingctx or memctx."""
1201 1202 def __init__(self, repo, text="", user=None, date=None, extra=None,
1202 1203 changes=None):
1203 1204 self._repo = repo
1204 1205 self._rev = None
1205 1206 self._node = None
1206 1207 self._text = text
1207 1208 if date:
1208 1209 self._date = util.parsedate(date)
1209 1210 if user:
1210 1211 self._user = user
1211 1212 if changes:
1212 1213 self._status = changes
1213 1214
1214 1215 self._extra = {}
1215 1216 if extra:
1216 1217 self._extra = extra.copy()
1217 1218 if 'branch' not in self._extra:
1218 1219 try:
1219 1220 branch = encoding.fromlocal(self._repo.dirstate.branch())
1220 1221 except UnicodeDecodeError:
1221 1222 raise error.Abort(_('branch name not in UTF-8!'))
1222 1223 self._extra['branch'] = branch
1223 1224 if self._extra['branch'] == '':
1224 1225 self._extra['branch'] = 'default'
1225 1226
1226 1227 def __str__(self):
1227 1228 return str(self._parents[0]) + "+"
1228 1229
1229 1230 def __nonzero__(self):
1230 1231 return True
1231 1232
1232 1233 def _buildflagfunc(self):
1233 1234 # Create a fallback function for getting file flags when the
1234 1235 # filesystem doesn't support them
1235 1236
1236 1237 copiesget = self._repo.dirstate.copies().get
1237 1238 parents = self.parents()
1238 1239 if len(parents) < 2:
1239 1240 # when we have one parent, it's easy: copy from parent
1240 1241 man = parents[0].manifest()
1241 1242 def func(f):
1242 1243 f = copiesget(f, f)
1243 1244 return man.flags(f)
1244 1245 else:
1245 1246 # merges are tricky: we try to reconstruct the unstored
1246 1247 # result from the merge (issue1802)
1247 1248 p1, p2 = parents
1248 1249 pa = p1.ancestor(p2)
1249 1250 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1250 1251
1251 1252 def func(f):
1252 1253 f = copiesget(f, f) # may be wrong for merges with copies
1253 1254 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1254 1255 if fl1 == fl2:
1255 1256 return fl1
1256 1257 if fl1 == fla:
1257 1258 return fl2
1258 1259 if fl2 == fla:
1259 1260 return fl1
1260 1261 return '' # punt for conflicts
1261 1262
1262 1263 return func
1263 1264
1264 1265 @propertycache
1265 1266 def _flagfunc(self):
1266 1267 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1267 1268
1268 1269 @propertycache
1269 1270 def _status(self):
1270 1271 return self._repo.status()
1271 1272
1272 1273 @propertycache
1273 1274 def _user(self):
1274 1275 return self._repo.ui.username()
1275 1276
1276 1277 @propertycache
1277 1278 def _date(self):
1278 1279 return util.makedate()
1279 1280
1280 1281 def subrev(self, subpath):
1281 1282 return None
1282 1283
1283 1284 def manifestnode(self):
1284 1285 return None
1285 1286 def user(self):
1286 1287 return self._user or self._repo.ui.username()
1287 1288 def date(self):
1288 1289 return self._date
1289 1290 def description(self):
1290 1291 return self._text
1291 1292 def files(self):
1292 1293 return sorted(self._status.modified + self._status.added +
1293 1294 self._status.removed)
1294 1295
1295 1296 def modified(self):
1296 1297 return self._status.modified
1297 1298 def added(self):
1298 1299 return self._status.added
1299 1300 def removed(self):
1300 1301 return self._status.removed
1301 1302 def deleted(self):
1302 1303 return self._status.deleted
1303 1304 def branch(self):
1304 1305 return encoding.tolocal(self._extra['branch'])
1305 1306 def closesbranch(self):
1306 1307 return 'close' in self._extra
1307 1308 def extra(self):
1308 1309 return self._extra
1309 1310
1310 1311 def tags(self):
1311 1312 return []
1312 1313
1313 1314 def bookmarks(self):
1314 1315 b = []
1315 1316 for p in self.parents():
1316 1317 b.extend(p.bookmarks())
1317 1318 return b
1318 1319
1319 1320 def phase(self):
1320 1321 phase = phases.draft # default phase to draft
1321 1322 for p in self.parents():
1322 1323 phase = max(phase, p.phase())
1323 1324 return phase
1324 1325
1325 1326 def hidden(self):
1326 1327 return False
1327 1328
1328 1329 def children(self):
1329 1330 return []
1330 1331
1331 1332 def flags(self, path):
1332 1333 if '_manifest' in self.__dict__:
1333 1334 try:
1334 1335 return self._manifest.flags(path)
1335 1336 except KeyError:
1336 1337 return ''
1337 1338
1338 1339 try:
1339 1340 return self._flagfunc(path)
1340 1341 except OSError:
1341 1342 return ''
1342 1343
1343 1344 def ancestor(self, c2):
1344 1345 """return the "best" ancestor context of self and c2"""
1345 1346 return self._parents[0].ancestor(c2) # punt on two parents for now
1346 1347
1347 1348 def walk(self, match):
1348 1349 '''Generates matching file names.'''
1349 1350 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1350 1351 True, False))
1351 1352
1352 1353 def matches(self, match):
1353 1354 return sorted(self._repo.dirstate.matches(match))
1354 1355
1355 1356 def ancestors(self):
1356 1357 for p in self._parents:
1357 1358 yield p
1358 1359 for a in self._repo.changelog.ancestors(
1359 1360 [p.rev() for p in self._parents]):
1360 1361 yield changectx(self._repo, a)
1361 1362
1362 1363 def markcommitted(self, node):
1363 1364 """Perform post-commit cleanup necessary after committing this ctx
1364 1365
1365 1366 Specifically, this updates backing stores this working context
1366 1367 wraps to reflect the fact that the changes reflected by this
1367 1368 workingctx have been committed. For example, it marks
1368 1369 modified and added files as normal in the dirstate.
1369 1370
1370 1371 """
1371 1372
1372 1373 self._repo.dirstate.beginparentchange()
1373 1374 for f in self.modified() + self.added():
1374 1375 self._repo.dirstate.normal(f)
1375 1376 for f in self.removed():
1376 1377 self._repo.dirstate.drop(f)
1377 1378 self._repo.dirstate.setparents(node)
1378 1379 self._repo.dirstate.endparentchange()
1379 1380
1380 1381 # write changes out explicitly, because nesting wlock at
1381 1382 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1382 1383 # from immediately doing so for subsequent changing files
1383 1384 self._repo.dirstate.write(self._repo.currenttransaction())
1384 1385
1385 1386 class workingctx(committablectx):
1386 1387 """A workingctx object makes access to data related to
1387 1388 the current working directory convenient.
1388 1389 date - any valid date string or (unixtime, offset), or None.
1389 1390 user - username string, or None.
1390 1391 extra - a dictionary of extra values, or None.
1391 1392 changes - a list of file lists as returned by localrepo.status()
1392 1393 or None to use the repository status.
1393 1394 """
1394 1395 def __init__(self, repo, text="", user=None, date=None, extra=None,
1395 1396 changes=None):
1396 1397 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1397 1398
1398 1399 def __iter__(self):
1399 1400 d = self._repo.dirstate
1400 1401 for f in d:
1401 1402 if d[f] != 'r':
1402 1403 yield f
1403 1404
1404 1405 def __contains__(self, key):
1405 1406 return self._repo.dirstate[key] not in "?r"
1406 1407
1407 1408 def hex(self):
1408 1409 return hex(wdirid)
1409 1410
1410 1411 @propertycache
1411 1412 def _parents(self):
1412 1413 p = self._repo.dirstate.parents()
1413 1414 if p[1] == nullid:
1414 1415 p = p[:-1]
1415 1416 return [changectx(self._repo, x) for x in p]
1416 1417
1417 1418 def filectx(self, path, filelog=None):
1418 1419 """get a file context from the working directory"""
1419 1420 return workingfilectx(self._repo, path, workingctx=self,
1420 1421 filelog=filelog)
1421 1422
1422 1423 def dirty(self, missing=False, merge=True, branch=True):
1423 1424 "check whether a working directory is modified"
1424 1425 # check subrepos first
1425 1426 for s in sorted(self.substate):
1426 1427 if self.sub(s).dirty():
1427 1428 return True
1428 1429 # check current working dir
1429 1430 return ((merge and self.p2()) or
1430 1431 (branch and self.branch() != self.p1().branch()) or
1431 1432 self.modified() or self.added() or self.removed() or
1432 1433 (missing and self.deleted()))
1433 1434
1434 1435 def add(self, list, prefix=""):
1435 1436 join = lambda f: os.path.join(prefix, f)
1436 1437 with self._repo.wlock():
1437 1438 ui, ds = self._repo.ui, self._repo.dirstate
1438 1439 rejected = []
1439 1440 lstat = self._repo.wvfs.lstat
1440 1441 for f in list:
1441 1442 scmutil.checkportable(ui, join(f))
1442 1443 try:
1443 1444 st = lstat(f)
1444 1445 except OSError:
1445 1446 ui.warn(_("%s does not exist!\n") % join(f))
1446 1447 rejected.append(f)
1447 1448 continue
1448 1449 if st.st_size > 10000000:
1449 1450 ui.warn(_("%s: up to %d MB of RAM may be required "
1450 1451 "to manage this file\n"
1451 1452 "(use 'hg revert %s' to cancel the "
1452 1453 "pending addition)\n")
1453 1454 % (f, 3 * st.st_size // 1000000, join(f)))
1454 1455 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1455 1456 ui.warn(_("%s not added: only files and symlinks "
1456 1457 "supported currently\n") % join(f))
1457 1458 rejected.append(f)
1458 1459 elif ds[f] in 'amn':
1459 1460 ui.warn(_("%s already tracked!\n") % join(f))
1460 1461 elif ds[f] == 'r':
1461 1462 ds.normallookup(f)
1462 1463 else:
1463 1464 ds.add(f)
1464 1465 return rejected
1465 1466
1466 1467 def forget(self, files, prefix=""):
1467 1468 join = lambda f: os.path.join(prefix, f)
1468 1469 with self._repo.wlock():
1469 1470 rejected = []
1470 1471 for f in files:
1471 1472 if f not in self._repo.dirstate:
1472 1473 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1473 1474 rejected.append(f)
1474 1475 elif self._repo.dirstate[f] != 'a':
1475 1476 self._repo.dirstate.remove(f)
1476 1477 else:
1477 1478 self._repo.dirstate.drop(f)
1478 1479 return rejected
1479 1480
1480 1481 def undelete(self, list):
1481 1482 pctxs = self.parents()
1482 1483 with self._repo.wlock():
1483 1484 for f in list:
1484 1485 if self._repo.dirstate[f] != 'r':
1485 1486 self._repo.ui.warn(_("%s not removed!\n") % f)
1486 1487 else:
1487 1488 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1488 1489 t = fctx.data()
1489 1490 self._repo.wwrite(f, t, fctx.flags())
1490 1491 self._repo.dirstate.normal(f)
1491 1492
1492 1493 def copy(self, source, dest):
1493 1494 try:
1494 1495 st = self._repo.wvfs.lstat(dest)
1495 1496 except OSError as err:
1496 1497 if err.errno != errno.ENOENT:
1497 1498 raise
1498 1499 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1499 1500 return
1500 1501 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1501 1502 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1502 1503 "symbolic link\n") % dest)
1503 1504 else:
1504 1505 with self._repo.wlock():
1505 1506 if self._repo.dirstate[dest] in '?':
1506 1507 self._repo.dirstate.add(dest)
1507 1508 elif self._repo.dirstate[dest] in 'r':
1508 1509 self._repo.dirstate.normallookup(dest)
1509 1510 self._repo.dirstate.copy(source, dest)
1510 1511
1511 1512 def match(self, pats=[], include=None, exclude=None, default='glob',
1512 1513 listsubrepos=False, badfn=None):
1513 1514 r = self._repo
1514 1515
1515 1516 # Only a case insensitive filesystem needs magic to translate user input
1516 1517 # to actual case in the filesystem.
1517 1518 if not util.fscasesensitive(r.root):
1518 1519 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1519 1520 exclude, default, r.auditor, self,
1520 1521 listsubrepos=listsubrepos,
1521 1522 badfn=badfn)
1522 1523 return matchmod.match(r.root, r.getcwd(), pats,
1523 1524 include, exclude, default,
1524 1525 auditor=r.auditor, ctx=self,
1525 1526 listsubrepos=listsubrepos, badfn=badfn)
1526 1527
1527 1528 def _filtersuspectsymlink(self, files):
1528 1529 if not files or self._repo.dirstate._checklink:
1529 1530 return files
1530 1531
1531 1532 # Symlink placeholders may get non-symlink-like contents
1532 1533 # via user error or dereferencing by NFS or Samba servers,
1533 1534 # so we filter out any placeholders that don't look like a
1534 1535 # symlink
1535 1536 sane = []
1536 1537 for f in files:
1537 1538 if self.flags(f) == 'l':
1538 1539 d = self[f].data()
1539 1540 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1540 1541 self._repo.ui.debug('ignoring suspect symlink placeholder'
1541 1542 ' "%s"\n' % f)
1542 1543 continue
1543 1544 sane.append(f)
1544 1545 return sane
1545 1546
1546 1547 def _checklookup(self, files):
1547 1548 # check for any possibly clean files
1548 1549 if not files:
1549 1550 return [], []
1550 1551
1551 1552 modified = []
1552 1553 fixup = []
1553 1554 pctx = self._parents[0]
1554 1555 # do a full compare of any files that might have changed
1555 1556 for f in sorted(files):
1556 1557 if (f not in pctx or self.flags(f) != pctx.flags(f)
1557 1558 or pctx[f].cmp(self[f])):
1558 1559 modified.append(f)
1559 1560 else:
1560 1561 fixup.append(f)
1561 1562
1562 1563 # update dirstate for files that are actually clean
1563 1564 if fixup:
1564 1565 try:
1565 1566 # updating the dirstate is optional
1566 1567 # so we don't wait on the lock
1567 1568 # wlock can invalidate the dirstate, so cache normal _after_
1568 1569 # taking the lock
1569 1570 with self._repo.wlock(False):
1570 1571 normal = self._repo.dirstate.normal
1571 1572 for f in fixup:
1572 1573 normal(f)
1573 1574 # write changes out explicitly, because nesting
1574 1575 # wlock at runtime may prevent 'wlock.release()'
1575 1576 # after this block from doing so for subsequent
1576 1577 # changing files
1577 1578 self._repo.dirstate.write(self._repo.currenttransaction())
1578 1579 except error.LockError:
1579 1580 pass
1580 1581 return modified, fixup
1581 1582
1582 1583 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1583 1584 unknown=False):
1584 1585 '''Gets the status from the dirstate -- internal use only.'''
1585 1586 listignored, listclean, listunknown = ignored, clean, unknown
1586 1587 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1587 1588 subrepos = []
1588 1589 if '.hgsub' in self:
1589 1590 subrepos = sorted(self.substate)
1590 1591 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1591 1592 listclean, listunknown)
1592 1593
1593 1594 # check for any possibly clean files
1594 1595 if cmp:
1595 1596 modified2, fixup = self._checklookup(cmp)
1596 1597 s.modified.extend(modified2)
1597 1598
1598 1599 # update dirstate for files that are actually clean
1599 1600 if fixup and listclean:
1600 1601 s.clean.extend(fixup)
1601 1602
1602 1603 if match.always():
1603 1604 # cache for performance
1604 1605 if s.unknown or s.ignored or s.clean:
1605 1606 # "_status" is cached with list*=False in the normal route
1606 1607 self._status = scmutil.status(s.modified, s.added, s.removed,
1607 1608 s.deleted, [], [], [])
1608 1609 else:
1609 1610 self._status = s
1610 1611
1611 1612 return s
1612 1613
1613 1614 @propertycache
1614 1615 def _manifest(self):
1615 1616 """generate a manifest corresponding to the values in self._status
1616 1617
1617 1618 This reuses the file nodeid from parent, but we use special node
1618 1619 identifiers for added and modified files. This is used by manifest
1619 1620 merge to see that files are different and by update logic to avoid
1620 1621 deleting newly added files.
1621 1622 """
1622 1623 return self._buildstatusmanifest(self._status)
1623 1624
1624 1625 def _buildstatusmanifest(self, status):
1625 1626 """Builds a manifest that includes the given status results."""
1626 1627 parents = self.parents()
1627 1628
1628 1629 man = parents[0].manifest().copy()
1629 1630
1630 1631 ff = self._flagfunc
1631 1632 for i, l in ((addednodeid, status.added),
1632 1633 (modifiednodeid, status.modified)):
1633 1634 for f in l:
1634 1635 man[f] = i
1635 1636 try:
1636 1637 man.setflag(f, ff(f))
1637 1638 except OSError:
1638 1639 pass
1639 1640
1640 1641 for f in status.deleted + status.removed:
1641 1642 if f in man:
1642 1643 del man[f]
1643 1644
1644 1645 return man
1645 1646
1646 1647 def _buildstatus(self, other, s, match, listignored, listclean,
1647 1648 listunknown):
1648 1649 """build a status with respect to another context
1649 1650
1650 1651 This includes logic for maintaining the fast path of status when
1651 1652 comparing the working directory against its parent, which is to skip
1652 1653 building a new manifest if self (working directory) is not comparing
1653 1654 against its parent (repo['.']).
1654 1655 """
1655 1656 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1656 1657 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1657 1658 # might have accidentally ended up with the entire contents of the file
1658 1659 # they are supposed to be linking to.
1659 1660 s.modified[:] = self._filtersuspectsymlink(s.modified)
1660 1661 if other != self._repo['.']:
1661 1662 s = super(workingctx, self)._buildstatus(other, s, match,
1662 1663 listignored, listclean,
1663 1664 listunknown)
1664 1665 return s
1665 1666
1666 1667 def _matchstatus(self, other, match):
1667 1668 """override the match method with a filter for directory patterns
1668 1669
1669 1670 We use inheritance to customize the match.bad method only in cases of
1670 1671 workingctx since it belongs only to the working directory when
1671 1672 comparing against the parent changeset.
1672 1673
1673 1674 If we aren't comparing against the working directory's parent, then we
1674 1675 just use the default match object sent to us.
1675 1676 """
1676 1677 superself = super(workingctx, self)
1677 1678 match = superself._matchstatus(other, match)
1678 1679 if other != self._repo['.']:
1679 1680 def bad(f, msg):
1680 1681 # 'f' may be a directory pattern from 'match.files()',
1681 1682 # so 'f not in other' is not enough
1682 1683 if f not in other and not other.hasdir(f):
1683 1684 self._repo.ui.warn('%s: %s\n' %
1684 1685 (self._repo.dirstate.pathto(f), msg))
1685 1686 match.bad = bad
1686 1687 return match
1687 1688
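# Hedged illustration (not part of the original change): one plausible way a
# caller could use the match() helper above to restrict a status query to a
# set of patterns. Assumes 'repo' is an open localrepo; repo[None] yields the
# workingctx defined in this class, and basectx.status() accepts a matcher.
def _examplematchedstatus(repo, pats):
    wctx = repo[None]
    m = wctx.match(pats, default='glob')
    # bad patterns are funneled through match.bad(), which _matchstatus()
    # above may override when comparing against a non-parent revision
    return wctx.status(match=m)
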
1688 1689 class committablefilectx(basefilectx):
1689 1690 """A committablefilectx provides common functionality for a file context
1690 1691 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1691 1692 def __init__(self, repo, path, filelog=None, ctx=None):
1692 1693 self._repo = repo
1693 1694 self._path = path
1694 1695 self._changeid = None
1695 1696 self._filerev = self._filenode = None
1696 1697
1697 1698 if filelog is not None:
1698 1699 self._filelog = filelog
1699 1700 if ctx:
1700 1701 self._changectx = ctx
1701 1702
1702 1703 def __nonzero__(self):
1703 1704 return True
1704 1705
1705 1706 def linkrev(self):
1706 1707 # linked to self._changectx no matter if file is modified or not
1707 1708 return self.rev()
1708 1709
1709 1710 def parents(self):
1710 1711 '''return parent filectxs, following copies if necessary'''
1711 1712 def filenode(ctx, path):
1712 1713 return ctx._manifest.get(path, nullid)
1713 1714
1714 1715 path = self._path
1715 1716 fl = self._filelog
1716 1717 pcl = self._changectx._parents
1717 1718 renamed = self.renamed()
1718 1719
1719 1720 if renamed:
1720 1721 pl = [renamed + (None,)]
1721 1722 else:
1722 1723 pl = [(path, filenode(pcl[0], path), fl)]
1723 1724
1724 1725 for pc in pcl[1:]:
1725 1726 pl.append((path, filenode(pc, path), fl))
1726 1727
1727 1728 return [self._parentfilectx(p, fileid=n, filelog=l)
1728 1729 for p, n, l in pl if n != nullid]
1729 1730
1730 1731 def children(self):
1731 1732 return []
1732 1733
1733 1734 class workingfilectx(committablefilectx):
1734 1735 """A workingfilectx object makes access to data related to a particular
1735 1736 file in the working directory convenient."""
1736 1737 def __init__(self, repo, path, filelog=None, workingctx=None):
1737 1738 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1738 1739
1739 1740 @propertycache
1740 1741 def _changectx(self):
1741 1742 return workingctx(self._repo)
1742 1743
1743 1744 def data(self):
1744 1745 return self._repo.wread(self._path)
1745 1746 def renamed(self):
1746 1747 rp = self._repo.dirstate.copied(self._path)
1747 1748 if not rp:
1748 1749 return None
1749 1750 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1750 1751
1751 1752 def size(self):
1752 1753 return self._repo.wvfs.lstat(self._path).st_size
1753 1754 def date(self):
1754 1755 t, tz = self._changectx.date()
1755 1756 try:
1756 1757 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1757 1758 except OSError as err:
1758 1759 if err.errno != errno.ENOENT:
1759 1760 raise
1760 1761 return (t, tz)
1761 1762
1762 1763 def cmp(self, fctx):
1763 1764 """compare with other file context
1764 1765
1765 1766 returns True if different than fctx.
1766 1767 """
1767 1768 # fctx should be a filectx (not a workingfilectx)
1768 1769 # invert comparison to reuse the same code path
1769 1770 return fctx.cmp(self)
1770 1771
1771 1772 def remove(self, ignoremissing=False):
1772 1773 """wraps unlink for a repo's working directory"""
1773 1774 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1774 1775
1775 1776 def write(self, data, flags):
1776 1777 """wraps repo.wwrite"""
1777 1778 self._repo.wwrite(self._path, data, flags)
1778 1779
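# Hedged illustration (not in the original patch): reading and rewriting a
# tracked file through the workingfilectx API above. Assumes 'path' is
# tracked in the working directory's first parent so repo['.'][path] exists.
def _examplerewriteworkingfile(repo, path, newdata):
    wfctx = repo[None][path]
    if wfctx.cmp(repo['.'][path]):
        repo.ui.debug('%s differs from its parent revision\n' % path)
    # write() wraps repo.wwrite; flags() preserves the 'l'/'x' bits
    wfctx.write(newdata, wfctx.flags())
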
1779 1780 class workingcommitctx(workingctx):
1780 1781 """A workingcommitctx object makes access to data related to
1781 1782 the revision being committed convenient.
1782 1783
1783 1784 This hides changes in the working directory, if they aren't
1784 1785 committed in this context.
1785 1786 """
1786 1787 def __init__(self, repo, changes,
1787 1788 text="", user=None, date=None, extra=None):
1788 1789 super(workingctx, self).__init__(repo, text, user, date, extra,
1789 1790 changes)
1790 1791
1791 1792 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1792 1793 unknown=False):
1793 1794 """Return matched files only in ``self._status``
1794 1795
1795 1796 Uncommitted files appear "clean" via this context, even if
1796 1797 they aren't actually so in the working directory.
1797 1798 """
1798 1799 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1799 1800 if clean:
1800 1801 clean = [f for f in self._manifest if f not in self._changedset]
1801 1802 else:
1802 1803 clean = []
1803 1804 return scmutil.status([f for f in self._status.modified if match(f)],
1804 1805 [f for f in self._status.added if match(f)],
1805 1806 [f for f in self._status.removed if match(f)],
1806 1807 [], [], [], clean)
1807 1808
1808 1809 @propertycache
1809 1810 def _changedset(self):
1810 1811 """Return the set of files changed in this context
1811 1812 """
1812 1813 changed = set(self._status.modified)
1813 1814 changed.update(self._status.added)
1814 1815 changed.update(self._status.removed)
1815 1816 return changed
1816 1817
1817 1818 def makecachingfilectxfn(func):
1818 1819 """Create a filectxfn that caches based on the path.
1819 1820
1820 1821 We can't use util.cachefunc because it uses all arguments as the cache
1821 1822 key and this creates a cycle since the arguments include the repo and
1822 1823 memctx.
1823 1824 """
1824 1825 cache = {}
1825 1826
1826 1827 def getfilectx(repo, memctx, path):
1827 1828 if path not in cache:
1828 1829 cache[path] = func(repo, memctx, path)
1829 1830 return cache[path]
1830 1831
1831 1832 return getfilectx
1832 1833
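# Hedged illustration (assumption, not part of the change): wrapping a plain
# filectxfn in makecachingfilectxfn so that repeated lookups of the same path
# during one commit reuse the first result. 'sourcectx' is any context whose
# file contents should be copied into the in-memory commit.
def _examplecachingfilectxfn(sourcectx):
    def getfilectx(repo, memctx, path):
        fctx = sourcectx[path]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          memctx=memctx)
    return makecachingfilectxfn(getfilectx)
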
1833 1834 class memctx(committablectx):
1834 1835 """Use memctx to perform in-memory commits via localrepo.commitctx().
1835 1836
1836 1837 Revision information is supplied at initialization time, while
1837 1838 related file data is made available through a callback
1838 1839 mechanism. 'repo' is the current localrepo, 'parents' is a
1839 1840 sequence of two parent revision identifiers (pass None for every
1840 1841 missing parent), 'text' is the commit message and 'files' lists
1841 1842 names of files touched by the revision (normalized and relative to
1842 1843 repository root).
1843 1844
1844 1845 filectxfn(repo, memctx, path) is a callable receiving the
1845 1846 repository, the current memctx object and the normalized path of
1846 1847 requested file, relative to repository root. It is fired by the
1847 1848 commit function for every file in 'files', but the call order is
1848 1849 undefined. If the file is available in the revision being
1849 1850 committed (updated or added), filectxfn returns a memfilectx
1850 1851 object. If the file was removed, filectxfn raises an
1851 1852 IOError. Moved files are represented by marking the source file
1852 1853 removed and the new file added with copy information (see
1853 1854 memfilectx).
1854 1855
1855 1856 user receives the committer name and defaults to current
1856 1857 repository username, date is the commit date in any format
1857 1858 supported by util.parsedate() and defaults to current date, extra
1858 1859 is a dictionary of metadata or is left empty.
1859 1860 """
1860 1861
1861 1862 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1862 1863 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1863 1864 # this field to determine what to do in filectxfn.
1864 1865 _returnnoneformissingfiles = True
1865 1866
1866 1867 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1867 1868 date=None, extra=None, editor=False):
1868 1869 super(memctx, self).__init__(repo, text, user, date, extra)
1869 1870 self._rev = None
1870 1871 self._node = None
1871 1872 parents = [(p or nullid) for p in parents]
1872 1873 p1, p2 = parents
1873 1874 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1874 1875 files = sorted(set(files))
1875 1876 self._files = files
1876 1877 self.substate = {}
1877 1878
1878 1879 # if store is not callable, wrap it in a function
1879 1880 if not callable(filectxfn):
1880 1881 def getfilectx(repo, memctx, path):
1881 1882 fctx = filectxfn[path]
1882 1883 # this is weird but apparently we only keep track of one parent
1883 1884 # (why not only store that instead of a tuple?)
1884 1885 copied = fctx.renamed()
1885 1886 if copied:
1886 1887 copied = copied[0]
1887 1888 return memfilectx(repo, path, fctx.data(),
1888 1889 islink=fctx.islink(), isexec=fctx.isexec(),
1889 1890 copied=copied, memctx=memctx)
1890 1891 self._filectxfn = getfilectx
1891 1892 else:
1892 1893 # memoizing increases performance for e.g. vcs convert scenarios.
1893 1894 self._filectxfn = makecachingfilectxfn(filectxfn)
1894 1895
1895 1896 if extra:
1896 1897 self._extra = extra.copy()
1897 1898 else:
1898 1899 self._extra = {}
1899 1900
1900 1901 if self._extra.get('branch', '') == '':
1901 1902 self._extra['branch'] = 'default'
1902 1903
1903 1904 if editor:
1904 1905 self._text = editor(self._repo, self, [])
1905 1906 self._repo.savecommitmessage(self._text)
1906 1907
1907 1908 def filectx(self, path, filelog=None):
1908 1909 """get a file context from this in-memory commit
1909 1910
1910 1911 Returns None if file doesn't exist and should be removed."""
1911 1912 return self._filectxfn(self._repo, self, path)
1912 1913
1913 1914 def commit(self):
1914 1915 """commit context to the repo"""
1915 1916 return self._repo.commitctx(self)
1916 1917
1917 1918 @propertycache
1918 1919 def _manifest(self):
1919 1920 """generate a manifest based on the return values of filectxfn"""
1920 1921
1921 1922 # keep this simple for now; just worry about p1
1922 1923 pctx = self._parents[0]
1923 1924 man = pctx.manifest().copy()
1924 1925
1925 1926 for f in self._status.modified:
1926 1927 p1node = nullid
1927 1928 p2node = nullid
1928 1929 p = pctx[f].parents() # if file isn't in pctx, check p2?
1929 1930 if len(p) > 0:
1930 1931 p1node = p[0].filenode()
1931 1932 if len(p) > 1:
1932 1933 p2node = p[1].filenode()
1933 1934 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1934 1935
1935 1936 for f in self._status.added:
1936 1937 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1937 1938
1938 1939 for f in self._status.removed:
1939 1940 if f in man:
1940 1941 del man[f]
1941 1942
1942 1943 return man
1943 1944
1944 1945 @propertycache
1945 1946 def _status(self):
1946 1947 """Calculate exact status from ``files`` specified at construction
1947 1948 """
1948 1949 man1 = self.p1().manifest()
1949 1950 p2 = self._parents[1]
1950 1951 # "1 < len(self._parents)" can't be used for checking
1951 1952 # existence of the 2nd parent, because "memctx._parents" is
1952 1953 # explicitly initialized with a list whose length is always 2.
1953 1954 if p2.node() != nullid:
1954 1955 man2 = p2.manifest()
1955 1956 managing = lambda f: f in man1 or f in man2
1956 1957 else:
1957 1958 managing = lambda f: f in man1
1958 1959
1959 1960 modified, added, removed = [], [], []
1960 1961 for f in self._files:
1961 1962 if not managing(f):
1962 1963 added.append(f)
1963 1964 elif self[f]:
1964 1965 modified.append(f)
1965 1966 else:
1966 1967 removed.append(f)
1967 1968
1968 1969 return scmutil.status(modified, added, removed, [], [], [], [])
1969 1970
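# Hedged illustration (assumption, mirroring the docstring above): a minimal
# single-file in-memory commit on top of '.'. The filectxfn returns a
# memfilectx for the touched file; paths it cannot provide would be reported
# as removed (by returning None, or raising IOError for pre-3.1 callers).
def _examplememorycommit(repo, path, data, text):
    def getfilectx(repo, memctx, fpath):
        return memfilectx(repo, fpath, data, memctx=memctx)
    p1 = repo['.'].node()
    mctx = memctx(repo, (p1, None), text, [path], getfilectx)
    # commit() above simply hands the context to localrepo.commitctx()
    return mctx.commit()
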
1970 1971 class memfilectx(committablefilectx):
1971 1972 """memfilectx represents an in-memory file to commit.
1972 1973
1973 1974 See memctx and committablefilectx for more details.
1974 1975 """
1975 1976 def __init__(self, repo, path, data, islink=False,
1976 1977 isexec=False, copied=None, memctx=None):
1977 1978 """
1978 1979 path is the normalized file path relative to repository root.
1979 1980 data is the file content as a string.
1980 1981 islink is True if the file is a symbolic link.
1981 1982 isexec is True if the file is executable.
1982 1983 copied is the source file path if current file was copied in the
1983 1984 revision being committed, or None."""
1984 1985 super(memfilectx, self).__init__(repo, path, None, memctx)
1985 1986 self._data = data
1986 1987 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1987 1988 self._copied = None
1988 1989 if copied:
1989 1990 self._copied = (copied, nullid)
1990 1991
1991 1992 def data(self):
1992 1993 return self._data
1993 1994 def size(self):
1994 1995 return len(self.data())
1995 1996 def flags(self):
1996 1997 return self._flags
1997 1998 def renamed(self):
1998 1999 return self._copied
1999 2000
2000 2001 def remove(self, ignoremissing=False):
2001 2002 """wraps unlink for a repo's working directory"""
2002 2003 # need to figure out what to do here
2003 2004 del self._changectx[self._path]
2004 2005
2005 2006 def write(self, data, flags):
2006 2007 """wraps repo.wwrite"""
2007 2008 self._data = data
2008 2009
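# Hedged illustration (assumption): how a rename is expressed with memfilectx
# per the memctx docstring -- the destination carries 'copied' pointing back
# at the source path, while the filectxfn reports the source itself as
# removed.
def _examplerenamedmemfilectx(repo, memctx, source, dest, data):
    return memfilectx(repo, dest, data, copied=source, memctx=memctx)
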
2009 2010 class metadataonlyctx(committablectx):
2010 2011 """Like memctx, but it reuses the manifest of a different commit.
2011 2012 Intended to be used by lightweight operations that are creating
2012 2013 metadata-only changes.
2013 2014
2014 2015 Revision information is supplied at initialization time. 'repo' is the
2015 2016 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2016 2017 'parents' is a sequence of two parent revision identifiers (pass None for
2017 2018 every missing parent), 'text' is the commit message.
2018 2019
2019 2020 user receives the committer name and defaults to current repository
2020 2021 username, date is the commit date in any format supported by
2021 2022 util.parsedate() and defaults to current date, extra is a dictionary of
2022 2023 metadata or is left empty.
2023 2024 """
2024 2025 def __new__(cls, repo, originalctx, *args, **kwargs):
2025 2026 return super(metadataonlyctx, cls).__new__(cls, repo)
2026 2027
2027 2028 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2028 2029 extra=None, editor=False):
2029 2030 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2030 2031 self._rev = None
2031 2032 self._node = None
2032 2033 self._originalctx = originalctx
2033 2034 self._manifestnode = originalctx.manifestnode()
2034 2035 parents = [(p or nullid) for p in parents]
2035 2036 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2036 2037
2037 2038 # sanity check to ensure that the reused manifest parents are
2038 2039 # manifests of our commit parents
2039 2040 mp1, mp2 = self.manifestctx().parents
2040 2041 if p1 != nullid and p1.manifestctx().node() != mp1:
2041 2042 raise RuntimeError('can\'t reuse the manifest: '
2042 2043 'its p1 doesn\'t match the new ctx p1')
2043 2044 if p2 != nullid and p2.manifestctx().node() != mp2:
2044 2045 raise RuntimeError('can\'t reuse the manifest: '
2045 2046 'its p2 doesn\'t match the new ctx p2')
2046 2047
2047 2048 self._files = originalctx.files()
2048 2049 self.substate = {}
2049 2050
2050 2051 if extra:
2051 2052 self._extra = extra.copy()
2052 2053 else:
2053 2054 self._extra = {}
2054 2055
2055 2056 if self._extra.get('branch', '') == '':
2056 2057 self._extra['branch'] = 'default'
2057 2058
2058 2059 if editor:
2059 2060 self._text = editor(self._repo, self, [])
2060 2061 self._repo.savecommitmessage(self._text)
2061 2062
2062 2063 def manifestnode(self):
2063 2064 return self._manifestnode
2064 2065
2065 2066 @propertycache
2066 2067 def _manifestctx(self):
2067 2068 return self._repo.manifestlog[self._manifestnode]
2068 2069
2069 2070 def filectx(self, path, filelog=None):
2070 2071 return self._originalctx.filectx(path, filelog=filelog)
2071 2072
2072 2073 def commit(self):
2073 2074 """commit context to the repo"""
2074 2075 return self._repo.commitctx(self)
2075 2076
2076 2077 @property
2077 2078 def _manifest(self):
2078 2079 return self._originalctx.manifest()
2079 2080
2080 2081 @propertycache
2081 2082 def _status(self):
2082 2083 """Calculate exact status from ``files`` specified in the ``origctx``
2083 2084 and parents manifests.
2084 2085 """
2085 2086 man1 = self.p1().manifest()
2086 2087 p2 = self._parents[1]
2087 2088 # "1 < len(self._parents)" can't be used for checking
2088 2089 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2089 2090 # explicitly initialized with a list whose length is always 2.
2090 2091 if p2.node() != nullid:
2091 2092 man2 = p2.manifest()
2092 2093 managing = lambda f: f in man1 or f in man2
2093 2094 else:
2094 2095 managing = lambda f: f in man1
2095 2096
2096 2097 modified, added, removed = [], [], []
2097 2098 for f in self._files:
2098 2099 if not managing(f):
2099 2100 added.append(f)
2100 2101 elif self[f]:
2101 2102 modified.append(f)
2102 2103 else:
2103 2104 removed.append(f)
2104 2105
2105 2106 return scmutil.status(modified, added, removed, [], [], [], [])
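
# Hedged illustration (assumption, not part of the change): rewording an
# existing changeset with metadataonlyctx -- the manifest and file data of
# 'rev' are reused verbatim and only the description is replaced.
def _examplereword(repo, rev, newtext):
    old = repo[rev]
    new = metadataonlyctx(repo, old,
                          parents=(old.p1().node(), old.p2().node()),
                          text=newtext,
                          user=old.user(),
                          date=old.date(),
                          extra=old.extra())
    return new.commit()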