context.py
3011 lines
| 95.5 KiB
| text/x-python
|
PythonLexer
/ mercurial / context.py
Matt Mackall
|
r2563 | # context.py - changeset and file context objects for mercurial | ||
# | ||||
Thomas Arendsen Hein
|
r4635 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> | ||
Matt Mackall
|
r2563 | # | ||
Martin Geisler
|
r8225 | # This software may be used and distributed according to the terms of the | ||
Matt Mackall
|
r10263 | # GNU General Public License version 2 or any later version. | ||
Matt Mackall
|
r2563 | |||
Gregory Szorc
|
r27506 | from __future__ import absolute_import | ||
import errno | ||||
Phil Cohen
|
r34686 | import filecmp | ||
Gregory Szorc
|
r27506 | import os | ||
import stat | ||||
Mads Kiilerich
|
r26604 | |||
Gregory Szorc
|
r27506 | from .i18n import _ | ||
from .node import ( | ||||
Durham Goode
|
r30361 | addednodeid, | ||
Gregory Szorc
|
r27506 | hex, | ||
Durham Goode
|
r30361 | modifiednodeid, | ||
Gregory Szorc
|
r27506 | nullid, | ||
nullrev, | ||||
short, | ||||
Yuya Nishihara
|
r37466 | wdirfilenodeids, | ||
Martin von Zweigbergk
|
r42133 | wdirhex, | ||
Gregory Szorc
|
r27506 | ) | ||
Gregory Szorc
|
r43359 | from .pycompat import ( | ||
getattr, | ||||
open, | ||||
) | ||||
Gregory Szorc
|
r27506 | from . import ( | ||
r42935 | copies, | |||
Yuya Nishihara
|
r36935 | dagop, | ||
Gregory Szorc
|
r27506 | encoding, | ||
error, | ||||
fileset, | ||||
match as matchmod, | ||||
obsolete as obsmod, | ||||
patch, | ||||
Matt Harbison
|
r33501 | pathutil, | ||
Gregory Szorc
|
r27506 | phases, | ||
Augie Fackler
|
r31343 | pycompat, | ||
Gregory Szorc
|
r27506 | repoview, | ||
scmutil, | ||||
Gregory Szorc
|
r33353 | sparse, | ||
Gregory Szorc
|
r27506 | subrepo, | ||
Yuya Nishihara
|
r36026 | subrepoutil, | ||
Gregory Szorc
|
r27506 | util, | ||
) | ||||
Yuya Nishihara
|
r37102 | from .utils import ( | ||
dateutil, | ||||
stringutil, | ||||
) | ||||
Matt Mackall
|
r3122 | |||
Matt Mackall
|
# Short alias: util.propertycache caches the decorated method's result as an
# instance attribute on first access (lazy, computed-once properties).
propertycache = util.propertycache
Dirkjan Ochtman
|
r7368 | |||
Augie Fackler
|
r43346 | |||
Sean Farley
|
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        # The repository this context belongs to; subclasses are expected to
        # also set self._rev and self._node.
        self._repo = repo

    def __bytes__(self):
        # Short (12-hex-digit) form of the changeset node.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal when they are of the same concrete type and
        # refer to the same revision number.  AttributeError covers contexts
        # that have no _rev (e.g. uninitialized/working contexts).
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # 'path in ctx' tests membership in the context's manifest.
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] returns a file context for that path.
        return self.filectx(key)

    def __iter__(self):
        # Iterating a context yields the file names in its manifest.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            # Files already reported as deleted must not be re-reported.
            if fn in deletedset:
                continue
            # manifest.diff() reports unchanged-but-matched files as None
            # when clean=True was requested.
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Subrepository state for this context, parsed by subrepoutil.state().
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Second element of the substate entry is the recorded revision.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        # Human-readable name of this changeset's phase (e.g. public/draft).
        return phases.phasenames[self.phase()]

    def mutable(self):
        # Anything with a phase above 'public' may still be rewritten.
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        # Build a matcher from a fileset expression, evaluated in this context.
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.
        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # The null revision stands in for a missing second parent.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for ``path`` in this changeset.

        Reads from whichever manifest representation is already cached
        (full manifest, then manifest delta) before falling back to a
        manifestlog lookup.  Raises ManifestLookupError if the path is
        not present.
        """
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files report empty flags rather than raising.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from file-level copy metadata.
        return copies.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
    ):
        """Build a matcher for this context from patterns and include/exclude
        lists, rooted at the repository root."""
        r = self._repo
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        # Default to diffing against the first parent; a non-context ctx2
        # (e.g. a revision identifier) is resolved through the repo.
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """
        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                # Prefix subrepo results with their path and merge them in.
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
Sean Farley
|
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        # Hash by revision number when available; fall back to identity for
        # contexts that have no _rev yet.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Revisions known to be unfiltered can skip the repoview overhead.
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # Omit a null second parent entirely rather than returning it.
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        # Tuple view of the changelog entry.
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        # Modified = touched files minus those classified as added/removed.
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        """Return the list of files added by this changeset.

        The source of the data depends on configuration: changeset
        sidedata, the experimental.copies.read-from setting, or (as a
        fallback) recomputation from the filelogs.
        """
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = copies.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        """Return the list of files removed by this changeset.

        Mirrors filesadded(): changeset sidedata, config-selected source,
        or recomputation from the filelogs.
        """
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = copies.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatiblity mode and there is not data in the
            # changeset), we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        # True when this revision is filtered out of the 'visible' view.
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched; use the revlog ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
Sean Farley
|
r19572 | class basefilectx(object): | ||
"""A filecontext object represents the common logic for its children: | ||||
filectx: read-only access to a filerevision that is already present | ||||
in the repo, | ||||
workingfilectx: a filecontext that represents files from the working | ||||
directory, | ||||
Jun Wu
|
r32239 | memfilectx: a filecontext that represents files in-memory, | ||
""" | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19573 | @propertycache | ||
def _filelog(self): | ||||
return self._repo.file(self._path) | ||||
Sean Farley
|
r19574 | @propertycache | ||
def _changeid(self): | ||||
Augie Fackler
|
r43906 | if '_changectx' in self.__dict__: | ||
Sean Farley
|
r19574 | return self._changectx.rev() | ||
Augie Fackler
|
r43906 | elif '_descendantrev' in self.__dict__: | ||
Matt Mackall
|
r23983 | # this file context was created from a revision with a known | ||
# descendant, we can (lazily) correct for linkrev aliases | ||||
Jun Wu
|
r30275 | return self._adjustlinkrev(self._descendantrev) | ||
Sean Farley
|
r19574 | else: | ||
return self._filelog.linkrev(self._filerev) | ||||
Sean Farley
|
r19575 | @propertycache | ||
def _filenode(self): | ||||
Augie Fackler
|
r43906 | if '_fileid' in self.__dict__: | ||
Sean Farley
|
r19575 | return self._filelog.lookup(self._fileid) | ||
else: | ||||
return self._changectx.filenode(self._path) | ||||
Sean Farley
|
r19576 | @propertycache | ||
def _filerev(self): | ||||
return self._filelog.rev(self._filenode) | ||||
Sean Farley
|
r19577 | @propertycache | ||
def _repopath(self): | ||||
return self._path | ||||
Sean Farley
|
r19578 | def __nonzero__(self): | ||
try: | ||||
self._filenode | ||||
return True | ||||
except error.LookupError: | ||||
# file is missing | ||||
return False | ||||
Gregory Szorc
|
r31476 | __bool__ = __nonzero__ | ||
Yuya Nishihara
|
r33022 | def __bytes__(self): | ||
Mads Kiilerich
|
r30270 | try: | ||
Augie Fackler
|
r43347 | return b"%s@%s" % (self.path(), self._changectx) | ||
Mads Kiilerich
|
r30270 | except error.LookupError: | ||
Augie Fackler
|
r43347 | return b"%s@???" % self.path() | ||
Sean Farley
|
r19579 | |||
Yuya Nishihara
|
r33022 | __str__ = encoding.strmethod(__bytes__) | ||
Pulkit Goyal
|
r33019 | |||
Sean Farley
|
r19580 | def __repr__(self): | ||
Augie Fackler
|
r43809 | return "<%s %s>" % (type(self).__name__, str(self)) | ||
Sean Farley
|
r19580 | |||
Sean Farley
|
r19581 | def __hash__(self): | ||
try: | ||||
return hash((self._path, self._filenode)) | ||||
except AttributeError: | ||||
return id(self) | ||||
Sean Farley
|
r19582 | def __eq__(self, other): | ||
try: | ||||
Augie Fackler
|
r43346 | return ( | ||
type(self) == type(other) | ||||
and self._path == other._path | ||||
and self._filenode == other._filenode | ||||
) | ||||
Sean Farley
|
r19582 | except AttributeError: | ||
return False | ||||
Sean Farley
|
r19583 | def __ne__(self, other): | ||
return not (self == other) | ||||
Sean Farley
|
r19584 | def filerev(self): | ||
return self._filerev | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19585 | def filenode(self): | ||
return self._filenode | ||||
Augie Fackler
|
r43346 | |||
Jun Wu
|
r32234 | @propertycache | ||
def _flags(self): | ||||
return self._changectx.flags(self._path) | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19586 | def flags(self): | ||
Jun Wu
|
r32234 | return self._flags | ||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19587 | def filelog(self): | ||
return self._filelog | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19588 | def rev(self): | ||
return self._changeid | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19589 | def linkrev(self): | ||
return self._filelog.linkrev(self._filerev) | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19590 | def node(self): | ||
return self._changectx.node() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19591 | def hex(self): | ||
return self._changectx.hex() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19592 | def user(self): | ||
return self._changectx.user() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19593 | def date(self): | ||
return self._changectx.date() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19594 | def files(self): | ||
return self._changectx.files() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19595 | def description(self): | ||
return self._changectx.description() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19596 | def branch(self): | ||
return self._changectx.branch() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19597 | def extra(self): | ||
return self._changectx.extra() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19598 | def phase(self): | ||
return self._changectx.phase() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19599 | def phasestr(self): | ||
return self._changectx.phasestr() | ||||
Augie Fackler
|
r43346 | |||
r35087 | def obsolete(self): | |||
return self._changectx.obsolete() | ||||
Augie Fackler
|
r43346 | |||
r35092 | def instabilities(self): | |||
return self._changectx.instabilities() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19600 | def manifest(self): | ||
return self._changectx.manifest() | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19601 | def changectx(self): | ||
return self._changectx | ||||
Augie Fackler
|
r43346 | |||
Jun Wu
|
r32235 | def renamed(self): | ||
return self._copied | ||||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r41934 | def copysource(self): | ||
return self._copied and self._copied[0] | ||||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
r24333 | def repo(self): | ||
return self._repo | ||||
Augie Fackler
|
r43346 | |||
Jun Wu
|
r32236 | def size(self): | ||
return len(self.data()) | ||||
Sean Farley
|
r19584 | |||
Sean Farley
|
r19602 | def path(self): | ||
return self._path | ||||
Sean Farley
|
r19603 | def isbinary(self): | ||
try: | ||||
Yuya Nishihara
|
r37102 | return stringutil.binary(self.data()) | ||
Sean Farley
|
r19603 | except IOError: | ||
return False | ||||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r22054 | def isexec(self): | ||
Augie Fackler
|
r43347 | return b'x' in self.flags() | ||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r22054 | def islink(self): | ||
Augie Fackler
|
r43347 | return b'l' in self.flags() | ||
Sean Farley
|
r19603 | |||
Siddharth Agarwal
|
r26978 | def isabsent(self): | ||
"""whether this filectx represents a file not in self._changectx | ||||
This is mainly for merge code to detect change/delete conflicts. This is | ||||
expected to be True for all subclasses of basectx.""" | ||||
return False | ||||
Siddharth Agarwal
|
r26977 | _customcmp = False | ||
Augie Fackler
|
r43346 | |||
Sean Farley
|
r19604 | def cmp(self, fctx): | ||
"""compare with other file context | ||||
returns True if different than fctx. | ||||
""" | ||||
Siddharth Agarwal
|
r26977 | if fctx._customcmp: | ||
return fctx.cmp(self) | ||||
Yuya Nishihara
|
r41027 | if self._filenode is None: | ||
raise error.ProgrammingError( | ||||
Augie Fackler
|
r43347 | b'filectx.cmp() must be reimplemented if not backed by revlog' | ||
Augie Fackler
|
r43346 | ) | ||
Yuya Nishihara
|
r41027 | |||
Yuya Nishihara
|
r41026 | if fctx._filenode is None: | ||
if self._repo._encodefilterpats: | ||||
# can't rely on size() because wdir content may be decoded | ||||
return self._filelog.cmp(self._filenode, fctx.data()) | ||||
if self.size() - 4 == fctx.size(): | ||||
# size() can match: | ||||
# if file data starts with '\1\n', empty metadata block is | ||||
# prepended, which adds 4 bytes to filelog.size(). | ||||
return self._filelog.cmp(self._filenode, fctx.data()) | ||||
if self.size() == fctx.size(): | ||||
# size() matches: need to compare content | ||||
Sean Farley
|
r19604 | return self._filelog.cmp(self._filenode, fctx.data()) | ||
Yuya Nishihara
|
r41026 | # size() differs | ||
Sean Farley
|
r19604 | return True | ||
Boris Feld
|
r40733 | def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None): | ||
Mads Kiilerich
|
r24180 | """return the first ancestor of <srcrev> introducing <fnode> | ||
Pierre-Yves David
|
r23979 | |||
If the linkrev of the file revision does not point to an ancestor of | ||||
srcrev, we'll walk down the ancestors until we find one introducing | ||||
this file revision. | ||||
:srcrev: the changeset revision we search ancestors from | ||||
:inclusive: if true, the src revision will also be checked | ||||
Boris Feld
|
r40733 | :stoprev: an optional revision to stop the walk at. If no introduction | ||
of this file content could be found before this floor | ||||
revision, the function will returns "None" and stops its | ||||
iteration. | ||||
Pierre-Yves David
|
r23979 | """ | ||
repo = self._repo | ||||
cl = repo.unfiltered().changelog | ||||
Durham Goode
|
r29939 | mfl = repo.manifestlog | ||
Pierre-Yves David
|
r23979 | # fetch the linkrev | ||
Jun Wu
|
r30275 | lkr = self.linkrev() | ||
Boris Feld
|
r40080 | if srcrev == lkr: | ||
return lkr | ||||
Pierre-Yves David
|
r23980 | # hack to reuse ancestor computation when searching for renames | ||
memberanc = getattr(self, '_ancestrycontext', None) | ||||
iteranc = None | ||||
Pierre-Yves David
|
r24411 | if srcrev is None: | ||
# wctx case, used by workingfilectx during mergecopy | ||||
revs = [p.rev() for p in self._repo[None].parents()] | ||||
Augie Fackler
|
r43346 | inclusive = True # we skipped the real (revless) source | ||
Pierre-Yves David
|
r24411 | else: | ||
revs = [srcrev] | ||||
Pierre-Yves David
|
r23980 | if memberanc is None: | ||
Augie Fackler
|
r43346 | memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive) | ||
Pierre-Yves David
|
r23979 | # check if this linkrev is an ancestor of srcrev | ||
Pierre-Yves David
|
r23980 | if lkr not in memberanc: | ||
if iteranc is None: | ||||
Pierre-Yves David
|
r24410 | iteranc = cl.ancestors(revs, lkr, inclusive=inclusive) | ||
Jun Wu
|
r30275 | fnode = self._filenode | ||
path = self._path | ||||
Pierre-Yves David
|
r23980 | for a in iteranc: | ||
Boris Feld
|
r40733 | if stoprev is not None and a < stoprev: | ||
return None | ||||
Augie Fackler
|
r43346 | ac = cl.read(a) # get changeset data (we avoid object creation) | ||
if path in ac[3]: # checking the 'files' field. | ||||
Pierre-Yves David
|
r23979 | # The file has been touched, check if the content is | ||
# similar to the one we search for. | ||||
Durham Goode
|
r29939 | if fnode == mfl[ac[0]].readfast().get(path): | ||
Pierre-Yves David
|
r23979 | return a | ||
# In theory, we should never get out of that loop without a result. | ||||
# But if manifest uses a buggy file revision (not children of the | ||||
# one it replaces) we could. Such a buggy situation will likely | ||||
# result is crash somewhere else at to some point. | ||||
return lkr | ||||
Boris Feld
|
r40730 | def isintroducedafter(self, changelogrev): | ||
"""True if a filectx has been introduced after a given floor revision | ||||
""" | ||||
Boris Feld
|
r40732 | if self.linkrev() >= changelogrev: | ||
return True | ||||
Boris Feld
|
r40733 | introrev = self._introrev(stoprev=changelogrev) | ||
if introrev is None: | ||||
return False | ||||
Boris Feld
|
r40732 | return introrev >= changelogrev | ||
Boris Feld
|
r40730 | |||
Pierre-Yves David
|
r23703 | def introrev(self): | ||
"""return the rev of the changeset which introduced this file revision | ||||
This method is different from linkrev because it take into account the | ||||
changeset the filectx was created from. It ensures the returned | ||||
revision is one of its ancestors. This prevents bugs from | ||||
'linkrev-shadowing' when a file revision is used by multiple | ||||
changesets. | ||||
""" | ||||
Boris Feld
|
r40731 | return self._introrev() | ||
Boris Feld
|
r40733 | def _introrev(self, stoprev=None): | ||
""" | ||||
Same as `introrev` but, with an extra argument to limit changelog | ||||
iteration range in some internal usecase. | ||||
If `stoprev` is set, the `introrev` will not be searched past that | ||||
`stoprev` revision and "None" might be returned. This is useful to | ||||
limit the iteration range. | ||||
""" | ||||
Boris Feld
|
r40728 | toprev = None | ||
Pierre-Yves David
|
r23703 | attrs = vars(self) | ||
Augie Fackler
|
r43906 | if '_changeid' in attrs: | ||
Boris Feld
|
r40728 | # We have a cached value already | ||
toprev = self._changeid | ||||
Augie Fackler
|
r43906 | elif '_changectx' in attrs: | ||
Boris Feld
|
r40728 | # We know which changelog entry we are coming from | ||
toprev = self._changectx.rev() | ||||
if toprev is not None: | ||||
Boris Feld
|
r40733 | return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev) | ||
Augie Fackler
|
r43906 | elif '_descendantrev' in attrs: | ||
Boris Feld
|
r40733 | introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev) | ||
Boris Feld
|
r40729 | # be nice and cache the result of the computation | ||
Boris Feld
|
r40733 | if introrev is not None: | ||
self._changeid = introrev | ||||
Boris Feld
|
r40729 | return introrev | ||
Boris Feld
|
r40082 | else: | ||
Pierre-Yves David
|
r23703 | return self.linkrev() | ||
Yuya Nishihara
|
r35272 | def introfilectx(self): | ||
"""Return filectx having identical contents, but pointing to the | ||||
changeset revision where this filectx was introduced""" | ||||
introrev = self.introrev() | ||||
if self.rev() == introrev: | ||||
return self | ||||
return self.filectx(self.filenode(), changeid=introrev) | ||||
Yuya Nishihara
|
r24816 | def _parentfilectx(self, path, fileid, filelog): | ||
"""create parent filectx keeping ancestry info for _adjustlinkrev()""" | ||||
fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog) | ||||
Augie Fackler
|
r43906 | if '_changeid' in vars(self) or '_changectx' in vars(self): | ||
Yuya Nishihara
|
r24816 | # If self is associated with a changeset (probably explicitly | ||
# fed), ensure the created filectx is associated with a | ||||
# changeset that is an ancestor of self.changectx. | ||||
# This lets us later use _adjustlinkrev to get a correct link. | ||||
fctx._descendantrev = self.rev() | ||||
fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) | ||||
Augie Fackler
|
r43906 | elif '_descendantrev' in vars(self): | ||
Yuya Nishihara
|
r24816 | # Otherwise propagate _descendantrev if we have one associated. | ||
fctx._descendantrev = self._descendantrev | ||||
fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) | ||||
return fctx | ||||
Sean Farley
|
r19605 | def parents(self): | ||
Mads Kiilerich
|
r22201 | _path = self._path | ||
Sean Farley
|
r19605 | fl = self._filelog | ||
Pierre-Yves David
|
r23688 | parents = self._filelog.parents(self._filenode) | ||
pl = [(_path, node, fl) for node in parents if node != nullid] | ||||
Sean Farley
|
r19605 | |||
Pierre-Yves David
|
r23702 | r = fl.renamed(self._filenode) | ||
Sean Farley
|
r19605 | if r: | ||
Pierre-Yves David
|
r23688 | # - In the simple rename case, both parent are nullid, pl is empty. | ||
# - In case of merge, only one of the parent is null id and should | ||||
# be replaced with the rename information. This parent is -always- | ||||
# the first one. | ||||
# | ||||
Mads Kiilerich
|
r24180 | # As null id have always been filtered out in the previous list | ||
Pierre-Yves David
|
r23688 | # comprehension, inserting to 0 will always result in "replacing | ||
# first nullid parent with rename information. | ||||
Pierre-Yves David
|
r23699 | pl.insert(0, (r[0], r[1], self._repo.file(r[0]))) | ||
Sean Farley
|
r19605 | |||
Yuya Nishihara
|
r24816 | return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl] | ||
Sean Farley
|
r19605 | |||
Sean Farley
|
r19606 | def p1(self): | ||
return self.parents()[0] | ||||
Sean Farley
|
r19607 | def p2(self): | ||
p = self.parents() | ||||
if len(p) == 2: | ||||
return p[1] | ||||
return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog) | ||||
Yuya Nishihara
|
r37083 | def annotate(self, follow=False, skiprevs=None, diffopts=None): | ||
Yuya Nishihara
|
r37084 | """Returns a list of annotateline objects for each line in the file | ||
- line.fctx is the filectx of the node where that line was last changed | ||||
- line.lineno is the line number at the first appearance in the managed | ||||
Yuya Nishihara
|
r37083 | file | ||
Yuya Nishihara
|
r37084 | - line.text is the data on that line (including newline character) | ||
Yuya Nishihara
|
r37083 | """ | ||
Matt Mackall
|
r9097 | getlog = util.lrucachefunc(lambda x: self._repo.file(x)) | ||
Brendan Cully
|
r3172 | |||
def parents(f): | ||||
Yuya Nishihara
|
r24862 | # Cut _descendantrev here to mitigate the penalty of lazy linkrev | ||
# adjustment. Otherwise, p._adjustlinkrev() would walk changelog | ||||
# from the topmost introrev (= srcrev) down to p.linkrev() if it | ||||
# isn't an ancestor of the srcrev. | ||||
f._changeid | ||||
Durham Goode
|
r19292 | pl = f.parents() | ||
# Don't return renamed parents if we aren't following. | ||||
if not follow: | ||||
pl = [p for p in pl if p.path() == f.path()] | ||||
Brendan Cully
|
r3172 | |||
Durham Goode
|
r19292 | # renamed filectx won't have a filelog yet, so set it | ||
# from the cache to save time | ||||
for p in pl: | ||||
Augie Fackler
|
r43906 | if not '_filelog' in p.__dict__: | ||
Durham Goode
|
r19292 | p._filelog = getlog(p.path()) | ||
Brendan Cully
|
r3146 | |||
Durham Goode
|
r19292 | return pl | ||
Matt Mackall
|
r3217 | |||
Brendan Cully
|
r3404 | # use linkrev to find the first changeset where self appeared | ||
Yuya Nishihara
|
r35272 | base = self.introfilectx() | ||
Yuya Nishihara
|
r24818 | if getattr(base, '_ancestrycontext', None) is None: | ||
cl = self._repo.changelog | ||||
Yuya Nishihara
|
r35272 | if base.rev() is None: | ||
Yuya Nishihara
|
r24818 | # wctx is not inclusive, but works because _ancestrycontext | ||
# is used to test filelog revisions | ||||
Augie Fackler
|
r43346 | ac = cl.ancestors( | ||
[p.rev() for p in base.parents()], inclusive=True | ||||
) | ||||
Yuya Nishihara
|
r24818 | else: | ||
Yuya Nishihara
|
r35272 | ac = cl.ancestors([base.rev()], inclusive=True) | ||
Pierre-Yves David
|
r24407 | base._ancestrycontext = ac | ||
Brendan Cully
|
r3404 | |||
Augie Fackler
|
r43346 | return dagop.annotate( | ||
base, parents, skiprevs=skiprevs, diffopts=diffopts | ||||
) | ||||
Matt Mackall
|
r3124 | |||
Sean Farley
|
r19610 | def ancestors(self, followfirst=False): | ||
visit = {} | ||||
c = self | ||||
Jordi Gutiérrez Hermoso
|
r24306 | if followfirst: | ||
cut = 1 | ||||
else: | ||||
cut = None | ||||
Sean Farley
|
r19610 | while True: | ||
for parent in c.parents()[:cut]: | ||||
Matt Mackall
|
r23981 | visit[(parent.linkrev(), parent.filenode())] = parent | ||
Sean Farley
|
r19610 | if not visit: | ||
break | ||||
c = visit.pop(max(visit)) | ||||
yield c | ||||
Phil Cohen
|
r33902 | def decodeddata(self): | ||
"""Returns `data()` after running repository decoding filters. | ||||
This is often equivalent to how the data would be expressed on disk. | ||||
""" | ||||
return self._repo.wwritedata(self.path(), self.data()) | ||||
Augie Fackler
|
r43346 | |||
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one anchor (changeset, file revision or changectx) is
        # required to locate the file revision
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        # only seed the caches that were explicitly provided; the rest are
        # computed lazily by the propertycaches on basefilectx
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        # raw revlog data, without metadata stripping or flag processing
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """
        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # same file revision already present in a parent: the
                    # copy is not introduced by this changeset
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # a committable context has no revision/node yet
        self._rev = None
        self._node = None
        self._text = text
        # user/date/status fall back to propertycaches when not given
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        # render as "<parent>+" to mark uncommitted state
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default status: working directory vs its parent
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # honor devel.default-date for reproducible test runs
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases used by the commit code
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft  # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # the parents themselves, then all of their changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.
        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
Sean Farley
|
r19733 | class workingctx(committablectx): | ||
Sean Farley
|
r19671 | """A workingctx object makes access to data related to | ||
the current working directory convenient. | ||||
date - any valid date string or (unixtime, offset), or None. | ||||
user - username string, or None. | ||||
extra - a dictionary of extra values, or None. | ||||
changes - a list of file lists as returned by localrepo.status() | ||||
or None to use the repository status. | ||||
""" | ||||
def __init__(
    self, repo, text=b"", user=None, date=None, extra=None, changes=None
):
    """Initialize a workingctx; the branch defaults to the dirstate branch
    unless one is explicitly given in *extra*."""
    branch = None
    if not extra or b'branch' not in extra:
        try:
            branch = repo.dirstate.branch()
        except UnicodeDecodeError:
            raise error.Abort(_(b'branch name not in UTF-8!'))
    super(workingctx, self).__init__(
        repo, text, user, date, extra, changes, branch=branch
    )
def __iter__(self):
    """Iterate over tracked files (everything not marked removed)."""
    d = self._repo.dirstate
    for f in d:
        # b'r' is the dirstate state for files marked as removed
        if d[f] != b'r':
            yield f
def __contains__(self, key):
    """True if *key* is tracked: dirstate state is neither unknown (b'?')
    nor removed (b'r')."""
    return self._repo.dirstate[key] not in b"?r"
def hex(self):
    """Return the hex id of the working-directory pseudo-node."""
    return wdirhex
@propertycache
def _parents(self):
    """Parent changectxs of the working directory (one or two entries)."""
    p = self._repo.dirstate.parents()
    if p[1] == nullid:
        # drop the null second parent when there is no pending merge
        p = p[:-1]
    # use unfiltered repo to delay/avoid loading obsmarkers
    unfi = self._repo.unfiltered()
    return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
def _fileinfo(self, path):
    """Look up *path* via the full manifest (no delta available here)."""
    # populate __dict__['_manifest'] as workingctx has no _manifestdelta
    self._manifest
    return super(workingctx, self)._fileinfo(path)
def _buildflagfunc(self):
    # Create a fallback function for getting file flags when the
    # filesystem doesn't support them

    copiesget = self._repo.dirstate.copies().get
    parents = self.parents()
    if len(parents) < 2:
        # when we have one parent, it's easy: copy from parent
        man = parents[0].manifest()

        def func(f):
            # follow a recorded copy back to its source before lookup
            f = copiesget(f, f)
            return man.flags(f)

    else:
        # merges are tricky: we try to reconstruct the unstored
        # result from the merge (issue1802)
        p1, p2 = parents
        pa = p1.ancestor(p2)
        m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

        def func(f):
            f = copiesget(f, f)  # may be wrong for merges with copies
            fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
            if fl1 == fl2:
                return fl1
            if fl1 == fla:
                # only p2 changed the flag: take its side
                return fl2
            if fl2 == fla:
                # only p1 changed the flag: take its side
                return fl1
            return b''  # punt for conflicts

    return func
@propertycache | ||||
def _flagfunc(self): | ||||
return self._repo.dirstate.flagfunc(self._buildflagfunc) | ||||
def flags(self, path): | ||||
Augie Fackler
|
r43906 | if '_manifest' in self.__dict__: | ||
Martin von Zweigbergk
|
r42478 | try: | ||
return self._manifest.flags(path) | ||||
except KeyError: | ||||
Augie Fackler
|
r43347 | return b'' | ||
Martin von Zweigbergk
|
r42478 | |||
try: | ||||
return self._flagfunc(path) | ||||
except OSError: | ||||
Augie Fackler
|
r43347 | return b'' | ||
Martin von Zweigbergk
|
r42478 | |||
Benoit Boissinot
|
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
Matt Mackall
|
r3217 | |||
Patrick Mezard
|
r16491 | def dirty(self, missing=False, merge=True, branch=True): | ||
Augie Fackler
|
r43347 | b"check whether a working directory is modified" | ||
Edouard Gomez
|
r11110 | # check subrepos first | ||
Mads Kiilerich
|
r18364 | for s in sorted(self.substate): | ||
Matt Harbison
|
r33364 | if self.sub(s).dirty(missing=missing): | ||
Edouard Gomez
|
r11110 | return True | ||
# check current working dir | ||||
Augie Fackler
|
r43346 | return ( | ||
(merge and self.p2()) | ||||
or (branch and self.branch() != self.p1().branch()) | ||||
or self.modified() | ||||
or self.added() | ||||
or self.removed() | ||||
or (missing and self.deleted()) | ||||
) | ||||
Matt Mackall
|
r8717 | |||
Augie Fackler
|
    def add(self, list, prefix=b""):
        """Schedule the files in ``list`` for addition; return the rejected
        ones.

        ``prefix`` is joined onto each name for user-facing messages and
        portability checks. Nonexistent paths and paths that are neither
        regular files nor symlinks are warned about and rejected; very
        large files produce a warning but are still added.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension.  That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    # warn (but still add): large files are kept in memory
                    # while being managed
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    # already added, modified or normal (clean): nothing to do
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    # previously marked removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
Augie Fackler
|
    def forget(self, files, prefix=b""):
        """Stop tracking ``files`` without deleting them from disk; return
        the rejected (untracked) ones.

        Files in the 'added' state are simply dropped from the dirstate;
        other tracked files are marked removed.
        """
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != b'a':
                    ds.remove(f)
                else:
                    ds.drop(f)
            return rejected
Dirkjan Ochtman
|
r11303 | |||
    def copy(self, source, dest):
        """Record in the dirstate that ``dest`` was copied from ``source``.

        ``dest`` must exist in the working directory and be a regular file
        or a symlink; otherwise a warning is emitted and nothing is
        recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    # untracked destination: start tracking it
                    ds.add(dest)
                elif ds[dest] in b'r':
                    # scheduled for removal: resurrect it
                    ds.normallookup(dest)
                ds.copy(source, dest)
Dirkjan Ochtman
|
r11303 | |||
Augie Fackler
|
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
    ):
        """Build a matcher for working-directory file names from the given
        patterns and include/exclude lists."""
        r = self._repo
        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )
Matt Harbison
|
r24790 | |||
Sean Farley
|
    def _filtersuspectsymlink(self, files):
        """Filter out modified files whose dirstate flag says symlink but
        whose on-disk content clearly is not a symlink placeholder."""
        if not files or self._repo.dirstate._checklink:
            # filesystem supports symlinks natively: nothing to filter
            return files
        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == b'l':
                d = self[f].data()
                if (
                    d == b''
                    or len(d) >= 1024
                    or b'\n' in d
                    or stringutil.binary(d)
                ):
                    self._repo.ui.debug(
                        b'ignoring suspect symlink placeholder "%s"\n' % f
                    )
                    continue
            sane.append(f)
        return sane
Sean Farley
|
    def _checklookup(self, files):
        """Re-check files whose dirstate entry is ambiguous ("lookup").

        Returns a ``(modified, deleted, fixup)`` triple: files whose
        content genuinely differs from the first parent, files that became
        inaccessible while checking, and files proven clean whose dirstate
        entry can be refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
Siddharth Agarwal
|
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Also runs any registered post-dirstate-status hooks. Best-effort:
        silently skips the dirstate update if the wlock cannot be taken or
        the dirstate changed underneath us.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)
                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # somebody else holds the wlock; the fixup is optional
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
Sean Farley
|
r21395 | |||
Martin von Zweigbergk
|
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            # files proven clean by the content compare belong in 'clean'
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
Sean Farley
|
r21397 | |||
Durham Goode
|
    @propertycache
    def _copies(self):
        """(p1copies, p2copies) dicts mapping dst -> src for changed files
        whose recorded copy source exists in the respective parent
        manifest."""
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()
        man = parents[0].manifest().copy()
        ff = self._flagfunc
        for i, l in (
            (addednodeid, status.added),
            (modifiednodeid, status.modified),
        ):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    # flags unavailable (e.g. file vanished); keep default
                    pass
        for f in status.deleted + status.removed:
            if f in man:
                del man[f]
        return man
Augie Fackler
|
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # not comparing against the parent: delegate to the generic
            # manifest-vs-manifest comparison in the base class
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
Martin von Zweigbergk
|
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match
Martin von Zweigbergk
|
r42479 | def walk(self, match): | ||
'''Generates matching file names.''' | ||||
Augie Fackler
|
r43346 | return sorted( | ||
self._repo.dirstate.walk( | ||||
self._repo.narrowmatch(match), | ||||
subrepos=sorted(self.substate), | ||||
unknown=True, | ||||
ignored=False, | ||||
) | ||||
) | ||||
Martin von Zweigbergk
|
r42479 | |||
def matches(self, match): | ||||
match = self._repo.narrowmatch(match) | ||||
ds = self._repo.dirstate | ||||
Augie Fackler
|
r43347 | return sorted(f for f in ds.matches(match) if ds[f] != b'r') | ||
Martin von Zweigbergk
|
r42479 | |||
Gregory Szorc
|
    def markcommitted(self, node):
        """Update the dirstate to reflect that this context was committed
        as ``node``: parents move to ``node`` and touched files become
        normal (or are dropped if removed)."""
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            # write changes out explicitly, because nesting wlock at
            # runtime may prevent 'wlock.release()' in 'repo.commit()'
            # from immediately doing so for subsequent changing files
            self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
Augie Fackler
|
r43346 | |||
Sean Farley
|
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()
        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]
        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop null parents (file absent in that parent)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        # an uncommitted file has no committed children yet
        return []
Augie Fackler
|
r43346 | |||
Sean Farley
|
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # raw file content as read from the working directory
        return self._repo.wread(self._path)

    def copysource(self):
        # path this file was copied from (per dirstate), or None
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tzoffset); fall back to the changectx date when
        the file no longer exists on disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # audit the path (e.g. refuse to escape the repository root)
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # set the symlink (l) / exec (x) flags on the on-disk file
        self._repo.wvfs.setflags(self._path, l, x)
Augie Fackler
|
r43346 | |||
Phil Cohen
|
r35324 | class overlayworkingctx(committablectx): | ||
"""Wraps another mutable context with a write-back cache that can be | ||||
converted into a commit context. | ||||
Phil Cohen
|
r34106 | |||
self._cache[path] maps to a dict with keys: { | ||||
'exists': bool? | ||||
'date': date? | ||||
'data': str? | ||||
'flags': str? | ||||
Phil Cohen
|
r35294 | 'copied': str? (path or None) | ||
Phil Cohen
|
r34106 | } | ||
If `exists` is True, `flags` must be non-None and 'date' is non-None. If it | ||||
is `False`, the file was deleted. | ||||
""" | ||||
Phil Cohen
|
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty write-back cache
        self.clean()

    def setbase(self, wrappedctx):
        """(Re)target this overlay at ``wrappedctx``: reads fall through to
        it and it becomes the single parent."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def data(self, path):
        """Return the (possibly cached) data of ``path``, falling through
        to the wrapped context when the overlay has no data for it."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # parent manifest plus sentinel nodeids for added/modified entries,
        # minus removed entries
        parents = self.parents()
        man = parents[0].manifest().copy()
        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # flags for dirty files always come from the cache
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # cached files that also exist in the wrapped (base) context
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # cached files that do not exist in the wrapped (base) context
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # cached deletions of files that exist in the wrapped context
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]
Martin von Zweigbergk
|
r41921 | def p1copies(self): | ||
copies = self._repo._wrappedctx.p1copies().copy() | ||||
narrowmatch = self._repo.narrowmatch() | ||||
for f in self._cache.keys(): | ||||
if not narrowmatch(f): | ||||
continue | ||||
Augie Fackler
|
r43346 | copies.pop(f, None) # delete if it exists | ||
Augie Fackler
|
r43347 | source = self._cache[f][b'copied'] | ||
Martin von Zweigbergk
|
r41921 | if source: | ||
copies[f] = source | ||||
return copies | ||||
def p2copies(self): | ||||
copies = self._repo._wrappedctx.p2copies().copy() | ||||
narrowmatch = self._repo.narrowmatch() | ||||
for f in self._cache.keys(): | ||||
if not narrowmatch(f): | ||||
continue | ||||
Augie Fackler
|
r43346 | copies.pop(f, None) # delete if it exists | ||
Augie Fackler
|
r43347 | source = self._cache[f][b'copied'] | ||
Martin von Zweigbergk
|
r41921 | if source: | ||
copies[f] = source | ||||
return copies | ||||
Phil Cohen
|
    def isinmemory(self):
        # overlayworkingctx never touches the filesystem
        return True

    def filedate(self, path):
        """Date of the cached write for ``path``, or the wrapped file's
        date when not dirty."""
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record ``path`` as copied from ``origin`` in the overlay cache,
        preserving the current date and flags."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        # copy source recorded in the cache, or None when not dirty
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None
Phil Cohen
|
r35294 | |||
Phil Cohen
|
r34106 | def flags(self, path): | ||
if self.isdirty(path): | ||||
Augie Fackler
|
r43347 | if self._cache[path][b'exists']: | ||
return self._cache[path][b'flags'] | ||||
Phil Cohen
|
r34106 | else: | ||
Augie Fackler
|
r43346 | raise error.ProgrammingError( | ||
Augie Fackler
|
r43347 | b"No such file or directory: %s" % self._path | ||
Augie Fackler
|
r43346 | ) | ||
Phil Cohen
|
r34106 | else: | ||
return self._wrappedctx[path].flags() | ||||
Martin von Zweigbergk
|
    def __contains__(self, key):
        # the cache overrides the parent: a cached deletion hides the file
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        """True if ``path`` exists in the wrapped (base) context."""
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                # the only match is the path itself: no directory conflict
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )
Phil Cohen
|
r35325 | |||
Augie Fackler
|
r43347 | def write(self, path, data, flags=b'', **kwargs): | ||
Phil Cohen
|
r34106 | if data is None: | ||
Augie Fackler
|
r43347 | raise error.ProgrammingError(b"data must be non-None") | ||
Phil Cohen
|
r35325 | self._auditconflicts(path) | ||
Augie Fackler
|
r43346 | self._markdirty( | ||
path, exists=True, data=data, date=dateutil.makedate(), flags=flags | ||||
) | ||||
Phil Cohen
|
r34106 | |||
def setflags(self, path, l, x): | ||||
Augie Fackler
|
r43347 | flag = b'' | ||
Pulkit Goyal
|
r39101 | if l: | ||
Augie Fackler
|
r43347 | flag = b'l' | ||
Pulkit Goyal
|
r39101 | elif x: | ||
Augie Fackler
|
r43347 | flag = b'x' | ||
Augie Fackler
|
r43346 | self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag) | ||
Phil Cohen
|
r34106 | |||
    def remove(self, path):
        # Record a deletion in the overlay; the path will subsequently
        # report as non-existing via lexists()/exists().
        self._markdirty(path, exists=False)
def exists(self, path): | ||||
"""exists behaves like `lexists`, but needs to follow symlinks and | ||||
return False if they are broken. | ||||
""" | ||||
if self.isdirty(path): | ||||
# If this path exists and is a symlink, "follow" it by calling | ||||
# exists on the destination path. | ||||
Augie Fackler
|
r43346 | if ( | ||
Augie Fackler
|
r43347 | self._cache[path][b'exists'] | ||
and b'l' in self._cache[path][b'flags'] | ||||
Augie Fackler
|
r43346 | ): | ||
Augie Fackler
|
r43347 | return self.exists(self._cache[path][b'data'].strip()) | ||
Phil Cohen
|
r34106 | else: | ||
Augie Fackler
|
r43347 | return self._cache[path][b'exists'] | ||
Phil Cohen
|
r35296 | |||
return self._existsinparent(path) | ||||
Phil Cohen
|
r34106 | |||
def lexists(self, path): | ||||
"""lexists returns True if the path exists""" | ||||
if self.isdirty(path): | ||||
Augie Fackler
|
r43347 | return self._cache[path][b'exists'] | ||
Phil Cohen
|
r35296 | |||
return self._existsinparent(path) | ||||
Phil Cohen
|
r34106 | |||
def size(self, path): | ||||
if self.isdirty(path): | ||||
Augie Fackler
|
r43347 | if self._cache[path][b'exists']: | ||
return len(self._cache[path][b'data']) | ||||
Phil Cohen
|
r34106 | else: | ||
Augie Fackler
|
r43346 | raise error.ProgrammingError( | ||
Augie Fackler
|
r43347 | b"No such file or directory: %s" % self._path | ||
Augie Fackler
|
r43346 | ) | ||
Phil Cohen
|
r34106 | return self._wrappedctx[path].size() | ||
Augie Fackler
|
r43346 | def tomemctx( | ||
self, | ||||
text, | ||||
branch=None, | ||||
extra=None, | ||||
date=None, | ||||
parents=None, | ||||
user=None, | ||||
editor=None, | ||||
): | ||||
Phil Cohen
|
r35326 | """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be | ||
committed. | ||||
``text`` is the commit message. | ||||
``parents`` (optional) are rev numbers. | ||||
""" | ||||
# Default parents to the wrapped contexts' if not passed. | ||||
if parents is None: | ||||
parents = self._wrappedctx.parents() | ||||
if len(parents) == 1: | ||||
parents = (parents[0], None) | ||||
# ``parents`` is passed as rev numbers; convert to ``commitctxs``. | ||||
if parents[1] is None: | ||||
parents = (self._repo[parents[0]], None) | ||||
else: | ||||
parents = (self._repo[parents[0]], self._repo[parents[1]]) | ||||
Martin von Zweigbergk
|
r42476 | files = self.files() | ||
Augie Fackler
|
r43346 | |||
Phil Cohen
|
r35326 | def getfile(repo, memctx, path): | ||
Augie Fackler
|
r43347 | if self._cache[path][b'exists']: | ||
Augie Fackler
|
r43346 | return memfilectx( | ||
repo, | ||||
memctx, | ||||
path, | ||||
Augie Fackler
|
r43347 | self._cache[path][b'data'], | ||
b'l' in self._cache[path][b'flags'], | ||||
b'x' in self._cache[path][b'flags'], | ||||
self._cache[path][b'copied'], | ||||
Augie Fackler
|
r43346 | ) | ||
Phil Cohen
|
r35326 | else: | ||
# Returning None, but including the path in `files`, is | ||||
# necessary for memctx to register a deletion. | ||||
return None | ||||
Augie Fackler
|
r43346 | |||
return memctx( | ||||
self._repo, | ||||
parents, | ||||
text, | ||||
files, | ||||
getfile, | ||||
date=date, | ||||
extra=extra, | ||||
user=user, | ||||
branch=branch, | ||||
editor=editor, | ||||
) | ||||
Phil Cohen
|
r35326 | |||
Phil Cohen
|
    def isdirty(self, path):
        # A path is "dirty" iff it has an entry in the overlay cache.
        return path in self._cache
Phil Cohen
|
r35320 | def isempty(self): | ||
# We need to discard any keys that are actually clean before the empty | ||||
# commit check. | ||||
self._compact() | ||||
return len(self._cache) == 0 | ||||
Phil Cohen
|
r35287 | def clean(self): | ||
Phil Cohen
|
r34106 | self._cache = {} | ||
Phil Cohen
|
r35327 | def _compact(self): | ||
"""Removes keys from the cache that are actually clean, by comparing | ||||
them with the underlying context. | ||||
This can occur during the merge process, e.g. by passing --tool :local | ||||
to resolve a conflict. | ||||
""" | ||||
keys = [] | ||||
Kyle Lippincott
|
r41189 | # This won't be perfect, but can help performance significantly when | ||
# using things like remotefilelog. | ||||
scmutil.prefetchfiles( | ||||
Augie Fackler
|
r43346 | self.repo(), | ||
[self.p1().rev()], | ||||
scmutil.matchfiles(self.repo(), self._cache.keys()), | ||||
) | ||||
Kyle Lippincott
|
r41189 | |||
Phil Cohen
|
r35327 | for path in self._cache.keys(): | ||
cache = self._cache[path] | ||||
try: | ||||
underlying = self._wrappedctx[path] | ||||
Augie Fackler
|
r43346 | if ( | ||
Augie Fackler
|
r43347 | underlying.data() == cache[b'data'] | ||
and underlying.flags() == cache[b'flags'] | ||||
Augie Fackler
|
r43346 | ): | ||
Phil Cohen
|
r35327 | keys.append(path) | ||
except error.ManifestLookupError: | ||||
# Path not in the underlying manifest (created). | ||||
continue | ||||
for path in keys: | ||||
del self._cache[path] | ||||
return keys | ||||
Augie Fackler
|
r43346 | def _markdirty( | ||
Augie Fackler
|
r43347 | self, path, exists, data=None, date=None, flags=b'', copied=None | ||
Augie Fackler
|
r43346 | ): | ||
Kyle Lippincott
|
r39163 | # data not provided, let's see if we already have some; if not, let's | ||
# grab it from our underlying context, so that we always have data if | ||||
# the file is marked as existing. | ||||
if exists and data is None: | ||||
oldentry = self._cache.get(path) or {} | ||||
Augie Fackler
|
r43347 | data = oldentry.get(b'data') | ||
Martin von Zweigbergk
|
r42728 | if data is None: | ||
data = self._wrappedctx[path].data() | ||||
Kyle Lippincott
|
r39163 | |||
Phil Cohen
|
r34106 | self._cache[path] = { | ||
Augie Fackler
|
r43347 | b'exists': exists, | ||
b'data': data, | ||||
b'date': date, | ||||
b'flags': flags, | ||||
b'copied': copied, | ||||
Phil Cohen
|
r34106 | } | ||
def filectx(self, path, filelog=None): | ||||
Augie Fackler
|
r43346 | return overlayworkingfilectx( | ||
self._repo, path, parent=self, filelog=filelog | ||||
) | ||||
Phil Cohen
|
r34106 | |||
Phil Cohen
|
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when the file contents differ.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    # Everything below delegates to the owning overlayworkingctx, which
    # consults its dirty cache before the wrapped context.

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # In-memory paths need no filesystem auditing.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing on disk can shadow an in-memory file.
        pass
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        s = self._status
        return scmutil.status(
            [f for f in s.modified if match(f)],
            [f for f in s.added if match(f)],
            [f for f in s.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        s = self._status
        return set(s.modified) | set(s.added) | set(s.removed)
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: hit the cache first; fall back to computing and storing.
        # Note that None results are cached too, matching filectxfn's
        # "removed file" convention.
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
Augie Fackler
|
r43346 | |||
Sean Farley
|
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        src = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=src,
        )

    return getfilectx
Augie Fackler
|
r43346 | |||
Sean Farley
|
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # Deleted file: memctx's filectxfn contract expects None.
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
Augie Fackler
|
r43346 | |||
Sean Farley
|
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=False,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        # Normalize missing parents to nullid; exactly two are expected.
        p1, p2 = [(p or nullid) for p in parents]
        self._parents = [self._repo[p] for p in (p1, p2)]
        self._files = sorted(set(files))
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""
        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid
        for f in self._status.added:
            man[f] = addednodeid
        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)
        return scmutil.status(modified, added, removed, [], [], [], [])
Sean Farley
|
r21835 | |||
Augie Fackler
|
r43346 | |||
Sean Farley
|
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Symlink flag takes precedence over exec, as in real manifests.
        if islink:
            self._flags = b'l'
        else:
            self._flags = b'x' if isexec else b''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        # True when contents differ.
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
Mateusz Kwapich
|
r30567 | |||
Jun Wu
|
r32239 | |||
Mateusz Kwapich
|
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time.  'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=False,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # Pad to exactly two parents with the null revision.
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are changectx objects, so the comparison
        # against nullid (bytes) below looks always-unequal — confirm intent.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)
        return scmutil.status(modified, added, removed, [], [], [], [])
Phil Cohen
|
r34053 | |||
Augie Fackler
|
r43346 | |||
Phil Cohen
|
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        haslink = b'l' in self.flags() or b'l' in fctx.flags()
        if not haslink and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Arbitrary on-disk files carry no manifest flags.
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)