# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""
    def __get__(self, repo, type=None):
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""
    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)

def hasunfilteredcache(repo, name):
    """check if the repo has an unfiltered cached value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version of the repository"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

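# Example (illustrative sketch, not part of the module): the helpers above
# let a repo attribute pick its cache behavior with a one-line decorator.
# A property that must ignore repoview filtering would look like:
#
#     class somerepo(localrepository):          # hypothetical subclass
#         @unfilteredpropertycache
#         def _expensive(self):
#             # computed once on the unfiltered repo, then cached
#             return computeexpensive(self)     # hypothetical helper
#
# while @repofilecache('somefile') additionally invalidates the cached
# value whenever .hg/somefile changes on disk.
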
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('unserved')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

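# Example (sketch, assuming the hg API of this era; the path is made up):
# code that must work with both local and remote repositories goes through
# the peer interface rather than poking at localrepository directly:
#
#     from mercurial import hg, ui as uimod
#     repo = hg.repository(uimod.ui(), '/path/to/repo')
#     p = repo.peer()                  # a localpeer, see above
#     if p.capable('getbundle'):       # capable() from peer.peerrepository
#         cg = p.getbundle('pull', heads=p.heads())
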
class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered; should be cleared when
        # something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                           if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

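# Example (illustrative): the requirements written above end up in
# '.hg/requires' as one token per line, e.g.
#
#     revlogv1
#     store
#     fncache
#     dotencode
#
# On open, scmutil.readrequires (used in __init__ above) refuses to load a
# repo whose requires file names a feature outside self.supported.
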
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository

        Intended to be overwritten by the filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly a subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

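# Example (illustrative): revs() and set() take revset templates whose
# positional arguments are escaped by revset.formatspec, so callers never
# build revset strings by hand:
#
#     for ctx in repo.set('heads(branch(%s))', 'default'):
#         print ctx.hex()
#
#     nums = repo.revs('%d::%d', 0, 5)    # a list of revision numbers
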
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing
        user: name of user to use if committing
        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

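# Example (illustrative): the cached structures above back the common tag
# queries; all of these hit self._tagscache after the first call:
#
#     repo.tags()['tip']                  # node of the tip changeset
#     repo.tagtype('v1.0')                # 'global', 'local' or None
#     repo.nodetags(repo['.'].node())     # tags on the working dir parent
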
    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

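# Example (sketch): branchmap() maps each branch name to a list of head
# nodes with the tipmost head last, which is what _branchtip relies on when
# it scans backwards for the first non-closed head:
#
#     for name, heads in repo.branchmap().iteritems():
#         tip = repo.branchtip(name)
#         assert tip in heads
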
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

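# Example (sketch, based on the hgrc filter documentation): the encode and
# decode filter tables above are loaded from hgrc sections, e.g.
#
#     [encode]
#     # decompress gzip files on checkin to improve delta compression
#     *.gz = pipe: gunzip
#
#     [decode]
#     # recompress gzip files when writing them to the working directory
#     *.gz = pipe: gzip
#
# A filter registered via adddatafilter() is selected by command prefix in
# _loadfilter instead of being spawned as a shell command by util.filter.
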
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

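# Example (sketch): the canonical write pattern pairs lock() with
# transaction(), as commitctx() below does; on tr.close() the journal files
# written above are renamed to 'undo.*', which is what rollback() replays:
#
#     lock = repo.lock()
#     try:
#         tr = repo.transaction('my-operation')
#         try:
#             ...                    # append to revlogs via tr
#             tr.close()             # success: journal becomes undo.*
#         finally:
#             tr.release()           # aborts if close() was not reached
#     finally:
#         lock.release()
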
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restore it to a previously
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

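# Example: lock ordering matters. When both locks are needed, take wlock
# (working directory) before lock (store), as rollback() above does:
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         ...                   # mutate dirstate and store
#     finally:
#         release(lock, wlock)  # from mercurial.lock; releases in order
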
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

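# Example (sketch): a minimal programmatic commit of the working directory,
# mirroring what 'hg commit -m msg' does at this layer; wlock() is reentrant,
# so commit() taking it again inside is fine:
#
#     wlock = repo.wlock()
#     try:
#         repo[None].add(['newfile.txt'])            # hypothetical file
#         node = repo.commit(text='add newfile')     # None if nothing changed
#     finally:
#         wlock.release()
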
r18016 | @unfilteredmethod | ||
Matt Mackall
|
r8496 | def commitctx(self, ctx, error=False): | ||
Patrick Mezard
|
r7077 | """Add a new revision to current repository. | ||
Matt Mackall
|
r8410 | Revision information is passed via the context argument. | ||
Patrick Mezard
|
r7077 | """ | ||
Patrick Mezard
|
r6715 | |||
Matt Mackall
|
r8412 | tr = lock = None | ||
Patrick Mezard
|
r12899 | removed = list(ctx.removed()) | ||
Matt Mackall
|
r8414 | p1, p2 = ctx.p1(), ctx.p2() | ||
Matt Mackall
|
r8412 | user = ctx.user() | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r8411 | lock = self.lock() | ||
try: | ||||
Steve Borho
|
r10881 | tr = self.transaction("commit") | ||
Matt Mackall
|
r4970 | trp = weakref.proxy(tr) | ||
mpm@selenic.com
|
r1089 | |||
Peter Arrenbrecht
|
r14162 | if ctx.files(): | ||
m1 = p1.manifest().copy() | ||||
m2 = p2.manifest() | ||||
# check in files | ||||
new = {} | ||||
changed = [] | ||||
linkrev = len(self) | ||||
for f in sorted(ctx.modified() + ctx.added()): | ||||
self.ui.note(f + "\n") | ||||
try: | ||||
fctx = ctx[f] | ||||
new[f] = self._filecommit(fctx, m1, m2, linkrev, trp, | ||||
changed) | ||||
m1.set(f, fctx.flags()) | ||||
except OSError, inst: | ||||
Matt Mackall
|
r4915 | self.ui.warn(_("trouble committing %s!\n") % f) | ||
raise | ||||
Peter Arrenbrecht
|
r14162 | except IOError, inst: | ||
errcode = getattr(inst, 'errno', errno.ENOENT) | ||||
if error or errcode and errcode != errno.ENOENT: | ||||
self.ui.warn(_("trouble committing %s!\n") % f) | ||||
raise | ||||
else: | ||||
removed.append(f) | ||||
mpm@selenic.com
|
r1089 | |||
Peter Arrenbrecht
|
r14162 | # update manifest | ||
m1.update(new) | ||||
removed = [f for f in sorted(removed) if f in m1 or f in m2] | ||||
drop = [f for f in removed if f in m1] | ||||
for f in drop: | ||||
del m1[f] | ||||
mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(), | ||||
p2.manifestnode(), (new, drop)) | ||||
files = changed + removed | ||||
else: | ||||
mn = p1.manifestnode() | ||||
files = [] | ||||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r8499 | # update changelog | ||
Matt Mackall
|
r7787 | self.changelog.delayupdate() | ||
Peter Arrenbrecht
|
r14162 | n = self.changelog.add(mn, files, ctx.description(), | ||
Matt Mackall
|
r8499 | trp, p1.node(), p2.node(), | ||
Matt Mackall
|
r8412 | user, ctx.date(), ctx.extra().copy()) | ||
Matt Mackall
|
r7787 | p = lambda: self.changelog.writepending() and self.root or "" | ||
Sune Foldager
|
r10492 | xp1, xp2 = p1.hex(), p2 and p2.hex() or '' | ||
Matt Mackall
|
r4915 | self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, | ||
Matt Mackall
|
r7787 | parent2=xp2, pending=p) | ||
self.changelog.finalize(trp) | ||||
Pierre-Yves David
|
r15706 | # set the new commit in its proper phase | ||
Pierre-Yves David
|
r16030 | targetphase = phases.newcommitphase(self.ui) | ||
Pierre-Yves David
|
r15706 | if targetphase: | ||
# retracting the boundary does not alter parent changesets. | ||||
# if a parent has a higher phase, the resulting phase will | ||||
# be compliant anyway | ||||
# | ||||
# if the minimal phase was 0 we don't need to retract anything | ||||
phases.retractboundary(self, targetphase, [n]) | ||||
Matt Mackall
|
r4915 | tr.close() | ||
Pierre-Yves David
|
r18121 | branchmap.updatecache(self) | ||
Matt Mackall
|
r4915 | return n | ||
finally: | ||||
Ronny Pfannschmidt
|
r11230 | if tr: | ||
tr.release() | ||||
Matt Mackall
|
r8405 | lock.release() | ||
mpm@selenic.com
|
r1089 | |||
Pierre-Yves David
|
r18016 | @unfilteredmethod | ||
Idan Kamara
|
r18310 | def destroying(self): | ||
'''Inform the repository that nodes are about to be destroyed. | ||||
Intended for use by strip and rollback, so there's a common | ||||
place for anything that has to be done before destroying history. | ||||
This is mostly useful for saving state that is in memory and waiting | ||||
to be flushed when the current lock is released. Because a call to | ||||
destroyed is imminent, the repo will be invalidated causing those | ||||
changes to stay in memory (waiting for the next unlock), or vanish | ||||
completely. | ||||
''' | ||||
Idan Kamara
|
r18311 | # It simplifies the logic around updating the branchheads cache if we | ||
# only have to consider the effect of the stripped revisions and not | ||||
# revisions missing because the cache is out-of-date. | ||||
branchmap.updatecache(self) | ||||
Idan Kamara
|
r18310 | |||
Idan Kamara
|
r18312 | # When using the same lock to commit and strip, the phasecache is left | ||
# dirty after committing. Then when we strip, the repo is invalidated, | ||||
# causing those changes to disappear. | ||||
if '_phasecache' in vars(self): | ||||
self._phasecache.write() | ||||
Idan Kamara
|
r18310 | @unfilteredmethod | ||
Joshua Redstone
|
r17013 | def destroyed(self, newheadnodes=None): | ||
Greg Ward
|
r9150 | '''Inform the repository that nodes have been destroyed. | ||
Intended for use by strip and rollback, so there's a common | ||||
Joshua Redstone
|
r17013 | place for anything that has to be done after destroying history. | ||
If you know the branchheadcache was up to date before nodes were removed | ||||
and you also know the set of candidate new heads that may have resulted | ||||
from the destruction, you can set newheadnodes. This will enable the | ||||
code to update the branchheads cache, rather than having future code | ||||
Mads Kiilerich
|
r17424 | decide it's invalid and regenerate it from scratch. | ||
Joshua Redstone
|
r17013 | ''' | ||
Idan Kamara
|
r18221 | # When one tries to: | ||
# 1) destroy nodes thus calling this method (e.g. strip) | ||||
# 2) use phasecache somewhere (e.g. commit) | ||||
# | ||||
# then 2) will fail because the phasecache contains nodes that were | ||||
# removed. We can either remove phasecache from the filecache, | ||||
# causing it to reload next time it is accessed, or simply filter | ||||
# the removed nodes now and write the updated cache. | ||||
if '_phasecache' in self._filecache: | ||||
self._phasecache.filterunknown(self) | ||||
self._phasecache.write() | ||||
Pierre-Yves David
|
r18223 | # If we have info, newheadnodes, on how to update the branch cache, do | ||
# it. Otherwise, since nodes were destroyed, the cache is stale and this | ||||
# will be caught the next time it is read. | ||||
if newheadnodes: | ||||
Pierre-Yves David
|
r18305 | cl = self.changelog | ||
revgen = (cl.rev(node) for node in newheadnodes | ||||
if cl.hasnode(node)) | ||||
Pierre-Yves David
|
r18223 | cache = self._branchcaches[None] | ||
Pierre-Yves David
|
r18305 | cache.update(self, revgen) | ||
Pierre-Yves David
|
r18223 | cache.write(self) | ||
Greg Ward
|
r9151 | # Ensure the persistent tag cache is updated. Doing it now | ||
# means that the tag cache only has to worry about destroyed | ||||
# heads immediately after a strip/rollback. That in turn | ||||
# guarantees that "cachetip == currenttip" (comparing both rev | ||||
# and node) always means no nodes have been added or destroyed. | ||||
# XXX this is suboptimal when qrefresh'ing: we strip the current | ||||
# head, refresh the tag cache, then immediately add a new head. | ||||
# But I think doing it this way is necessary for the "instant | ||||
# tag cache retrieval" case to work. | ||||
Idan Kamara
|
r18313 | self.invalidate() | ||
Idan Kamara
|
r17324 | |||
Matt Mackall
|
r6585 | def walk(self, match, node=None): | ||
Matt Mackall
|
r3532 | ''' | ||
walk recursively through the directory tree or a given | ||||
changeset, finding all files matched by the match | ||||
function | ||||
''' | ||||
Matt Mackall
|
r6764 | return self[node].walk(match) | ||
Matt Mackall
|
r3532 | |||
Matt Mackall
|
r6769 | def status(self, node1='.', node2=None, match=None, | ||
Martin Geisler
|
r12166 | ignored=False, clean=False, unknown=False, | ||
listsubrepos=False): | ||||
Brodie Rao
|
r16683 | """return status of files between two nodes or node and working | ||
directory. | ||||
Thomas Arendsen Hein
|
r1616 | |||
If node1 is None, use the first dirstate parent instead. | ||||
If node2 is None, compare node1 with working directory. | ||||
""" | ||||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r6769 | def mfmatches(ctx): | ||
mf = ctx.manifest().copy() | ||||
Jesse Glick
|
r16645 | if match.always(): | ||
return mf | ||||
mpm@selenic.com
|
r1089 | for fn in mf.keys(): | ||
if not match(fn): | ||||
del mf[fn] | ||||
return mf | ||||
Matt Mackall
|
r7090 | if isinstance(node1, context.changectx): | ||
ctx1 = node1 | ||||
else: | ||||
ctx1 = self[node1] | ||||
if isinstance(node2, context.changectx): | ||||
ctx2 = node2 | ||||
else: | ||||
ctx2 = self[node2] | ||||
Dirkjan Ochtman
|
r7435 | working = ctx2.rev() is None | ||
Matt Mackall
|
r6769 | parentworking = working and ctx1 == self['.'] | ||
Benoit Boissinot
|
r10651 | match = match or matchmod.always(self.root, self.getcwd()) | ||
Matt Mackall
|
r6753 | listignored, listclean, listunknown = ignored, clean, unknown | ||
Chris Mason
|
r2474 | |||
Matt Mackall
|
r7090 | # load earliest manifest first for caching reasons | ||
if not working and ctx2.rev() < ctx1.rev(): | ||||
ctx2.manifest() | ||||
Matt Mackall
|
r7067 | if not parentworking: | ||
def bad(f, msg): | ||||
FUJIWARA Katsunori
|
r16144 | # 'f' may be a directory pattern from 'match.files()', | ||
# so 'f not in ctx1' is not enough | ||||
if f not in ctx1 and f not in ctx1.dirs(): | ||||
Matt Mackall
|
r7067 | self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg)) | ||
match.bad = bad | ||||
Matt Mackall
|
r6770 | if working: # we need to scan the working dir | ||
Matt Mackall
|
r11227 | subrepos = [] | ||
if '.hgsub' in self.dirstate: | ||||
Matt Mackall
|
r14870 | subrepos = ctx2.substate.keys() | ||
Augie Fackler
|
r10176 | s = self.dirstate.status(match, subrepos, listignored, | ||
listclean, listunknown) | ||||
Matt Mackall
|
r6770 | cmp, modified, added, removed, deleted, unknown, ignored, clean = s | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r6770 | # check for any possibly clean files | ||
if parentworking and cmp: | ||||
fixup = [] | ||||
# do a full compare of any files that might have changed | ||||
Matt Mackall
|
r8395 | for f in sorted(cmp): | ||
Matt Mackall
|
r6770 | if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f) | ||
Nicolas Dumazet
|
r11702 | or ctx1[f].cmp(ctx2[f])): | ||
Matt Mackall
|
r6770 | modified.append(f) | ||
else: | ||||
fixup.append(f) | ||||
# update dirstate for files that are actually clean | ||||
if fixup: | ||||
Nicolas Dumazet
|
r11669 | if listclean: | ||
clean += fixup | ||||
Matt Mackall
|
r6770 | try: | ||
Adrian Buehlmann
|
r8647 | # updating the dirstate is optional | ||
# so we don't wait on the lock | ||||
Simon Heimberg
|
r8646 | wlock = self.wlock(False) | ||
Matt Mackall
|
r4915 | try: | ||
Matt Mackall
|
r6770 | for f in fixup: | ||
self.dirstate.normal(f) | ||||
Simon Heimberg
|
r8646 | finally: | ||
wlock.release() | ||||
except error.LockError: | ||||
pass | ||||
Vadim Gelfer
|
r2661 | |||
Matt Mackall
|
r6769 | if not parentworking: | ||
mf1 = mfmatches(ctx1) | ||||
Matt Mackall
|
r6770 | if working: | ||
Thomas Arendsen Hein
|
r1616 | # we are comparing working dir against non-parent | ||
# generate a pseudo-manifest for the working dir | ||||
Matt Mackall
|
r6769 | mf2 = mfmatches(self['.']) | ||
Matt Mackall
|
r6770 | for f in cmp + modified + added: | ||
Matt Mackall
|
r6769 | mf2[f] = None | ||
Matt Mackall
|
r6817 | mf2.set(f, ctx2.flags(f)) | ||
Thomas Arendsen Hein
|
r1617 | for f in removed: | ||
Thomas Arendsen Hein
|
r1616 | if f in mf2: | ||
del mf2[f] | ||||
Matt Mackall
|
r6770 | else: | ||
# we are comparing two revisions | ||||
deleted, unknown, ignored = [], [], [] | ||||
mf2 = mfmatches(ctx2) | ||||
Bryan O'Sullivan
|
r4372 | |||
Vadim Gelfer
|
r2661 | modified, added, clean = [], [], [] | ||
Jesse Glick
|
r16646 | withflags = mf1.withflags() | mf2.withflags() | ||
Matt Mackall
|
r6827 | for fn in mf2: | ||
Christian Ebert
|
r5915 | if fn in mf1: | ||
Idan Kamara
|
r14500 | if (fn not in deleted and | ||
Jesse Glick
|
r16646 | ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or | ||
Idan Kamara
|
r14500 | (mf1[fn] != mf2[fn] and | ||
(mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))): | ||||
Thomas Arendsen Hein
|
r1616 | modified.append(fn) | ||
Matt Mackall
|
r6753 | elif listclean: | ||
Vadim Gelfer
|
r2661 | clean.append(fn) | ||
Thomas Arendsen Hein
|
r1616 | del mf1[fn] | ||
Idan Kamara
|
r14500 | elif fn not in deleted: | ||
Thomas Arendsen Hein
|
r1616 | added.append(fn) | ||
Thomas Arendsen Hein
|
r1617 | removed = mf1.keys() | ||
Matt Mackall
|
r15348 | if working and modified and not self.dirstate._checklink: | ||
# Symlink placeholders may get non-symlink-like contents | ||||
# via user error or dereferencing by NFS or Samba servers, | ||||
# so we filter out any placeholders that don't look like a | ||||
# symlink | ||||
sane = [] | ||||
for f in modified: | ||||
if ctx2.flags(f) == 'l': | ||||
d = ctx2[f].data() | ||||
if len(d) >= 1024 or '\n' in d or util.binary(d): | ||||
self.ui.debug('ignoring suspect symlink placeholder' | ||||
' "%s"\n' % f) | ||||
continue | ||||
sane.append(f) | ||||
modified = sane | ||||
Matt Mackall
|
r6827 | r = modified, added, removed, deleted, unknown, ignored, clean | ||
Martin Geisler
|
r12166 | |||
if listsubrepos: | ||||
Martin Geisler
|
r12176 | for subpath, sub in subrepo.itersubrepos(ctx1, ctx2): | ||
Martin Geisler
|
r12166 | if working: | ||
rev2 = None | ||||
else: | ||||
rev2 = ctx2.substate[subpath][1] | ||||
try: | ||||
submatch = matchmod.narrowmatcher(subpath, match) | ||||
s = sub.status(rev2, match=submatch, ignored=listignored, | ||||
clean=listclean, unknown=listunknown, | ||||
listsubrepos=True) | ||||
for rfiles, sfiles in zip(r, s): | ||||
rfiles.extend("%s/%s" % (subpath, f) for f in sfiles) | ||||
except error.LookupError: | ||||
self.ui.status(_("skipping missing subrepository: %s\n") | ||||
% subpath) | ||||
Martin Geisler
|
r13412 | for l in r: | ||
l.sort() | ||||
Matt Mackall
|
r6827 | return r | ||
Vadim Gelfer
|
r2661 | |||
John Mulligan
|
r8796 | def heads(self, start=None): | ||
Benoit Boissinot
|
r1550 | heads = self.changelog.heads(start) | ||
# sort the output in rev descending order | ||||
Thomas Arendsen Hein
|
r13075 | return sorted(heads, key=self.changelog.rev, reverse=True) | ||
mpm@selenic.com
|
r1089 | |||
John Mulligan
|
r8694 | def branchheads(self, branch=None, start=None, closed=False): | ||
Sune Foldager
|
r9475 | '''return a (possibly filtered) list of heads for the given branch | ||
Heads are returned in topological order, from newest to oldest. | ||||
If branch is None, use the dirstate branch. | ||||
If start is not None, return only heads reachable from start. | ||||
If closed is True, return heads that are marked as closed as well. | ||||
''' | ||||
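# A minimal sketch (not part of the original source), assuming 'repo' is
# a localrepository: print the open heads of the 'default' branch,
# newest first, using the short() helper imported at the top of this
# module.
#
#   for h in repo.branchheads('default'):
#       repo.ui.write('%s\n' % short(h))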
Matt Mackall
|
r6747 | if branch is None: | ||
branch = self[None].branch() | ||||
Benoit Boissinot
|
r9675 | branches = self.branchmap() | ||
Eric Hopper
|
r4648 | if branch not in branches: | ||
return [] | ||||
John Mulligan
|
r7654 | # the cache returns heads ordered lowest to highest | ||
Sune Foldager
|
r9475 | bheads = list(reversed(branches[branch])) | ||
Eric Hopper
|
r4648 | if start is not None: | ||
John Mulligan
|
r7654 | # filter out the heads that cannot be reached from startrev | ||
Sune Foldager
|
r9475 | fbheads = set(self.changelog.nodesbetween([start], bheads)[2]) | ||
bheads = [h for h in bheads if h in fbheads] | ||||
John Mulligan
|
r7656 | if not closed: | ||
Brodie Rao
|
r16720 | bheads = [h for h in bheads if not self[h].closesbranch()] | ||
John Mulligan
|
r7654 | return bheads | ||
Eric Hopper
|
r4648 | |||
mpm@selenic.com
|
r1089 | def branches(self, nodes): | ||
Thomas Arendsen Hein
|
r1615 | if not nodes: | ||
nodes = [self.changelog.tip()] | ||||
mpm@selenic.com
|
r1089 | b = [] | ||
for n in nodes: | ||||
t = n | ||||
Martin Geisler
|
r14494 | while True: | ||
mpm@selenic.com
|
r1089 | p = self.changelog.parents(n) | ||
if p[1] != nullid or p[0] == nullid: | ||||
b.append((t, n, p[0], p[1])) | ||||
break | ||||
n = p[0] | ||||
return b | ||||
def between(self, pairs): | ||||
r = [] | ||||
for top, bottom in pairs: | ||||
n, l, i = top, [], 0 | ||||
f = 1 | ||||
Matt Mackall
|
r7708 | while n != bottom and n != nullid: | ||
mpm@selenic.com
|
r1089 | p = self.changelog.parents(n)[0] | ||
if i == f: | ||||
l.append(n) | ||||
f = f * 2 | ||||
n = p | ||||
i += 1 | ||||
r.append(l) | ||||
return r | ||||
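# A standalone model (not part of the original source) of the sampling
# above: nodes are collected at exponentially growing distances 1, 2, 4,
# 8, ... down the first-parent chain, which the old discovery protocol
# used to binary-search for common ancestors. Assuming a linear history
# indexed by integers:
#
#   def sample(top, bottom):
#       n, l, i, f = top, [], 0, 1
#       while n > bottom:
#           if i == f:
#               l.append(n)
#               f *= 2
#           n -= 1
#           i += 1
#       return l
#
#   sample(10, 0) == [9, 8, 6, 2]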
Matt Mackall
|
r4917 | def pull(self, remote, heads=None, force=False): | ||
Pierre-Yves David
|
r17126 | # don't open a transaction for nothing or you break future useful | ||
# rollback calls | ||||
tr = None | ||||
trname = 'pull\n' + util.hidepassword(remote.url()) | ||||
Matt Mackall
|
r4917 | lock = self.lock() | ||
Vadim Gelfer
|
r2827 | try: | ||
Dirkjan Ochtman
|
r11301 | tmp = discovery.findcommonincoming(self, remote, heads=heads, | ||
force=force) | ||||
common, fetch, rheads = tmp | ||||
Vadim Gelfer
|
r2827 | if not fetch: | ||
self.ui.status(_("no changes found\n")) | ||||
Pierre-Yves David
|
r15650 | added = [] | ||
Matt Mackall
|
r13364 | result = 0 | ||
else: | ||||
Pierre-Yves David
|
r17126 | tr = self.transaction(trname) | ||
Peter Arrenbrecht
|
r13742 | if heads is None and list(common) == [nullid]: | ||
Matt Mackall
|
r13364 | self.ui.status(_("requesting all changes\n")) | ||
elif heads is None and remote.capable('changegroupsubset'): | ||||
# issue1320, avoid a race if remote changed after discovery | ||||
heads = rheads | ||||
Benoit Boissinot
|
r7415 | |||
Peter Arrenbrecht
|
r14073 | if remote.capable('getbundle'): | ||
Peter Arrenbrecht
|
r13742 | cg = remote.getbundle('pull', common=common, | ||
heads=heads or rheads) | ||||
elif heads is None: | ||||
Matt Mackall
|
r13364 | cg = remote.changegroup(fetch, 'pull') | ||
elif not remote.capable('changegroupsubset'): | ||||
Martin Geisler
|
r12067 | raise util.Abort(_("partial pull cannot be done because " | ||
Matt Mackall
|
r13364 | "other repository doesn't support " | ||
"changegroupsubset.")) | ||||
else: | ||||
cg = remote.changegroupsubset(fetch, heads, 'pull') | ||||
Pierre-Yves David
|
r15650 | clstart = len(self.changelog) | ||
Pierre-Yves David
|
r15585 | result = self.addchangegroup(cg, 'pull', remote.url()) | ||
Pierre-Yves David
|
r15650 | clend = len(self.changelog) | ||
added = [self.changelog.node(r) for r in xrange(clstart, clend)] | ||||
Pierre-Yves David
|
r15956 | # compute target subset | ||
if heads is None: | ||||
# We pulled everything possible | ||||
# sync on everything common | ||||
subset = common + added | ||||
else: | ||||
# We pulled a specific subset | ||||
# sync on this subset | ||||
subset = heads | ||||
Pierre-Yves David
|
r15650 | |||
# Get remote phases data from remote | ||||
remotephases = remote.listkeys('phases') | ||||
publishing = bool(remotephases.get('publishing', False)) | ||||
if remotephases and not publishing: | ||||
# remote is new and non-publishing | ||||
Pierre-Yves David
|
r15892 | pheads, _dr = phases.analyzeremotephases(self, subset, | ||
remotephases) | ||||
phases.advanceboundary(self, phases.public, pheads) | ||||
Pierre-Yves David
|
r15956 | phases.advanceboundary(self, phases.draft, subset) | ||
Pierre-Yves David
|
r15650 | else: | ||
# Remote is old or publishing; all common changesets | ||||
# should be seen as public | ||||
Pierre-Yves David
|
r15956 | phases.advanceboundary(self, phases.public, subset) | ||
Pierre-Yves.David@ens-lyon.org
|
r17075 | |||
Pierre-Yves David
|
r17298 | if obsolete._enabled: | ||
Pierre-Yves David
|
r17857 | self.ui.debug('fetching remote obsolete markers\n') | ||
Pierre-Yves David
|
r17298 | remoteobs = remote.listkeys('obsolete') | ||
if 'dump0' in remoteobs: | ||||
if tr is None: | ||||
tr = self.transaction(trname) | ||||
for key in sorted(remoteobs, reverse=True): | ||||
if key.startswith('dump'): | ||||
data = base85.b85decode(remoteobs[key]) | ||||
self.obsstore.mergemarkers(tr, data) | ||||
Pierre-Yves David
|
r18105 | self.invalidatevolatilesets() | ||
Pierre-Yves David
|
r17126 | if tr is not None: | ||
tr.close() | ||||
Vadim Gelfer
|
r2827 | finally: | ||
Pierre-Yves David
|
r17126 | if tr is not None: | ||
tr.release() | ||||
Ronny Pfannschmidt
|
r8109 | lock.release() | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r13364 | return result | ||
Patrick Mezard
|
r13327 | def checkpush(self, force, revs): | ||
"""Extensions can override this function if additional checks have | ||||
to be performed before pushing, or call it if they override push | ||||
command. | ||||
""" | ||||
pass | ||||
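# A hypothetical sketch (not part of the original source) of the hook
# point described above: an extension could subclass the repo class and
# veto pushes. All names below are illustrative assumptions.
#
#   def reposetup(ui, repo):
#       class gatedrepo(repo.__class__):
#           def checkpush(self, force, revs):
#               super(gatedrepo, self).checkpush(force, revs)
#               if not force and ui.configbool('gate', 'closed'):
#                   raise util.Abort('pushes are currently disabled')
#       repo.__class__ = gatedrepo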
Sune Foldager
|
r11211 | def push(self, remote, force=False, revs=None, newbranch=False): | ||
Greg Ward
|
r11153 | '''Push outgoing changesets (limited by revs) from the current | ||
repository to remote. Return an integer (or None): | ||||
Matt Mackall
|
r16023 | - None means nothing to push | ||
- 0 means HTTP error | ||||
Greg Ward
|
r11153 | - 1 means we pushed and remote head count is unchanged *or* | ||
we have outgoing changesets but refused to push | ||||
- other values as described by addchangegroup() | ||||
''' | ||||
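# A minimal sketch (not part of the original source) of how a caller
# might act on these values; 'other' is an assumed peer.
#
#   ret = repo.push(other)
#   if ret is None:
#       repo.ui.status('nothing to push\n')
#   elif ret == 0:
#       repo.ui.warn('push failed (HTTP error)\n')
#   else:
#       pass  # pushed; see addchangegroup() for the head-count encoding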
Vadim Gelfer
|
r2439 | # there are two ways to push to remote repo: | ||
# | ||||
# addchangegroup assumes local user can lock remote | ||||
# repo (local filesystem, old ssh servers). | ||||
# | ||||
# unbundle assumes local user cannot lock remote repo (new ssh | ||||
# servers, http servers). | ||||
mpm@selenic.com
|
r1089 | |||
Sune Foldager
|
r17193 | if not remote.canpush(): | ||
raise util.Abort(_("destination does not support push")) | ||||
Kevin Bullock
|
r18044 | unfi = self.unfiltered() | ||
Pierre-Yves David
|
r15952 | # get local lock as we might write phase data | ||
locallock = self.lock() | ||||
Matt Mackall
|
r4915 | try: | ||
Pierre-Yves David
|
r15952 | self.checkpush(force, revs) | ||
lock = None | ||||
unbundle = remote.capable('unbundle') | ||||
if not unbundle: | ||||
lock = remote.lock() | ||||
Pierre-Yves David
|
r15485 | try: | ||
Pierre-Yves David
|
r15932 | # discovery | ||
fci = discovery.findcommonincoming | ||||
Pierre-Yves David
|
r18007 | commoninc = fci(unfi, remote, force=force) | ||
Pierre-Yves David
|
r15932 | common, inc, remoteheads = commoninc | ||
fco = discovery.findcommonoutgoing | ||||
Pierre-Yves David
|
r18007 | outgoing = fco(unfi, remote, onlyheads=revs, | ||
Pierre-Yves David
|
r15932 | commoninc=commoninc, force=force) | ||
if not outgoing.missing: | ||||
# nothing to push | ||||
Pierre-Yves David
|
r18007 | scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) | ||
Matt Mackall
|
r16023 | ret = None | ||
Pierre-Yves David
|
r15932 | else: | ||
# something to push | ||||
if not force: | ||||
Pierre-Yves David
|
r17169 | # if self.obsstore is false (empty), there are no obsolete | ||
# markers to check for, so we skip the iteration | ||||
Pierre-Yves David
|
r18007 | if unfi.obsstore: | ||
Pierre-Yves David
|
r17169 | # these messages are defined here to stay within the 80-char limit | ||
|
r17833 | mso = _("push includes obsolete changeset: %s!") | ||
Pierre-Yves David
|
r18162 | mst = "push includes %s changeset: %s!" | ||
# plain versions for i18n tool to detect them | ||||
_("push includes unstable changeset: %s!") | ||||
_("push includes bumped changeset: %s!") | ||||
_("push includes divergent changeset: %s!") | ||||
Pierre-Yves David
|
r17172 | # If we are pushing and there is at least one | ||
# obsolete or unstable changeset in missing, then at | ||||
# least one of the missing heads will be obsolete or | ||||
# unstable. So checking heads only is ok | ||||
for node in outgoing.missingheads: | ||||
Pierre-Yves David
|
r18007 | ctx = unfi[node] | ||
Pierre-Yves David
|
r17169 | if ctx.obsolete(): | ||
Thomas Arendsen Hein
|
r17855 | raise util.Abort(mso % ctx) | ||
Pierre-Yves David
|
r18162 | elif ctx.troubled(): | ||
raise util.Abort(_(mst) | ||||
% (ctx.troubles()[0], | ||||
ctx)) | ||||
Pierre-Yves David
|
r18007 | discovery.checkheads(unfi, remote, outgoing, | ||
Pierre-Yves David
|
r15986 | remoteheads, newbranch, | ||
bool(inc)) | ||||
Pierre-Yves David
|
r15932 | |||
# create a changegroup from local | ||||
if revs is None and not outgoing.excluded: | ||||
# push everything, | ||||
# use the fast path, no race possible on push | ||||
cg = self._changegroup(outgoing.missing, 'push') | ||||
else: | ||||
cg = self.getlocalbundle('push', outgoing) | ||||
# apply changegroup to remote | ||||
Pierre-Yves David
|
r15485 | if unbundle: | ||
# local repo finds heads on server, finds out what | ||||
# revs it must push. once revs transferred, if server | ||||
# finds it has different heads (someone else won | ||||
# commit/push race), server aborts. | ||||
if force: | ||||
Pierre-Yves David
|
r15932 | remoteheads = ['force'] | ||
Pierre-Yves David
|
r15485 | # ssh: return remote's addchangegroup() | ||
# http: return remote's addchangegroup() or 0 for error | ||||
Pierre-Yves David
|
r15932 | ret = remote.unbundle(cg, remoteheads, 'push') | ||
Pierre-Yves David
|
r15485 | else: | ||
Brodie Rao
|
r16683 | # we return an integer indicating remote head count | ||
# change | ||||
Pierre-Yves David
|
r15585 | ret = remote.addchangegroup(cg, 'push', self.url()) | ||
Pierre-Yves David
|
r15651 | |||
Pierre-Yves David
|
r15933 | if ret: | ||
timeless@mozdev.org
|
r17521 | # push succeeded, synchronize the target of the push | ||
Pierre-Yves David
|
r15956 | cheads = outgoing.missingheads | ||
elif revs is None: | ||||
# Pushing everything failed: synchronize on all common changesets | ||||
cheads = outgoing.commonheads | ||||
else: | ||||
# I want cheads = heads(::missingheads and ::commonheads) | ||||
# (missingheads is revs with secret changeset filtered out) | ||||
# | ||||
# This can be expressed as: | ||||
# cheads = ( (missingheads and ::commonheads) | ||||
# + (commonheads and ::missingheads))" | ||||
# ) | ||||
# | ||||
# while trying to push we already computed the following: | ||||
# common = (::commonheads) | ||||
# missing = ((commonheads::missingheads) - commonheads) | ||||
# | ||||
# We can pick: | ||||
Mads Kiilerich
|
r17424 | # * missingheads part of common (::commonheads) | ||
Pierre-Yves David
|
r15956 | common = set(outgoing.common) | ||
Matt Mackall
|
r16020 | cheads = [node for node in revs if node in common] | ||
Joshua Redstone
|
r16628 | # and | ||
Pierre-Yves David
|
r15956 | # * commonheads parents on missing | ||
Pierre-Yves David
|
r18007 | revset = unfi.set('%ln and parents(roots(%ln))', | ||
Matt Mackall
|
r16020 | outgoing.commonheads, | ||
outgoing.missing) | ||||
cheads.extend(c.node() for c in revset) | ||||
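# A worked example (not part of the original source): in a linear
# history 0-1-2-3 with commonheads == {1} and revs == {3}, missing is
# {2, 3}. The first pick adds nothing (3 is not in ::1), while the
# revset above yields {1}, since roots(missing) == {2} and 2's parent 1
# is a commonhead. So cheads == {1}, which is heads(::3 and ::1).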
Pierre-Yves David
|
r15651 | # even when we don't push, exchanging phase data is useful | ||
remotephases = remote.listkeys('phases') | ||||
if not remotephases: # old server or public only repo | ||||
Pierre-Yves David
|
r15933 | phases.advanceboundary(self, phases.public, cheads) | ||
Pierre-Yves David
|
r15651 | # don't push any phase data as there is nothing to push | ||
Matt Mackall
|
r13364 | else: | ||
Pierre-Yves David
|
r15933 | ana = phases.analyzeremotephases(self, cheads, remotephases) | ||
Pierre-Yves David
|
r15892 | pheads, droots = ana | ||
Pierre-Yves David
|
r15651 | ### Apply remote phase on local | ||
if remotephases.get('publishing', False): | ||||
Pierre-Yves David
|
r15933 | phases.advanceboundary(self, phases.public, cheads) | ||
Pierre-Yves David
|
r15651 | else: # publish = False | ||
Pierre-Yves David
|
r15892 | phases.advanceboundary(self, phases.public, pheads) | ||
Pierre-Yves David
|
r15933 | phases.advanceboundary(self, phases.draft, cheads) | ||
Pierre-Yves David
|
r15651 | ### Apply local phase on remote | ||
Pierre-Yves David
|
r15820 | |||
Pierre-Yves David
|
r15892 | # Get the list of all revs that are draft on remote but public here. | ||
# XXX Beware that the revset breaks if droots is not strictly made | ||||
# XXX of roots; we may want to ensure it is, but that is costly | ||||
Pierre-Yves David
|
r18007 | outdated = unfi.set('heads((%ln::%ln) and public())', | ||
Pierre-Yves David
|
r15933 | droots, cheads) | ||
Pierre-Yves David
|
r15892 | for newremotehead in outdated: | ||
Pierre-Yves David
|
r15820 | r = remote.pushkey('phases', | ||
newremotehead.hex(), | ||||
Pierre-Yves David
|
r15892 | str(phases.draft), | ||
str(phases.public)) | ||||
Pierre-Yves David
|
r15820 | if not r: | ||
Pierre-Yves David
|
r15892 | self.ui.warn(_('updating %s to public failed!\n') | ||
% newremotehead) | ||||
Pierre-Yves David
|
r17294 | self.ui.debug('try to push obsolete markers to remote\n') | ||
Pierre-Yves David
|
r17298 | if (obsolete._enabled and self.obsstore and | ||
Patrick Mezard
|
r17252 | 'obsolete' in remote.listkeys('namespaces')): | ||
Pierre-Yves David
|
r17295 | rslts = [] | ||
remotedata = self.listkeys('obsolete') | ||||
for key in sorted(remotedata, reverse=True): | ||||
# reverse sort to ensure we end with dump0 | ||||
data = remotedata[key] | ||||
rslts.append(remote.pushkey('obsolete', key, '', data)) | ||||
if [r for r in rslts if not r]: | ||||
msg = _('failed to push some obsolete markers!\n') | ||||
self.ui.warn(msg) | ||||
Pierre-Yves David
|
r15485 | finally: | ||
Pierre-Yves David
|
r15952 | if lock is not None: | ||
lock.release() | ||||
Matt Mackall
|
r4915 | finally: | ||
Pierre-Yves David
|
r15952 | locallock.release() | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r13364 | self.ui.debug("checking for updated bookmarks\n") | ||
rb = remote.listkeys('bookmarks') | ||||
for k in rb.keys(): | ||||
Pierre-Yves David
|
r18009 | if k in unfi._bookmarks: | ||
Matt Mackall
|
r13364 | nr, nl = rb[k], hex(self._bookmarks[k]) | ||
Pierre-Yves David
|
r18009 | if nr in unfi: | ||
cr = unfi[nr] | ||||
cl = unfi[nl] | ||||
if bookmarks.validdest(unfi, cr, cl): | ||||
Matt Mackall
|
r13364 | r = remote.pushkey('bookmarks', k, nr, nl) | ||
if r: | ||||
self.ui.status(_("updating bookmark %s\n") % k) | ||||
else: | ||||
self.ui.warn(_('updating bookmark %s' | ||||
' failed!\n') % k) | ||||
return ret | ||||
Thomas Arendsen Hein
|
r5763 | def changegroupinfo(self, nodes, source): | ||
if self.ui.verbose or source == 'bundle': | ||||
self.ui.status(_("%d changesets found\n") % len(nodes)) | ||||
Thomas Arendsen Hein
|
r3513 | if self.ui.debugflag: | ||
Martin Geisler
|
r9467 | self.ui.debug("list of changesets:\n") | ||
Thomas Arendsen Hein
|
r3513 | for node in nodes: | ||
self.ui.debug("%s\n" % hex(node)) | ||||
Benoit Boissinot
|
r13703 | def changegroupsubset(self, bases, heads, source): | ||
Greg Ward
|
r9437 | """Compute a changegroup consisting of all the nodes that are | ||
Matt Mackall
|
r14549 | descendants of any of the bases and ancestors of any of the heads. | ||
Greg Ward
|
r9437 | Return a chunkbuffer object whose read() method will return | ||
successive changegroup chunks. | ||||
Eric Hopper
|
r1466 | |||
It is fairly complex as determining which filenodes and which | ||||
manifest nodes need to be included for the changeset to be complete | ||||
is non-trivial. | ||||
Another wrinkle is doing the reverse, figuring out which changeset in | ||||
Alexis S. L. Carvalho
|
r5908 | the changegroup a particular filenode or manifestnode belongs to. | ||
""" | ||||
Peter Arrenbrecht
|
r9820 | cl = self.changelog | ||
if not bases: | ||||
bases = [nullid] | ||||
Peter Arrenbrecht
|
r13741 | csets, bases, heads = cl.nodesbetween(bases, heads) | ||
# We assume that all ancestors of bases are known | ||||
Siddharth Agarwal
|
r18092 | common = cl.ancestors([cl.rev(n) for n in bases]) | ||
Peter Arrenbrecht
|
r13741 | return self._changegroupsubset(common, csets, heads, source) | ||
Pierre-Yves David
|
r15837 | def getlocalbundle(self, source, outgoing): | ||
"""Like getbundle, but taking a discovery.outgoing as an argument. | ||||
This is only implemented for local repos and reuses potentially | ||||
precomputed sets in outgoing.""" | ||||
if not outgoing.missing: | ||||
return None | ||||
return self._changegroupsubset(outgoing.common, | ||||
outgoing.missing, | ||||
outgoing.missingheads, | ||||
source) | ||||
Peter Arrenbrecht
|
r13741 | def getbundle(self, source, heads=None, common=None): | ||
"""Like changegroupsubset, but returns the set difference between the | ||||
ancestors of heads and the ancestors common. | ||||
If heads is None, use the local heads. If common is None, use [nullid]. | ||||
Peter Arrenbrecht
|
r9820 | |||
Peter Arrenbrecht
|
r13741 | The nodes in common might not all be known locally due to the way the | ||
current discovery protocol works. | ||||
""" | ||||
cl = self.changelog | ||||
if common: | ||||
Pierre-Yves David
|
r18086 | hasnode = cl.hasnode | ||
common = [n for n in common if hasnode(n)] | ||||
Peter Arrenbrecht
|
r13741 | else: | ||
common = [nullid] | ||||
if not heads: | ||||
heads = cl.heads() | ||||
Pierre-Yves David
|
r15837 | return self.getlocalbundle(source, | ||
discovery.outgoing(cl, common, heads)) | ||||
Peter Arrenbrecht
|
r13741 | |||
Pierre-Yves David
|
r18016 | @unfilteredmethod | ||
Peter Arrenbrecht
|
r13741 | def _changegroupsubset(self, commonrevs, csets, heads, source): | ||
Eric Hopper
|
r1466 | |||
Peter Arrenbrecht
|
r9820 | cl = self.changelog | ||
Matt Mackall
|
r13706 | mf = self.manifest | ||
mfs = {} # needed manifests | ||||
fnodes = {} # needed file nodes | ||||
Matt Mackall
|
r13812 | changedfiles = set() | ||
Matt Mackall
|
r13829 | fstate = ['', {}] | ||
Matt Mackall
|
r16421 | count = [0, 0] | ||
Matt Mackall
|
r13706 | |||
Benoit Boissinot
|
r13703 | # can we go through the fast path? | ||
heads.sort() | ||||
Matt Mackall
|
r13707 | if heads == sorted(self.heads()): | ||
Matt Mackall
|
r13706 | return self._changegroup(csets, source) | ||
Benoit Boissinot
|
r7233 | |||
Peter Arrenbrecht
|
r9820 | # slow path | ||
Vadim Gelfer
|
r1736 | self.hook('preoutgoing', throw=True, source=source) | ||
Matt Mackall
|
r13706 | self.changegroupinfo(csets, source) | ||
Eric Hopper
|
r1458 | |||
Matt Mackall
|
r13810 | # filter any nodes that claim to be part of the known set | ||
def prune(revlog, missing): | ||||
Matt Mackall
|
r16426 | rr, rl = revlog.rev, revlog.linkrev | ||
Sune Foldager
|
r14521 | return [n for n in missing | ||
Matt Mackall
|
r16426 | if rl(rr(n)) not in commonrevs] | ||
Eric Hopper
|
r1458 | |||
Matt Mackall
|
r16420 | progress = self.ui.progress | ||
_bundling = _('bundling') | ||||
_changesets = _('changesets') | ||||
_manifests = _('manifests') | ||||
_files = _('files') | ||||
Matt Mackall
|
r13830 | def lookup(revlog, x): | ||
if revlog == cl: | ||||
c = cl.read(x) | ||||
changedfiles.update(c[3]) | ||||
mfs.setdefault(c[0], x) | ||||
count[0] += 1 | ||||
Matt Mackall
|
r16420 | progress(_bundling, count[0], | ||
Matt Mackall
|
r16421 | unit=_changesets, total=count[1]) | ||
Matt Mackall
|
r13830 | return x | ||
elif revlog == mf: | ||||
clnode = mfs[x] | ||||
mdata = mf.readfast(x) | ||||
Matt Mackall
|
r16422 | for f, n in mdata.iteritems(): | ||
Matt Mackall
|
r16419 | if f in changedfiles: | ||
Matt Mackall
|
r16422 | fnodes[f].setdefault(n, clnode) | ||
Matt Mackall
|
r13830 | count[0] += 1 | ||
Matt Mackall
|
r16420 | progress(_bundling, count[0], | ||
Matt Mackall
|
r16421 | unit=_manifests, total=count[1]) | ||
Matt Mackall
|
r16422 | return clnode | ||
Matt Mackall
|
r13830 | else: | ||
Matt Mackall
|
r16420 | progress(_bundling, count[0], item=fstate[0], | ||
Matt Mackall
|
r16421 | unit=_files, total=count[1]) | ||
Matt Mackall
|
r13830 | return fstate[1][x] | ||
Eric Hopper
|
r1458 | |||
Matt Mackall
|
r13831 | bundler = changegroup.bundle10(lookup) | ||
Sune Foldager
|
r14365 | reorder = self.ui.config('bundle', 'reorder', 'auto') | ||
if reorder == 'auto': | ||||
reorder = None | ||||
else: | ||||
reorder = util.parsebool(reorder) | ||||
mpm@selenic.com
|
r1089 | |||
def gengroup(): | ||||
Eric Hopper
|
r1466 | # Create a changenode group generator that will call our functions | ||
# back to lookup the owning changenode and collect information. | ||||
Matt Mackall
|
r16421 | count[:] = [0, len(csets)] | ||
Sune Foldager
|
r14365 | for chunk in cl.group(csets, bundler, reorder=reorder): | ||
Matt Mackall
|
r13709 | yield chunk | ||
Matt Mackall
|
r16420 | progress(_bundling, None) | ||
Augie Fackler
|
r10432 | |||
Eric Hopper
|
r1466 | # Create a generator for the manifestnodes that calls our lookup | ||
# and data collection functions back. | ||||
Matt Mackall
|
r16422 | for f in changedfiles: | ||
fnodes[f] = {} | ||||
Matt Mackall
|
r16421 | count[:] = [0, len(mfs)] | ||
Sune Foldager
|
r14365 | for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder): | ||
Matt Mackall
|
r13709 | yield chunk | ||
Matt Mackall
|
r16420 | progress(_bundling, None) | ||
Eric Hopper
|
r1466 | |||
Matt Mackall
|
r13706 | mfs.clear() | ||
Eric Hopper
|
r1466 | |||
# Go through all our files in order sorted by name. | ||||
Matt Mackall
|
r16421 | count[:] = [0, len(changedfiles)] | ||
Matt Mackall
|
r13829 | for fname in sorted(changedfiles): | ||
Eric Hopper
|
r1458 | filerevlog = self.file(fname) | ||
Matt Mackall
|
r6750 | if not len(filerevlog): | ||
Brodie Rao
|
r16683 | raise util.Abort(_("empty or missing revlog for %s") | ||
% fname) | ||||
Matt Mackall
|
r13829 | fstate[0] = fname | ||
fstate[1] = fnodes.pop(fname, {}) | ||||
Matt Mackall
|
r13783 | |||
Sune Foldager
|
r14522 | nodelist = prune(filerevlog, fstate[1]) | ||
if nodelist: | ||||
count[0] += 1 | ||||
yield bundler.fileheader(fname) | ||||
for chunk in filerevlog.group(nodelist, bundler, reorder): | ||||
yield chunk | ||||
Eric Hopper
|
r1466 | # Signal that no more groups are left. | ||
Matt Mackall
|
r13831 | yield bundler.close() | ||
Matt Mackall
|
r16420 | progress(_bundling, None) | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r13706 | if csets: | ||
self.hook('outgoing', node=hex(csets[0]), source=source) | ||||
Vadim Gelfer
|
r1736 | |||
Matt Mackall
|
r12337 | return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN') | ||
Eric Hopper
|
r1458 | |||
Vadim Gelfer
|
r1736 | def changegroup(self, basenodes, source): | ||
Benoit Boissinot
|
r7233 | # to avoid a race we use changegroupsubset() (issue1320) | ||
return self.changegroupsubset(basenodes, self.heads(), source) | ||||
Pierre-Yves David
|
r18016 | @unfilteredmethod | ||
Peter Arrenbrecht
|
r9820 | def _changegroup(self, nodes, source): | ||
Greg Ward
|
r9437 | """Compute the changegroup of all nodes that we have that a recipient | ||
doesn't. Return a chunkbuffer object whose read() method will return | ||||
successive changegroup chunks. | ||||
Eric Hopper
|
r1466 | |||
This is much easier than the previous function as we can assume that | ||||
Benoit Boissinot
|
r7233 | the recipient has any changenode we aren't sending them. | ||
Peter Arrenbrecht
|
r9820 | nodes is the set of nodes to send""" | ||
Vadim Gelfer
|
r1736 | |||
Matt Mackall
|
r13812 | cl = self.changelog | ||
mf = self.manifest | ||||
mfs = {} | ||||
changedfiles = set() | ||||
Matt Mackall
|
r13829 | fstate = [''] | ||
Matt Mackall
|
r16421 | count = [0, 0] | ||
Vadim Gelfer
|
r1736 | |||
Matt Mackall
|
r13812 | self.hook('preoutgoing', throw=True, source=source) | ||
Thomas Arendsen Hein
|
r5763 | self.changegroupinfo(nodes, source) | ||
Eric Hopper
|
r1458 | |||
Martin Geisler
|
r8152 | revset = set([cl.rev(n) for n in nodes]) | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r6750 | def gennodelst(log): | ||
Matt Mackall
|
r16425 | ln, llr = log.node, log.linkrev | ||
return [ln(r) for r in log if llr(r) in revset] | ||||
Matt Mackall
|
r16420 | progress = self.ui.progress | ||
_bundling = _('bundling') | ||||
_changesets = _('changesets') | ||||
_manifests = _('manifests') | ||||
_files = _('files') | ||||
Eric Hopper
|
r1458 | |||
Matt Mackall
|
r13830 | def lookup(revlog, x): | ||
if revlog == cl: | ||||
c = cl.read(x) | ||||
changedfiles.update(c[3]) | ||||
mfs.setdefault(c[0], x) | ||||
count[0] += 1 | ||||
Matt Mackall
|
r16420 | progress(_bundling, count[0], | ||
Matt Mackall
|
r16421 | unit=_changesets, total=count[1]) | ||
Matt Mackall
|
r13830 | return x | ||
elif revlog == mf: | ||||
count[0] += 1 | ||||
Matt Mackall
|
r16420 | progress(_bundling, count[0], | ||
Matt Mackall
|
r16421 | unit=_manifests, total=count[1]) | ||
Matt Mackall
|
r13830 | return cl.node(revlog.linkrev(revlog.rev(x))) | ||
else: | ||||
Matt Mackall
|
r16420 | progress(_bundling, count[0], item=fstate[0], | ||
Matt Mackall
|
r16421 | total=count[1], unit=_files) | ||
Matt Mackall
|
r13830 | return cl.node(revlog.linkrev(revlog.rev(x))) | ||
Matt Mackall
|
r13829 | |||
Matt Mackall
|
r13831 | bundler = changegroup.bundle10(lookup) | ||
Sune Foldager
|
r14365 | reorder = self.ui.config('bundle', 'reorder', 'auto') | ||
if reorder == 'auto': | ||||
reorder = None | ||||
else: | ||||
reorder = util.parsebool(reorder) | ||||
Eric Hopper
|
r1458 | |||
def gengroup(): | ||||
Greg Ward
|
r9437 | '''yield a sequence of changegroup chunks (strings)''' | ||
mpm@selenic.com
|
r1089 | # construct a list of all changed files | ||
Eric Hopper
|
r1458 | |||
Matt Mackall
|
r16421 | count[:] = [0, len(nodes)] | ||
Sune Foldager
|
r14365 | for chunk in cl.group(nodes, bundler, reorder=reorder): | ||
Matt Mackall
|
r13716 | yield chunk | ||
Matt Mackall
|
r16420 | progress(_bundling, None) | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r16421 | count[:] = [0, len(mfs)] | ||
Sune Foldager
|
r14365 | for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder): | ||
Matt Mackall
|
r13716 | yield chunk | ||
Matt Mackall
|
r16420 | progress(_bundling, None) | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r16421 | count[:] = [0, len(changedfiles)] | ||
Matt Mackall
|
r13829 | for fname in sorted(changedfiles): | ||
Eric Hopper
|
r1458 | filerevlog = self.file(fname) | ||
Matt Mackall
|
r6750 | if not len(filerevlog): | ||
Brodie Rao
|
r16683 | raise util.Abort(_("empty or missing revlog for %s") | ||
% fname) | ||||
Matt Mackall
|
r13829 | fstate[0] = fname | ||
Sune Foldager
|
r14522 | nodelist = gennodelst(filerevlog) | ||
if nodelist: | ||||
count[0] += 1 | ||||
yield bundler.fileheader(fname) | ||||
for chunk in filerevlog.group(nodelist, bundler, reorder): | ||||
yield chunk | ||||
Matt Mackall
|
r13831 | yield bundler.close() | ||
Matt Mackall
|
r16420 | progress(_bundling, None) | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r2107 | if nodes: | ||
self.hook('outgoing', node=hex(nodes[0]), source=source) | ||||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r12337 | return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN') | ||
mpm@selenic.com
|
r1089 | |||
Pierre-Yves David
|
r18016 | @unfilteredmethod | ||
Pierre-Yves David
|
r15585 | def addchangegroup(self, source, srctype, url, emptyok=False): | ||
Greg Ward
|
r11153 | """Add the changegroup returned by source.read() to this repo. | ||
srctype is a string like 'push', 'pull', or 'unbundle'. url is | ||||
the URL of the repo where this changegroup is coming from. | ||||
mpm@selenic.com
|
r1089 | |||
Greg Ward
|
r11153 | Return an integer summarizing the change to this repo: | ||
Thomas Arendsen Hein
|
r3803 | - nothing changed or no source: 0 | ||
- more heads than before: 1+added heads (2..n) | ||||
Greg Ward
|
r11153 | - fewer heads than before: -1-removed heads (-2..-n) | ||
Thomas Arendsen Hein
|
r3803 | - number of heads stays the same: 1 | ||
""" | ||||
mpm@selenic.com
|
r1089 | def csmap(x): | ||
Martin Geisler
|
r9467 | self.ui.debug("add changeset %s\n" % short(x)) | ||
Matt Mackall
|
r6750 | return len(cl) | ||
mpm@selenic.com
|
r1089 | |||
def revmap(x): | ||||
Vadim Gelfer
|
r1998 | return cl.rev(x) | ||
mpm@selenic.com
|
r1089 | |||
Thomas Arendsen Hein
|
r1615 | if not source: | ||
Vadim Gelfer
|
r2019 | return 0 | ||
Vadim Gelfer
|
r1730 | |||
Vadim Gelfer
|
r2673 | self.hook('prechangegroup', throw=True, source=srctype, url=url) | ||
Vadim Gelfer
|
r1730 | |||
mpm@selenic.com
|
r1089 | changesets = files = revisions = 0 | ||
Matt Mackall
|
r10888 | efiles = set() | ||
mpm@selenic.com
|
r1089 | |||
Benoit Boissinot
|
r2395 | # write changelog data to temp files so concurrent readers will not see | ||
# an inconsistent view | ||||
Matt Mackall
|
r4261 | cl = self.changelog | ||
cl.delayupdate() | ||||
Adrian Buehlmann
|
r14036 | oldheads = cl.heads() | ||
Vadim Gelfer
|
r1998 | |||
Brodie Rao
|
r14076 | tr = self.transaction("\n".join([srctype, util.hidepassword(url)])) | ||
Matt Mackall
|
r4915 | try: | ||
Matt Mackall
|
r4970 | trp = weakref.proxy(tr) | ||
Matt Mackall
|
r4915 | # pull off the changeset group | ||
self.ui.status(_("adding changesets\n")) | ||||
Peter Arrenbrecht
|
r8393 | clstart = len(cl) | ||
Augie Fackler
|
r10430 | class prog(object): | ||
Martin Geisler
|
r10496 | step = _('changesets') | ||
Augie Fackler
|
r10430 | count = 1 | ||
ui = self.ui | ||||
Matt Mackall
|
r10888 | total = None | ||
Augie Fackler
|
r10430 | def __call__(self): | ||
Matt Mackall
|
r10888 | self.ui.progress(self.step, self.count, unit=_('chunks'), | ||
total=self.total) | ||||
Augie Fackler
|
r10430 | self.count += 1 | ||
pr = prog() | ||||
Matt Mackall
|
r12334 | source.callback = pr | ||
Benoit Boissinot
|
r14144 | source.changelogheader() | ||
Pierre-Yves David
|
r15890 | srccontent = cl.addgroup(source, csmap, trp) | ||
if not (srccontent or emptyok): | ||||
Matt Mackall
|
r4915 | raise util.Abort(_("received changelog group is empty")) | ||
Peter Arrenbrecht
|
r8393 | clend = len(cl) | ||
changesets = clend - clstart | ||||
Matt Mackall
|
r10888 | for c in xrange(clstart, clend): | ||
efiles.update(self[c].files()) | ||||
efiles = len(efiles) | ||||
Martin Geisler
|
r10496 | self.ui.progress(_('changesets'), None) | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r4915 | # pull off the manifest group | ||
self.ui.status(_("adding manifests\n")) | ||||
Martin Geisler
|
r10496 | pr.step = _('manifests') | ||
Augie Fackler
|
r10430 | pr.count = 1 | ||
Matt Mackall
|
r10888 | pr.total = changesets # manifests <= changesets | ||
Matt Mackall
|
r4915 | # no need to check for empty manifest group here: | ||
# if the result of the merge of 1 and 2 is the same in 3 and 4, | ||||
# no new manifest will be created and the manifest group will | ||||
# be empty during the pull | ||||
Benoit Boissinot
|
r14144 | source.manifestheader() | ||
Matt Mackall
|
r12335 | self.manifest.addgroup(source, revmap, trp) | ||
Martin Geisler
|
r10496 | self.ui.progress(_('manifests'), None) | ||
mpm@selenic.com
|
r1089 | |||
Augie Fackler
|
r10418 | needfiles = {} | ||
if self.ui.configbool('server', 'validate', default=False): | ||||
# validate incoming csets have their manifests | ||||
for cset in xrange(clstart, clend): | ||||
mfest = self.changelog.read(self.changelog.node(cset))[0] | ||||
mfest = self.manifest.readdelta(mfest) | ||||
# store file nodes we must see | ||||
for f, n in mfest.iteritems(): | ||||
needfiles.setdefault(f, set()).add(n) | ||||
Matt Mackall
|
r4915 | # process the files | ||
self.ui.status(_("adding file changes\n")) | ||||
Wagner Bruna
|
r14756 | pr.step = _('files') | ||
Augie Fackler
|
r10430 | pr.count = 1 | ||
Matt Mackall
|
r10888 | pr.total = efiles | ||
Matt Mackall
|
r12334 | source.callback = None | ||
Martin Geisler
|
r14494 | while True: | ||
Benoit Boissinot
|
r14144 | chunkdata = source.filelogheader() | ||
if not chunkdata: | ||||
Matt Mackall
|
r4915 | break | ||
Benoit Boissinot
|
r14144 | f = chunkdata["filename"] | ||
Martin Geisler
|
r9467 | self.ui.debug("adding %s revisions\n" % f) | ||
Matt Mackall
|
r10888 | pr() | ||
Matt Mackall
|
r4915 | fl = self.file(f) | ||
Matt Mackall
|
r6750 | o = len(fl) | ||
Pierre-Yves David
|
r15890 | if not fl.addgroup(source, revmap, trp): | ||
Matt Mackall
|
r4915 | raise util.Abort(_("received file revlog group is empty")) | ||
Matt Mackall
|
r6750 | revisions += len(fl) - o | ||
Matt Mackall
|
r4915 | files += 1 | ||
Augie Fackler
|
r10418 | if f in needfiles: | ||
needs = needfiles[f] | ||||
for new in xrange(o, len(fl)): | ||||
n = fl.node(new) | ||||
if n in needs: | ||||
needs.remove(n) | ||||
if not needs: | ||||
del needfiles[f] | ||||
Martin Geisler
|
r10496 | self.ui.progress(_('files'), None) | ||
Augie Fackler
|
r10418 | |||
for f, needs in needfiles.iteritems(): | ||||
fl = self.file(f) | ||||
for n in needs: | ||||
try: | ||||
fl.rev(n) | ||||
except error.LookupError: | ||||
raise util.Abort( | ||||
_('missing file data for %s:%s - run hg verify') % | ||||
(f, hex(n))) | ||||
Matt Mackall
|
r4915 | |||
Adrian Buehlmann
|
r14036 | dh = 0 | ||
if oldheads: | ||||
heads = cl.heads() | ||||
dh = len(heads) - len(oldheads) | ||||
for h in heads: | ||||
Brodie Rao
|
r16720 | if h not in oldheads and self[h].closesbranch(): | ||
Adrian Buehlmann
|
r14036 | dh -= 1 | ||
htext = "" | ||||
if dh: | ||||
htext = _(" (%+d heads)") % dh | ||||
Vadim Gelfer
|
r1998 | |||
Matt Mackall
|
r4915 | self.ui.status(_("added %d changesets" | ||
" with %d changes to %d files%s\n") | ||||
Adrian Buehlmann
|
r14036 | % (changesets, revisions, files, htext)) | ||
Pierre-Yves David
|
r18105 | self.invalidatevolatilesets() | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r4915 | if changesets > 0: | ||
Peter Arrenbrecht
|
r8392 | p = lambda: cl.writepending() and self.root or "" | ||
Matt Mackall
|
r4915 | self.hook('pretxnchangegroup', throw=True, | ||
Peter Arrenbrecht
|
r8393 | node=hex(cl.node(clstart)), source=srctype, | ||
Matt Mackall
|
r7787 | url=url, pending=p) | ||
Pierre-Yves David
|
r15484 | added = [cl.node(r) for r in xrange(clstart, clend)] | ||
Pierre-Yves David
|
r15659 | publishing = self.ui.configbool('phases', 'publish', True) | ||
Pierre-Yves David
|
r15891 | if srctype == 'push': | ||
Pierre-Yves David
|
r15659 | # Old servers can not push the boundary themselves. | ||
Pierre-Yves David
|
r15891 | # New servers won't push the boundary if the changeset already | ||
# existed locally as secret | ||||
# | ||||
# We should not use added here but the list of all changes in | ||||
# the bundle | ||||
if publishing: | ||||
phases.advanceboundary(self, phases.public, srccontent) | ||||
else: | ||||
phases.advanceboundary(self, phases.draft, srccontent) | ||||
phases.retractboundary(self, phases.draft, added) | ||||
elif srctype != 'strip': | ||||
# publishing only alters behavior during push | ||||
# | ||||
# strip should not touch boundary at all | ||||
Pierre-Yves David
|
r15818 | phases.retractboundary(self, phases.draft, added) | ||
Pierre-Yves David
|
r15646 | |||
Matt Mackall
|
r7787 | # make changelog see real files again | ||
cl.finalize(trp) | ||||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r4915 | tr.close() | ||
Pierre-Yves David
|
r15584 | |||
Matt Mackall
|
r15586 | if changesets > 0: | ||
Pierre-Yves David
|
r18137 | if srctype != 'strip': | ||
# During strip, the branchcache is invalid but the coming call to | ||||
# `destroyed` will repair it. | ||||
# In other cases we can safely update the cache on disk. | ||||
branchmap.updatecache(self) | ||||
Matt Mackall
|
r15586 | def runhooks(): | ||
Pierre-Yves David
|
r15584 | # forcefully update the on-disk branch cache | ||
self.ui.debug("updating the branch cache\n") | ||||
self.hook("changegroup", node=hex(cl.node(clstart)), | ||||
source=srctype, url=url) | ||||
for n in added: | ||||
self.hook("incoming", node=hex(n), source=srctype, | ||||
url=url) | ||||
Matt Mackall
|
r15587 | self._afterlock(runhooks) | ||
Pierre-Yves David
|
r15584 | |||
Matt Mackall
|
r4915 | finally: | ||
Ronny Pfannschmidt
|
r11230 | tr.release() | ||
Thomas Arendsen Hein
|
r3803 | # never return 0 here: | ||
Adrian Buehlmann
|
r14036 | if dh < 0: | ||
return dh - 1 | ||||
Thomas Arendsen Hein
|
r3803 | else: | ||
Adrian Buehlmann
|
r14036 | return dh + 1 | ||
mpm@selenic.com
|
r1089 | |||
Sune Foldager
|
r12296 | def stream_in(self, remote, requirements): | ||
Adrian Buehlmann
|
r13390 | lock = self.lock() | ||
Thomas Arendsen Hein
|
r3564 | try: | ||
Tomasz Kleczek
|
r17740 | # Save remote branchmap. We will use it later | ||
# to speed up branchcache creation | ||||
rbranchmap = None | ||||
if remote.capable("branchmap"): | ||||
rbranchmap = remote.branchmap() | ||||
Adrian Buehlmann
|
r13390 | fp = remote.stream_out() | ||
Thomas Arendsen Hein
|
r3564 | l = fp.readline() | ||
try: | ||||
Adrian Buehlmann
|
r13390 | resp = int(l) | ||
except ValueError: | ||||
raise error.ResponseError( | ||||
Martin Geisler
|
r16941 | _('unexpected response from remote server:'), l) | ||
Adrian Buehlmann
|
r13390 | if resp == 1: | ||
raise util.Abort(_('operation forbidden by server')) | ||||
elif resp == 2: | ||||
raise util.Abort(_('locking the remote repository failed')) | ||||
elif resp != 0: | ||||
raise util.Abort(_('the server sent an unknown error code')) | ||||
self.ui.status(_('streaming all changes\n')) | ||||
l = fp.readline() | ||||
try: | ||||
total_files, total_bytes = map(int, l.split(' ', 1)) | ||||
Bernhard Leiner
|
r7063 | except (ValueError, TypeError): | ||
Matt Mackall
|
r7641 | raise error.ResponseError( | ||
Martin Geisler
|
r16941 | _('unexpected response from remote server:'), l) | ||
Adrian Buehlmann
|
r13390 | self.ui.status(_('%d files to transfer, %s of data\n') % | ||
(total_files, util.bytecount(total_bytes))) | ||||
Augie Fackler
|
r16770 | handled_bytes = 0 | ||
self.ui.progress(_('clone'), 0, total=total_bytes) | ||||
Adrian Buehlmann
|
r13390 | start = time.time() | ||
for i in xrange(total_files): | ||||
# XXX doesn't support '\n' or '\r' in filenames | ||||
l = fp.readline() | ||||
try: | ||||
name, size = l.split('\0', 1) | ||||
size = int(size) | ||||
except (ValueError, TypeError): | ||||
raise error.ResponseError( | ||||
Martin Geisler
|
r16941 | _('unexpected response from remote server:'), l) | ||
Matt Mackall
|
r16398 | if self.ui.debugflag: | ||
self.ui.debug('adding %s (%s)\n' % | ||||
(name, util.bytecount(size))) | ||||
Adrian Buehlmann
|
r13390 | # for backwards compat, name was partially encoded | ||
ofp = self.sopener(store.decodedir(name), 'w') | ||||
for chunk in util.filechunkiter(fp, limit=size): | ||||
Augie Fackler
|
r16770 | handled_bytes += len(chunk) | ||
self.ui.progress(_('clone'), handled_bytes, | ||||
total=total_bytes) | ||||
Adrian Buehlmann
|
r13390 | ofp.write(chunk) | ||
ofp.close() | ||||
elapsed = time.time() - start | ||||
if elapsed <= 0: | ||||
elapsed = 0.001 | ||||
Augie Fackler
|
r16770 | self.ui.progress(_('clone'), None) | ||
Adrian Buehlmann
|
r13390 | self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') % | ||
(util.bytecount(total_bytes), elapsed, | ||||
util.bytecount(total_bytes / elapsed))) | ||||
Sune Foldager
|
r12296 | |||
Brodie Rao
|
r16683 | # new requirements = old non-format requirements + | ||
# new format-related | ||||
Adrian Buehlmann
|
r13390 | # requirements from the streamed-in repository | ||
requirements.update(set(self.requirements) - self.supportedformats) | ||||
self._applyrequirements(requirements) | ||||
self._writerequirements() | ||||
Sune Foldager
|
r12296 | |||
Tomasz Kleczek
|
r17740 | if rbranchmap: | ||
rbheads = [] | ||||
for bheads in rbranchmap.itervalues(): | ||||
rbheads.extend(bheads) | ||||
if rbheads: | ||||
rtiprev = max((int(self.changelog.rev(node)) | ||||
for node in rbheads)) | ||||
Pierre-Yves David
|
r18125 | cache = branchmap.branchcache(rbranchmap, | ||
Pierre-Yves David
|
r18126 | self[rtiprev].node(), | ||
rtiprev) | ||||
Pierre-Yves David
|
r18189 | self._branchcaches[None] = cache | ||
cache.write(self.unfiltered()) | ||||
Adrian Buehlmann
|
r13390 | self.invalidate() | ||
return len(self.heads()) + 1 | ||||
finally: | ||||
lock.release() | ||||
mpm@selenic.com
|
r1089 | |||
Vadim Gelfer
|
r2613 | def clone(self, remote, heads=[], stream=False): | ||
Vadim Gelfer
|
r2612 | '''clone remote repository. | ||
Matt Mackall
|
r1382 | |||
Vadim Gelfer
|
r2612 | keyword arguments: | ||
heads: list of revs to clone (forces use of pull) | ||||
Vadim Gelfer
|
r2621 | stream: use streaming clone if possible''' | ||
mpm@selenic.com
|
r1089 | |||
Vadim Gelfer
|
r2621 | # now, all clients that can request uncompressed clones can | ||
# read repo formats supported by all servers that can serve | ||||
# them. | ||||
mpm@selenic.com
|
r1089 | |||
Vadim Gelfer
|
r2612 | # if revlog format changes, client will have to check version | ||
Vadim Gelfer
|
r2621 | # and format flags on "stream" capability, and use | ||
# uncompressed only if compatible. | ||||
mpm@selenic.com
|
r1089 | |||
Benoit Allard
|
r16361 | if not stream: | ||
Mads Kiilerich
|
r17427 | # if the server explicitly prefers to stream (for fast LANs) | ||
Benoit Allard
|
r16361 | stream = remote.capable('stream-preferred') | ||
Sune Foldager
|
r12296 | if stream and not heads: | ||
# 'stream' means remote revlog format is revlogv1 only | ||||
if remote.capable('stream'): | ||||
return self.stream_in(remote, set(('revlogv1',))) | ||||
# otherwise, 'streamreqs' contains the remote revlog format | ||||
streamreqs = remote.capable('streamreqs') | ||||
if streamreqs: | ||||
streamreqs = set(streamreqs.split(',')) | ||||
# if we support it, stream in and adjust our requirements | ||||
if not streamreqs - self.supportedformats: | ||||
return self.stream_in(remote, streamreqs) | ||||
Vadim Gelfer
|
r2612 | return self.pull(remote, heads) | ||
mason@suse.com
|
r1806 | |||
Matt Mackall
|
r11368 | def pushkey(self, namespace, key, old, new): | ||
Brodie Rao
|
r14102 | self.hook('prepushkey', throw=True, namespace=namespace, key=key, | ||
old=old, new=new) | ||||
Pierre-Yves David
|
r17293 | self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key)) | ||
Brodie Rao
|
r14102 | ret = pushkey.push(self, namespace, key, old, new) | ||
self.hook('pushkey', namespace=namespace, key=key, old=old, new=new, | ||||
ret=ret) | ||||
return ret | ||||
Matt Mackall
|
r11368 | |||
def listkeys(self, namespace): | ||||
Brodie Rao
|
r14102 | self.hook('prelistkeys', throw=True, namespace=namespace) | ||
Pierre-Yves David
|
r17293 | self.ui.debug('listing keys for "%s"\n' % namespace) | ||
Brodie Rao
|
r14102 | values = pushkey.list(self, namespace) | ||
self.hook('listkeys', namespace=namespace, values=values) | ||||
return values | ||||
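# A quick recap (not part of the original source) of the pushkey
# namespaces exercised elsewhere in this file:
#
#   repo.listkeys('bookmarks')  # {name: hex node}, compared during push()
#   repo.listkeys('phases')     # may carry a 'publishing' flag, see pull()
#   repo.listkeys('obsolete')   # base85-encoded marker dumps ('dump0', ...)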
Matt Mackall
|
r11368 | |||
Peter Arrenbrecht
|
r14048 | def debugwireargs(self, one, two, three=None, four=None, five=None): | ||
Peter Arrenbrecht
|
r13720 | '''used to test argument passing over the wire''' | ||
Peter Arrenbrecht
|
r14048 | return "%s %s %s %s %s" % (one, two, three, four, five) | ||
Matt Mackall
|
r11368 | |||
Patrick Mezard
|
r14529 | def savecommitmessage(self, text): | ||
fp = self.opener('last-message.txt', 'wb') | ||||
try: | ||||
fp.write(text) | ||||
finally: | ||||
fp.close() | ||||
Mads Kiilerich
|
r18054 | return self.pathto(fp.name[len(self.root) + 1:]) | ||
Patrick Mezard
|
r14529 | |||
mason@suse.com
|
r1806 | # used to avoid circular references so destructors work | ||
Benoit Boissinot
|
r3790 | def aftertrans(files): | ||
renamefiles = [tuple(t) for t in files] | ||||
mason@suse.com
|
r1806 | def a(): | ||
Benoit Boissinot
|
r3790 | for src, dest in renamefiles: | ||
Alain Leufroy
|
r16441 | try: | ||
util.rename(src, dest) | ||||
except OSError: # journal file does not yet exist | ||||
pass | ||||
mason@suse.com
|
r1806 | return a | ||
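# A minimal sketch (not part of the original source), with assumed file
# names: the returned callback renames each journal file to its undo
# counterpart, silently skipping journals that were never created.
#
#   a = aftertrans([('journal', 'undo'),
#                   ('journal.dirstate', 'undo.dirstate')])
#   a()  # performs the renames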
Alexander Solovyov
|
r14266 | def undoname(fn): | ||
base, name = os.path.split(fn) | ||||
assert name.startswith('journal') | ||||
return os.path.join(base, name.replace('journal', 'undo', 1)) | ||||
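# An illustration (not part of the original source) of the mapping:
#
#   undoname('/repo/.hg/journal.dirstate') == '/repo/.hg/undo.dirstate'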
Vadim Gelfer
|
r2740 | def instance(ui, path, create): | ||
Mads Kiilerich
|
r14825 | return localrepository(ui, util.urllocalpath(path), create) | ||
Thomas Arendsen Hein
|
r3223 | |||
Vadim Gelfer
|
r2740 | def islocal(path): | ||
return True | ||||