localrepo.py
1899 lines
| 68.5 KiB
| text/x-python
|
PythonLexer
/ mercurial / localrepo.py
mpm@selenic.com
|
r1089 | # localrepo.py - read/write repository class for mercurial | ||
# | ||||
# Copyright 2005 Matt Mackall <mpm@selenic.com> | ||||
# | ||||
# This software may be used and distributed according to the terms | ||||
# of the GNU General Public License, incorporated herein by reference. | ||||
mpm@selenic.com
|
r1097 | import struct, os, util | ||
mpm@selenic.com
|
r1100 | import filelog, manifest, changelog, dirstate, repo | ||
from node import * | ||||
Benoit Boissinot
|
r1400 | from i18n import gettext as _ | ||
mpm@selenic.com
|
r1089 | from demandload import * | ||
Thomas Arendsen Hein
|
r1839 | demandload(globals(), "re lock transaction tempfile stat mdiff errno ui") | ||
mpm@selenic.com
|
r1089 | |||
Eric Hopper
|
r1559 | class localrepository(object): | ||
mason@suse.com
|
    def __del__(self):
        # Drop our reference to any pending transaction on teardown so the
        # transaction object itself can be finalized.
        self.transhandle = None
Thomas Arendsen Hein
|
    def __init__(self, parentui, path=None, create=0):
        """Open (or with create=1, initialize) a local repository.

        parentui: ui object the repo ui is derived from.
        path: repository root; if None, search upward from the current
              directory for a '.hg' directory.
        create: when true, create the '.hg' skeleton instead of requiring
              an existing repository.

        Raises repo.RepoError if no repository is found.
        """
        if not path:
            # walk up the directory tree looking for a '.hg' directory
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                # dirname() of the root returns the root: we hit the top
                if p == oldp:
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes under .hg, wopener under the working dir root
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily-built caches; see tags() / nodetags() / wread() / wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        # currently running transaction, if any (see transaction())
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        try:
            # per-repository configuration; missing file is fine
            self.ui.readconfig(self.join("hgrc"))
        except IOError:
            pass
mpm@selenic.com
|
r1089 | |||
Vadim Gelfer
|
r1718 | def hook(self, name, throw=False, **args): | ||
Benoit Boissinot
|
r1480 | def runhook(name, cmd): | ||
self.ui.note(_("running hook %s: %s\n") % (name, cmd)) | ||||
mpm@selenic.com
|
r1089 | old = {} | ||
for k, v in args.items(): | ||||
k = k.upper() | ||||
Vadim Gelfer
|
r1726 | old['HG_' + k] = os.environ.get(k, None) | ||
mpm@selenic.com
|
r1089 | old[k] = os.environ.get(k, None) | ||
Vadim Gelfer
|
r1726 | os.environ['HG_' + k] = str(v) | ||
Vadim Gelfer
|
r1719 | os.environ[k] = str(v) | ||
mpm@selenic.com
|
r1089 | |||
Vadim Gelfer
|
r1718 | try: | ||
# Hooks run in the repository root | ||||
olddir = os.getcwd() | ||||
os.chdir(self.root) | ||||
r = os.system(cmd) | ||||
finally: | ||||
for k, v in old.items(): | ||||
Vadim Gelfer
|
r1726 | if v is not None: | ||
Vadim Gelfer
|
r1718 | os.environ[k] = v | ||
else: | ||||
del os.environ[k] | ||||
mpm@selenic.com
|
r1089 | |||
Vadim Gelfer
|
r1718 | os.chdir(olddir) | ||
mpm@selenic.com
|
r1089 | |||
if r: | ||||
Vadim Gelfer
|
r1718 | desc, r = util.explain_exit(r) | ||
if throw: | ||||
raise util.Abort(_('%s hook %s') % (name, desc)) | ||||
self.ui.warn(_('error: %s hook %s\n') % (name, desc)) | ||||
mpm@selenic.com
|
r1089 | return False | ||
Benoit Boissinot
|
r1480 | return True | ||
r = True | ||||
Thomas Arendsen Hein
|
r1838 | hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks") | ||
if hname.split(".", 1)[0] == name and cmd] | ||||
hooks.sort() | ||||
for hname, cmd in hooks: | ||||
r = runhook(hname, cmd) and r | ||||
Benoit Boissinot
|
r1480 | return r | ||
mpm@selenic.com
|
r1089 | |||
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}

            # helper: record one "node tag" line; unparsable node hashes
            # map to the empty string rather than aborting
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # local (uncommitted) tags override committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always defined and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
def tagslist(self): | ||||
'''return a list of tags ordered by revision''' | ||||
l = [] | ||||
for t, n in self.tags().items(): | ||||
try: | ||||
r = self.changelog.rev(n) | ||||
except: | ||||
r = -2 # sort to the beginning of the list if unknown | ||||
Thomas Arendsen Hein
|
r1615 | l.append((r, t, n)) | ||
mpm@selenic.com
|
r1089 | l.sort() | ||
Thomas Arendsen Hein
|
r1615 | return [(t, n) for r, t, n in l] | ||
mpm@selenic.com
|
r1089 | |||
def nodetags(self, node): | ||||
'''return the tags associated with a node''' | ||||
if not self.nodetagscache: | ||||
self.nodetagscache = {} | ||||
Thomas Arendsen Hein
|
r1615 | for t, n in self.tags().items(): | ||
self.nodetagscache.setdefault(n, []).append(t) | ||||
mpm@selenic.com
|
r1089 | return self.nodetagscache.get(node, []) | ||
def lookup(self, key): | ||||
try: | ||||
return self.tags()[key] | ||||
except KeyError: | ||||
try: | ||||
return self.changelog.lookup(key) | ||||
except: | ||||
Benoit Boissinot
|
r1402 | raise repo.RepoError(_("unknown revision '%s'") % key) | ||
mpm@selenic.com
|
r1089 | |||
    def dev(self):
        # device number of the filesystem holding the .hg directory
        return os.stat(self.path).st_dev
    def local(self):
        # always True for this class; presumably remote repository classes
        # answer False here -- part of the common repository interface
        return True
mpm@selenic.com
|
r1089 | |||
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
def file(self, f): | ||||
Thomas Arendsen Hein
|
r1615 | if f[0] == '/': | ||
f = f[1:] | ||||
mpm@selenic.com
|
r1100 | return filelog.filelog(self.opener, f) | ||
mpm@selenic.com
|
r1089 | |||
    def getcwd(self):
        # current directory expressed relative to the repository root
        return self.dirstate.getcwd()
    def wfile(self, f, mode='r'):
        # open file f from the working directory (no filtering applied)
        return self.wopener(f, mode)
def wread(self, filename): | ||||
mpm@selenic.com
|
r1258 | if self.encodepats == None: | ||
l = [] | ||||
for pat, cmd in self.ui.configitems("encode"): | ||||
mf = util.matcher("", "/", [pat], [], [])[1] | ||||
l.append((mf, cmd)) | ||||
self.encodepats = l | ||||
data = self.wopener(filename, 'r').read() | ||||
for mf, cmd in self.encodepats: | ||||
if mf(filename): | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("filtering %s through %s\n") % (filename, cmd)) | ||
mpm@selenic.com
|
r1258 | data = util.filter(data, cmd) | ||
break | ||||
return data | ||||
mpm@selenic.com
|
r1089 | |||
def wwrite(self, filename, data, fd=None): | ||||
mpm@selenic.com
|
r1258 | if self.decodepats == None: | ||
l = [] | ||||
for pat, cmd in self.ui.configitems("decode"): | ||||
mf = util.matcher("", "/", [pat], [], [])[1] | ||||
l.append((mf, cmd)) | ||||
self.decodepats = l | ||||
for mf, cmd in self.decodepats: | ||||
if mf(filename): | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("filtering %s through %s\n") % (filename, cmd)) | ||
mpm@selenic.com
|
r1258 | data = util.filter(data, cmd) | ||
break | ||||
mpm@selenic.com
|
r1089 | if fd: | ||
return fd.write(data) | ||||
return self.wopener(filename, 'w').write(data) | ||||
def transaction(self): | ||||
mason@suse.com
|
r1806 | tr = self.transhandle | ||
if tr != None and tr.running(): | ||||
return tr.nest() | ||||
mpm@selenic.com
|
r1089 | # save dirstate for undo | ||
try: | ||||
ds = self.opener("dirstate").read() | ||||
except IOError: | ||||
ds = "" | ||||
self.opener("journal.dirstate", "w").write(ds) | ||||
mason@suse.com
|
r1806 | tr = transaction.transaction(self.ui.warn, self.opener, | ||
self.join("journal"), | ||||
aftertrans(self.path)) | ||||
self.transhandle = tr | ||||
return tr | ||||
mpm@selenic.com
|
r1089 | |||
def recover(self): | ||||
Benoit Boissinot
|
r1749 | l = self.lock() | ||
mpm@selenic.com
|
r1089 | if os.path.exists(self.join("journal")): | ||
Benoit Boissinot
|
r1402 | self.ui.status(_("rolling back interrupted transaction\n")) | ||
Matt Mackall
|
r1516 | transaction.rollback(self.opener, self.join("journal")) | ||
Benoit Boissinot
|
r1784 | self.reload() | ||
Matt Mackall
|
r1516 | return True | ||
mpm@selenic.com
|
r1089 | else: | ||
Benoit Boissinot
|
r1402 | self.ui.warn(_("no interrupted transaction available\n")) | ||
Matt Mackall
|
r1516 | return False | ||
mpm@selenic.com
|
r1089 | |||
mason@suse.com
|
    def undo(self, wlock=None):
        """Roll back the last committed transaction using the 'undo'
        journal, restoring the saved dirstate as well.

        wlock: an already-held working-dir lock may be passed in to
        avoid re-acquiring it.
        """
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            # put back the dirstate as it was before the transaction
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # drop all in-memory state derived from the rolled-back data
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
mpm@selenic.com
|
r1089 | |||
Benoit Boissinot
|
    def wreload(self):
        # re-read the dirstate from disk (working-dir state)
        self.dirstate.read()
    def reload(self):
        # re-read store data from disk and invalidate derived caches
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
Benoit Boissinot
|
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
        """Acquire the lock file *lockname* inside .hg.

        wait: if false, a held lock raises immediately; otherwise retry
              with a timeout taken from ui.timeout (default 600 seconds).
        releasefn: callback invoked when the lock is released.
        acquirefn: callback invoked once the lock is acquired.

        Returns the lock object; raises util.Abort on timeout.
        """
        try:
            # first try a non-blocking acquire (timeout 0)
            l = lock.lock(self.join(lockname), 0, releasefn)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            try:
                # default to 600 seconds timeout
                l = lock.lock(self.join(lockname),
                              int(self.ui.config("ui", "timeout") or 600),
                              releasefn)
            except lock.LockHeld, inst:
                raise util.Abort(_("timeout while waiting for "
                                   "lock held by %s") % inst.args[0])
        if acquirefn:
            acquirefn()
        return l
    def lock(self, wait=1):
        # store lock: reload in-memory data once acquired
        return self.do_lock("lock", wait, acquirefn=self.reload)
Benoit Boissinot
|
r1751 | |||
    def wlock(self, wait=1):
        # working-dir lock: write the dirstate on release, re-read it
        # on acquire
        return self.do_lock("wlock", wait,
                            self.dirstate.write,
                            self.wreload)
Benoit Boissinot
|
r1531 | |||
Matt Mackall
|
r1716 | def checkfilemerge(self, filename, text, filelog, manifest1, manifest2): | ||
"determine whether a new filenode is needed" | ||||
fp1 = manifest1.get(filename, nullid) | ||||
fp2 = manifest2.get(filename, nullid) | ||||
if fp2 != nullid: | ||||
# is one parent an ancestor of the other? | ||||
fpa = filelog.ancestor(fp1, fp2) | ||||
if fpa == fp1: | ||||
fp1, fp2 = fp2, nullid | ||||
elif fpa == fp2: | ||||
fp2 = nullid | ||||
# is the file unmodified from the parent? report existing entry | ||||
if fp2 == nullid and text == filelog.read(fp1): | ||||
return (fp1, None, None) | ||||
return (None, fp1, fp2) | ||||
mason@suse.com
|
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit *files* with explicit parents, bypassing the normal
        status-driven commit path (used for imports/conversions).

        Parents default to the dirstate parents; the dirstate is only
        updated when p1 is still the original working-dir parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate if we're committing on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                # reuse the existing filenode when the file is unchanged
                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable in the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
Thomas Arendsen Hein
|
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """Commit changes in the working directory and return the new
        changeset node, or None if nothing was committed.

        files: explicit file list; if empty, commit everything reported
               modified/added/removed by self.changes(match).
        text:  commit message; if empty, an editor is launched.
        force: allow an empty commit on a single parent.
        lock/wlock: already-held locks may be passed in.

        NOTE(review): the 'lock' parameter shadows the file-level 'lock'
        module inside this method.

        Runs the 'precommit', 'pretxncommit' and 'commit' hooks.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify the explicit file list by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # an empty commit is only meaningful for a merge (p2 set) or force
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in filelog metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file unchanged: reuse the existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # build the template shown in the editor
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext))
            os.chdir(olddir)
            # an empty message aborts the commit
            if not edittext.rstrip():
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        # pretxncommit may still veto the commit before the transaction
        # is closed
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
def walk(self, node=None, files=[], match=util.always): | ||||
if node: | ||||
Benoit Boissinot
|
r1582 | fdict = dict.fromkeys(files) | ||
mpm@selenic.com
|
r1089 | for fn in self.manifest.read(self.changelog.read(node)[0]): | ||
Benoit Boissinot
|
r1582 | fdict.pop(fn, None) | ||
if match(fn): | ||||
yield 'm', fn | ||||
for fn in fdict: | ||||
self.ui.warn(_('%s: No such file in rev %s\n') % ( | ||||
util.pathto(self.getcwd(), fn), short(node))) | ||||
mpm@selenic.com
|
r1089 | else: | ||
for src, fn in self.dirstate.walk(files, match): | ||||
yield src, fn | ||||
mason@suse.com
|
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown), each a
        sorted list of file names.
        """

        # compare working-dir contents of fn with its stored version in mf
        def fcmp(fn, mf):
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        # manifest of a revision, restricted to names accepted by match
        def mfmatches(node):
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # take the wlock opportunistically so fixed-up dirstate
                # entries below can be written back; proceed without it
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown = (
                self.dirstate.changes(files, match))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # file is clean: record that in the dirstate
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown = [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown:
            l.sort()
        return (modified, added, removed, deleted, unknown)
mpm@selenic.com
|
r1089 | |||
mason@suse.com
|
r1712 | def add(self, list, wlock=None): | ||
if not wlock: | ||||
wlock = self.wlock() | ||||
mpm@selenic.com
|
r1089 | for f in list: | ||
p = self.wjoin(f) | ||||
if not os.path.exists(p): | ||||
Benoit Boissinot
|
r1402 | self.ui.warn(_("%s does not exist!\n") % f) | ||
mpm@selenic.com
|
r1089 | elif not os.path.isfile(p): | ||
Thomas Arendsen Hein
|
r1615 | self.ui.warn(_("%s not added: only files supported currently\n") | ||
% f) | ||||
mpm@selenic.com
|
r1089 | elif self.dirstate.state(f) in 'an': | ||
Benoit Boissinot
|
r1402 | self.ui.warn(_("%s already tracked!\n") % f) | ||
mpm@selenic.com
|
r1089 | else: | ||
self.dirstate.update([f], "a") | ||||
mason@suse.com
|
r1712 | def forget(self, list, wlock=None): | ||
if not wlock: | ||||
wlock = self.wlock() | ||||
mpm@selenic.com
|
r1089 | for f in list: | ||
if self.dirstate.state(f) not in 'ai': | ||||
Benoit Boissinot
|
r1402 | self.ui.warn(_("%s not added!\n") % f) | ||
mpm@selenic.com
|
r1089 | else: | ||
self.dirstate.forget([f]) | ||||
mason@suse.com
|
    def remove(self, list, unlink=False, wlock=None):
        """Schedule the files in *list* for removal at the next commit.

        unlink: also delete the files from the working directory; a file
        that is already gone is not an error.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already-missing files are fine; anything else is not
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # refuse to mark a file removed while it still exists
                # (with unlink=True it has been deleted above)
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
mason@suse.com
|
r1712 | def undelete(self, list, wlock=None): | ||
Matt Mackall
|
r1448 | p = self.dirstate.parents()[0] | ||
Benoit Boissinot
|
r1447 | mn = self.changelog.read(p)[0] | ||
mf = self.manifest.readflags(mn) | ||||
m = self.manifest.read(mn) | ||||
mason@suse.com
|
r1712 | if not wlock: | ||
wlock = self.wlock() | ||||
Benoit Boissinot
|
r1447 | for f in list: | ||
if self.dirstate.state(f) not in "r": | ||||
self.ui.warn("%s not removed!\n" % f) | ||||
else: | ||||
t = self.file(f).read(m[f]) | ||||
Benoit Boissinot
|
r1477 | self.wwrite(f, t) | ||
Benoit Boissinot
|
r1447 | util.set_exec(self.wjoin(f), mf[f]) | ||
self.dirstate.update([f], "n") | ||||
mason@suse.com
|
r1712 | def copy(self, source, dest, wlock=None): | ||
mpm@selenic.com
|
r1089 | p = self.wjoin(dest) | ||
if not os.path.exists(p): | ||||
Benoit Boissinot
|
r1402 | self.ui.warn(_("%s does not exist!\n") % dest) | ||
mpm@selenic.com
|
r1089 | elif not os.path.isfile(p): | ||
Benoit Boissinot
|
r1402 | self.ui.warn(_("copy failed: %s is not a file\n") % dest) | ||
mpm@selenic.com
|
r1089 | else: | ||
mason@suse.com
|
r1712 | if not wlock: | ||
wlock = self.wlock() | ||||
mpm@selenic.com
|
r1089 | if self.dirstate.state(dest) == '?': | ||
self.dirstate.update([dest], "a") | ||||
self.dirstate.copy(source, dest) | ||||
Thomas Arendsen Hein
|
r1551 | def heads(self, start=None): | ||
Benoit Boissinot
|
r1550 | heads = self.changelog.heads(start) | ||
# sort the output in rev descending order | ||||
heads = [(-self.changelog.rev(h), h) for h in heads] | ||||
heads.sort() | ||||
return [n for (r, n) in heads] | ||||
mpm@selenic.com
|
r1089 | |||
# branchlookup returns a dict giving a list of branches for | ||||
# each head. A branch is defined as the tag of a node or | ||||
# the branch of the node's parents. If a node has multiple | ||||
# branch tags, tags are eliminated if they are visible from other | ||||
# branch tags. | ||||
# | ||||
# So, for this graph: a->b->c->d->e | ||||
# \ / | ||||
# aa -----/ | ||||
# a has tag 2.6.12 | ||||
# d has tag 2.6.13 | ||||
# e would have branch tags for 2.6.12 and 2.6.13. Because the node | ||||
# for 2.6.12 can be reached from the node 2.6.13, that is eliminated | ||||
# from the list. | ||||
# | ||||
# It is possible that more than one head will have the same branch tag. | ||||
# callers need to check the result for multiple heads under the same | ||||
# branch tag if that is a problem for them (ie checkout of a specific | ||||
# branch). | ||||
# | ||||
# passing in a specific branch will limit the depth of the search | ||||
# through the parents. It won't limit the branches returned in the | ||||
# result though. | ||||
def branchlookup(self, heads=None, branch=None): | ||||
if not heads: | ||||
heads = self.heads() | ||||
headt = [ h for h in heads ] | ||||
chlog = self.changelog | ||||
branches = {} | ||||
merges = [] | ||||
seenmerge = {} | ||||
# traverse the tree once for each head, recording in the branches | ||||
# dict which tags are visible from this head. The branches | ||||
# dict also records which tags are visible from each tag | ||||
# while we traverse. | ||||
while headt or merges: | ||||
if merges: | ||||
n, found = merges.pop() | ||||
visit = [n] | ||||
else: | ||||
h = headt.pop() | ||||
visit = [h] | ||||
found = [h] | ||||
seen = {} | ||||
while visit: | ||||
n = visit.pop() | ||||
if n in seen: | ||||
continue | ||||
pp = chlog.parents(n) | ||||
tags = self.nodetags(n) | ||||
if tags: | ||||
for x in tags: | ||||
if x == 'tip': | ||||
continue | ||||
for f in found: | ||||
branches.setdefault(f, {})[n] = 1 | ||||
branches.setdefault(n, {})[n] = 1 | ||||
break | ||||
if n not in found: | ||||
found.append(n) | ||||
if branch in tags: | ||||
continue | ||||
seen[n] = 1 | ||||
if pp[1] != nullid and n not in seenmerge: | ||||
merges.append((pp[1], [x for x in found])) | ||||
seenmerge[n] = 1 | ||||
if pp[0] != nullid: | ||||
visit.append(pp[0]) | ||||
# traverse the branches dict, eliminating branch tags from each | ||||
# head that are visible from another branch tag for that head. | ||||
out = {} | ||||
viscache = {} | ||||
for h in heads: | ||||
def visible(node): | ||||
if node in viscache: | ||||
return viscache[node] | ||||
ret = {} | ||||
visit = [node] | ||||
while visit: | ||||
x = visit.pop() | ||||
if x in viscache: | ||||
ret.update(viscache[x]) | ||||
elif x not in ret: | ||||
ret[x] = 1 | ||||
if x in branches: | ||||
visit[len(visit):] = branches[x].keys() | ||||
viscache[node] = ret | ||||
return ret | ||||
if h not in branches: | ||||
continue | ||||
# O(n^2), but somewhat limited. This only searches the | ||||
# tags visible from a specific head, not all the tags in the | ||||
# whole repo. | ||||
for b in branches[h]: | ||||
vis = False | ||||
for bb in branches[h].keys(): | ||||
if b != bb: | ||||
if b in visible(bb): | ||||
vis = True | ||||
break | ||||
if not vis: | ||||
l = out.setdefault(h, []) | ||||
l[len(l):] = self.nodetags(b) | ||||
return out | ||||
    def branches(self, nodes):
        """For each given node (default: tip), follow first parents down
        to the start of its linear branch segment and return a list of
        (head, root, parent1, parent2) tuples."""
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # walk first parents until hitting a merge (p2 set) or the
            # root (p1 == nullid); the loop exits via the break
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
def between(self, pairs): | ||||
r = [] | ||||
for top, bottom in pairs: | ||||
n, l, i = top, [], 0 | ||||
f = 1 | ||||
while n != bottom: | ||||
p = self.changelog.parents(n)[0] | ||||
if i == f: | ||||
l.append(n) | ||||
f = f * 2 | ||||
n = p | ||||
i += 1 | ||||
r.append(l) | ||||
return r | ||||
def findincoming(self, remote, base=None, heads=None): | ||||
m = self.changelog.nodemap | ||||
search = [] | ||||
fetch = {} | ||||
seen = {} | ||||
seenbranch = {} | ||||
if base == None: | ||||
base = {} | ||||
# assume we're closer to the tip than the root | ||||
# and start by examining the heads | ||||
Benoit Boissinot
|
r1402 | self.ui.status(_("searching for changes\n")) | ||
mpm@selenic.com
|
r1089 | |||
if not heads: | ||||
heads = remote.heads() | ||||
unknown = [] | ||||
for h in heads: | ||||
if h not in m: | ||||
unknown.append(h) | ||||
else: | ||||
base[h] = 1 | ||||
if not unknown: | ||||
return None | ||||
rep = {} | ||||
reqcnt = 0 | ||||
# search through remote branches | ||||
# a 'branch' here is a linear segment of history, with four parts: | ||||
# head, root, first parent, second parent | ||||
# (a branch always has two parents (or none) by definition) | ||||
unknown = remote.branches(unknown) | ||||
while unknown: | ||||
r = [] | ||||
while unknown: | ||||
n = unknown.pop(0) | ||||
if n[0] in seen: | ||||
continue | ||||
Thomas Arendsen Hein
|
r1615 | self.ui.debug(_("examining %s:%s\n") | ||
% (short(n[0]), short(n[1]))) | ||||
mpm@selenic.com
|
r1089 | if n[0] == nullid: | ||
break | ||||
if n in seenbranch: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("branch already found\n")) | ||
mpm@selenic.com
|
r1089 | continue | ||
if n[1] and n[1] in m: # do we know the base? | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("found incomplete branch %s:%s\n") | ||
mpm@selenic.com
|
r1089 | % (short(n[0]), short(n[1]))) | ||
search.append(n) # schedule branch range for scanning | ||||
seenbranch[n] = 1 | ||||
else: | ||||
if n[1] not in seen and n[1] not in fetch: | ||||
if n[2] in m and n[3] in m: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("found new changeset %s\n") % | ||
mpm@selenic.com
|
r1089 | short(n[1])) | ||
fetch[n[1]] = 1 # earliest unknown | ||||
base[n[2]] = 1 # latest known | ||||
continue | ||||
for a in n[2:4]: | ||||
if a not in rep: | ||||
r.append(a) | ||||
rep[a] = 1 | ||||
seen[n[0]] = 1 | ||||
if r: | ||||
reqcnt += 1 | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("request %d: %s\n") % | ||
mpm@selenic.com
|
r1089 | (reqcnt, " ".join(map(short, r)))) | ||
for p in range(0, len(r), 10): | ||||
for b in remote.branches(r[p:p+10]): | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("received %s:%s\n") % | ||
mpm@selenic.com
|
r1089 | (short(b[0]), short(b[1]))) | ||
if b[0] in m: | ||||
Thomas Arendsen Hein
|
r1615 | self.ui.debug(_("found base node %s\n") | ||
% short(b[0])) | ||||
mpm@selenic.com
|
r1089 | base[b[0]] = 1 | ||
elif b[0] not in seen: | ||||
unknown.append(b) | ||||
# do binary search on the branches we found | ||||
while search: | ||||
n = search.pop(0) | ||||
reqcnt += 1 | ||||
l = remote.between([(n[0], n[1])])[0] | ||||
l.append(n[1]) | ||||
p = n[0] | ||||
f = 1 | ||||
for i in l: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i))) | ||
mpm@selenic.com
|
r1089 | if i in m: | ||
if f <= 2: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("found new branch changeset %s\n") % | ||
mpm@selenic.com
|
r1089 | short(p)) | ||
fetch[p] = 1 | ||||
base[i] = 1 | ||||
else: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("narrowed branch search to %s:%s\n") | ||
mpm@selenic.com
|
r1089 | % (short(p), short(i))) | ||
search.append((p, i)) | ||||
break | ||||
p, f = i, f * 2 | ||||
# sanity check our fetch list | ||||
for f in fetch.keys(): | ||||
if f in m: | ||||
Benoit Boissinot
|
r1402 | raise repo.RepoError(_("already have changeset ") + short(f[:4])) | ||
mpm@selenic.com
|
r1089 | |||
if base.keys() == [nullid]: | ||||
Benoit Boissinot
|
r1402 | self.ui.warn(_("warning: pulling from an unrelated repository!\n")) | ||
mpm@selenic.com
|
r1089 | |||
Benoit Boissinot
|
r1402 | self.ui.note(_("found new changesets starting at ") + | ||
mpm@selenic.com
|
r1089 | " ".join([short(f) for f in fetch]) + "\n") | ||
Benoit Boissinot
|
r1402 | self.ui.debug(_("%d total queries\n") % reqcnt) | ||
mpm@selenic.com
|
r1089 | |||
return fetch.keys() | ||||
def findoutgoing(self, remote, base=None, heads=None): | ||||
if base == None: | ||||
base = {} | ||||
self.findincoming(remote, base, heads) | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("common changesets up to ") | ||
mpm@selenic.com
|
r1089 | + " ".join(map(short, base.keys())) + "\n") | ||
remain = dict.fromkeys(self.changelog.nodemap) | ||||
# prune everything remote has from the tree | ||||
del remain[nullid] | ||||
remove = base.keys() | ||||
while remove: | ||||
n = remove.pop(0) | ||||
if n in remain: | ||||
del remain[n] | ||||
for p in self.changelog.parents(n): | ||||
remove.append(p) | ||||
# find every node whose parents have been pruned | ||||
subset = [] | ||||
for n in remain: | ||||
p1, p2 = self.changelog.parents(n) | ||||
if p1 not in remain and p2 not in remain: | ||||
subset.append(n) | ||||
# this is the set of all roots we have to push | ||||
return subset | ||||
Thomas Arendsen Hein
|
r1615 | def pull(self, remote, heads=None): | ||
Benoit Boissinot
|
r1749 | l = self.lock() | ||
mpm@selenic.com
|
r1089 | |||
# if we have an empty repo, fetch everything | ||||
if self.changelog.tip() == nullid: | ||||
Benoit Boissinot
|
r1402 | self.ui.status(_("requesting all changes\n")) | ||
mpm@selenic.com
|
r1089 | fetch = [nullid] | ||
else: | ||||
fetch = self.findincoming(remote) | ||||
if not fetch: | ||||
Benoit Boissinot
|
r1402 | self.ui.status(_("no changes found\n")) | ||
mpm@selenic.com
|
r1089 | return 1 | ||
Eric Hopper
|
r1461 | if heads is None: | ||
Vadim Gelfer
|
r1736 | cg = remote.changegroup(fetch, 'pull') | ||
Eric Hopper
|
r1461 | else: | ||
Vadim Gelfer
|
r1736 | cg = remote.changegroupsubset(fetch, heads, 'pull') | ||
mpm@selenic.com
|
r1089 | return self.addchangegroup(cg) | ||
Benoit Boissinot
|
r1781 | def push(self, remote, force=False, revs=None): | ||
mpm@selenic.com
|
r1089 | lock = remote.lock() | ||
base = {} | ||||
heads = remote.heads() | ||||
inc = self.findincoming(remote, base, heads) | ||||
if not force and inc: | ||||
Benoit Boissinot
|
r1402 | self.ui.warn(_("abort: unsynced remote changes!\n")) | ||
self.ui.status(_("(did you forget to sync? use push -f to force)\n")) | ||||
mpm@selenic.com
|
r1089 | return 1 | ||
update = self.findoutgoing(remote, base) | ||||
Benoit Boissinot
|
r1781 | if revs is not None: | ||
msng_cl, bases, heads = self.changelog.nodesbetween(update, revs) | ||||
else: | ||||
bases, heads = update, self.changelog.heads() | ||||
if not bases: | ||||
Benoit Boissinot
|
r1402 | self.ui.status(_("no changes found\n")) | ||
mpm@selenic.com
|
r1089 | return 1 | ||
elif not force: | ||||
Benoit Boissinot
|
r1781 | if len(bases) < len(heads): | ||
Benoit Boissinot
|
r1402 | self.ui.warn(_("abort: push creates new remote branches!\n")) | ||
self.ui.status(_("(did you forget to merge?" | ||||
" use push -f to force)\n")) | ||||
mpm@selenic.com
|
r1089 | return 1 | ||
Benoit Boissinot
|
r1781 | if revs is None: | ||
Benoit Boissinot
|
r1782 | cg = self.changegroup(update, 'push') | ||
Benoit Boissinot
|
r1781 | else: | ||
Benoit Boissinot
|
r1782 | cg = self.changegroupsubset(update, revs, 'push') | ||
mpm@selenic.com
|
r1089 | return remote.addchangegroup(cg) | ||
Vadim Gelfer
|
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases:  changelog nodes assumed to be known to the recipient
        heads:  changelog nodes bounding the subset to send
        source: opaque tag (e.g. 'push'/'pull') passed to the
                preoutgoing/outgoing hooks
        Returns a util.chunkbuffer wrapping the lazily generated stream.
        """

        # give hooks a chance to veto the operation before any work is done
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)

        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
Vadim Gelfer
|
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: changelog nodes the recipient is assumed to have
        source:    opaque tag passed to the preoutgoing/outgoing hooks
        Returns a util.chunkbuffer wrapping the lazily generated stream.
        """

        # give hooks a chance to veto the operation before any work is done
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # everything reachable from basenodes that isn't below them
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revisions being sent, used to pick
        # out the manifest/file revisions that belong to them
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # changesets are their own owners, so the lookup is identity
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is in revset,
        # in revision (storage) order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # collect the names of all files touched by the outgoing changesets
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node to the changelog node that introduced it
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changesets first; the collector fills changedfiles as a
            # side effect of streaming them
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the matching manifest revisions
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # then one group per changed file, each preceded by a
            # length-prefixed filename chunk
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # empty chunk: end of stream
            yield struct.pack(">l", 0)

        self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
mpm@selenic.com
|
r1089 | |||
    def addchangegroup(self, source):
        """Apply a changegroup read from the file-like object source.

        The stream is consumed in the order changegroup()/
        changegroupsubset() produce it: one changeset group, one manifest
        group, then a (filename, group) sequence terminated by an empty
        chunk.  Fires the prechangegroup, pretxnchangegroup, changegroup
        and incoming hooks at the corresponding points.
        """
        def getchunk():
            # read one length-prefixed chunk; "" marks the end of a group
            d = source.read(4)
            if not d:
                return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4:
                # the 4-byte prefix counts itself, so <= 4 means empty
                return ""
            d = source.read(l - 4)
            if len(d) < l - 4:
                raise repo.RepoError(_("premature EOF reading chunk"
                                       " (got %d bytes, expected %d)")
                                     % (len(d), l - 4))
            return d

        def getgroup():
            # yield chunks until the empty terminator chunk
            while 1:
                c = getchunk()
                if not c:
                    break
                yield c

        def csmap(x):
            # linkrev mapper for changesets: the next changelog revision
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        def revmap(x):
            # map a changeset node to its local revision number
            return self.changelog.rev(x)

        if not source:
            return

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            # tip unchanged: make cnr - cor come out as zero changesets
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = getchunk()
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        # hooks may still abort here, before the transaction commits
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            # notify about the group as a whole...
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            # ...and about each new changeset individually
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))
mpm@selenic.com
|
r1316 | |||
mpm@selenic.com
|
r1089 | def update(self, node, allow=False, force=False, choose=None, | ||
mason@suse.com
|
r1712 | moddirstate=True, forcemerge=False, wlock=None): | ||
mpm@selenic.com
|
r1089 | pl = self.dirstate.parents() | ||
if not force and pl[1] != nullid: | ||||
Benoit Boissinot
|
r1402 | self.ui.warn(_("aborting: outstanding uncommitted merges\n")) | ||
mpm@selenic.com
|
r1089 | return 1 | ||
Benoit Boissinot
|
r1663 | err = False | ||
mpm@selenic.com
|
r1089 | p1, p2 = pl[0], node | ||
pa = self.changelog.ancestor(p1, p2) | ||||
m1n = self.changelog.read(p1)[0] | ||||
m2n = self.changelog.read(p2)[0] | ||||
man = self.manifest.ancestor(m1n, m2n) | ||||
m1 = self.manifest.read(m1n) | ||||
mf1 = self.manifest.readflags(m1n) | ||||
Thomas Arendsen Hein
|
r1629 | m2 = self.manifest.read(m2n).copy() | ||
mpm@selenic.com
|
r1089 | mf2 = self.manifest.readflags(m2n) | ||
ma = self.manifest.read(man) | ||||
mfa = self.manifest.readflags(man) | ||||
Thomas Arendsen Hein
|
r1619 | modified, added, removed, deleted, unknown = self.changes() | ||
mpm@selenic.com
|
r1089 | |||
Benoit Boissinot
|
r1674 | # is this a jump, or a merge? i.e. is there a linear path | ||
# from p1 to p2? | ||||
linear_path = (pa == p1 or pa == p2) | ||||
if allow and linear_path: | ||||
raise util.Abort(_("there is nothing to merge, " | ||||
"just use 'hg update'")) | ||||
Benoit Boissinot
|
r1581 | if allow and not forcemerge: | ||
Thomas Arendsen Hein
|
r1618 | if modified or added or removed: | ||
Benoit Boissinot
|
r1581 | raise util.Abort(_("outstanding uncommited changes")) | ||
if not forcemerge and not force: | ||||
Thomas Arendsen Hein
|
r1618 | for f in unknown: | ||
Benoit Boissinot
|
r1581 | if f in m2: | ||
Thomas Arendsen Hein
|
r1615 | t1 = self.wread(f) | ||
t2 = self.file(f).read(m2[f]) | ||||
if cmp(t1, t2) != 0: | ||||
Benoit Boissinot
|
r1581 | raise util.Abort(_("'%s' already exists in the working" | ||
" dir and differs from remote") % f) | ||||
mpm@selenic.com
|
r1089 | # resolve the manifest to determine which files | ||
# we care about merging | ||||
Benoit Boissinot
|
r1402 | self.ui.note(_("resolving manifests\n")) | ||
self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") % | ||||
mpm@selenic.com
|
r1089 | (force, allow, moddirstate, linear_path)) | ||
Benoit Boissinot
|
r1402 | self.ui.debug(_(" ancestor %s local %s remote %s\n") % | ||
mpm@selenic.com
|
r1089 | (short(man), short(m1n), short(m2n))) | ||
merge = {} | ||||
get = {} | ||||
remove = [] | ||||
# construct a working dir manifest | ||||
mw = m1.copy() | ||||
mfw = mf1.copy() | ||||
Thomas Arendsen Hein
|
r1618 | umap = dict.fromkeys(unknown) | ||
mpm@selenic.com
|
r1089 | |||
Thomas Arendsen Hein
|
r1618 | for f in added + modified + unknown: | ||
mpm@selenic.com
|
r1089 | mw[f] = "" | ||
mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False)) | ||||
mason@suse.com
|
r1712 | if moddirstate and not wlock: | ||
Benoit Boissinot
|
r1531 | wlock = self.wlock() | ||
Thomas Arendsen Hein
|
r1621 | for f in deleted + removed: | ||
Thomas Arendsen Hein
|
r1615 | if f in mw: | ||
del mw[f] | ||||
mpm@selenic.com
|
r1089 | |||
# If we're jumping between revisions (as opposed to merging), | ||||
# and if neither the working directory nor the target rev has | ||||
# the file, then we need to remove it from the dirstate, to | ||||
# prevent the dirstate from listing the file when it is no | ||||
# longer in the manifest. | ||||
if moddirstate and linear_path and f not in m2: | ||||
self.dirstate.forget((f,)) | ||||
# Compare manifests | ||||
for f, n in mw.iteritems(): | ||||
Thomas Arendsen Hein
|
r1615 | if choose and not choose(f): | ||
continue | ||||
mpm@selenic.com
|
r1089 | if f in m2: | ||
s = 0 | ||||
# is the wfile new since m1, and match m2? | ||||
if f not in m1: | ||||
t1 = self.wread(f) | ||||
t2 = self.file(f).read(m2[f]) | ||||
if cmp(t1, t2) == 0: | ||||
n = m2[f] | ||||
del t1, t2 | ||||
# are files different? | ||||
if n != m2[f]: | ||||
a = ma.get(f, nullid) | ||||
# are both different from the ancestor? | ||||
if n != a and m2[f] != a: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_(" %s versions differ, resolve\n") % f) | ||
mpm@selenic.com
|
r1089 | # merge executable bits | ||
# "if we changed or they changed, change in merge" | ||||
a, b, c = mfa.get(f, 0), mfw[f], mf2[f] | ||||
mode = ((a^b) | (a^c)) ^ a | ||||
merge[f] = (m1.get(f, nullid), m2[f], mode) | ||||
s = 1 | ||||
# are we clobbering? | ||||
# is remote's version newer? | ||||
# or are we going back in time? | ||||
elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]): | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_(" remote %s is newer, get\n") % f) | ||
mpm@selenic.com
|
r1089 | get[f] = m2[f] | ||
s = 1 | ||||
elif f in umap: | ||||
# this unknown file is the same as the checkout | ||||
get[f] = m2[f] | ||||
if not s and mfw[f] != mf2[f]: | ||||
if force: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_(" updating permissions for %s\n") % f) | ||
mpm@selenic.com
|
r1089 | util.set_exec(self.wjoin(f), mf2[f]) | ||
else: | ||||
a, b, c = mfa.get(f, 0), mfw[f], mf2[f] | ||||
mode = ((a^b) | (a^c)) ^ a | ||||
if mode != b: | ||||
Thomas Arendsen Hein
|
r1615 | self.ui.debug(_(" updating permissions for %s\n") | ||
% f) | ||||
mpm@selenic.com
|
r1089 | util.set_exec(self.wjoin(f), mode) | ||
del m2[f] | ||||
elif f in ma: | ||||
if n != ma[f]: | ||||
Benoit Boissinot
|
r1402 | r = _("d") | ||
mpm@selenic.com
|
r1089 | if not force and (linear_path or allow): | ||
r = self.ui.prompt( | ||||
Benoit Boissinot
|
r1402 | (_(" local changed %s which remote deleted\n") % f) + | ||
_("(k)eep or (d)elete?"), _("[kd]"), _("k")) | ||||
if r == _("d"): | ||||
mpm@selenic.com
|
r1089 | remove.append(f) | ||
else: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("other deleted %s\n") % f) | ||
mpm@selenic.com
|
r1089 | remove.append(f) # other deleted it | ||
else: | ||||
mpm@selenic.com
|
r1236 | # file is created on branch or in working directory | ||
if force and f not in umap: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("remote deleted %s, clobbering\n") % f) | ||
mpm@selenic.com
|
r1236 | remove.append(f) | ||
elif n == m1.get(f, nullid): # same as parent | ||||
mpm@selenic.com
|
r1234 | if p2 == pa: # going backwards? | ||
Benoit Boissinot
|
r1402 | self.ui.debug(_("remote deleted %s\n") % f) | ||
mpm@selenic.com
|
r1234 | remove.append(f) | ||
else: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("local modified %s, keeping\n") % f) | ||
mpm@selenic.com
|
r1089 | else: | ||
Benoit Boissinot
|
r1402 | self.ui.debug(_("working dir created %s, keeping\n") % f) | ||
mpm@selenic.com
|
r1089 | |||
for f, n in m2.iteritems(): | ||||
Thomas Arendsen Hein
|
r1615 | if choose and not choose(f): | ||
continue | ||||
if f[0] == "/": | ||||
continue | ||||
mpm@selenic.com
|
r1089 | if f in ma and n != ma[f]: | ||
Benoit Boissinot
|
r1402 | r = _("k") | ||
mpm@selenic.com
|
r1089 | if not force and (linear_path or allow): | ||
r = self.ui.prompt( | ||||
Benoit Boissinot
|
r1402 | (_("remote changed %s which local deleted\n") % f) + | ||
_("(k)eep or (d)elete?"), _("[kd]"), _("k")) | ||||
Thomas Arendsen Hein
|
r1615 | if r == _("k"): | ||
get[f] = n | ||||
mpm@selenic.com
|
r1089 | elif f not in ma: | ||
Benoit Boissinot
|
r1402 | self.ui.debug(_("remote created %s\n") % f) | ||
mpm@selenic.com
|
r1089 | get[f] = n | ||
else: | ||||
if force or p2 == pa: # going backwards? | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("local deleted %s, recreating\n") % f) | ||
mpm@selenic.com
|
r1089 | get[f] = n | ||
else: | ||||
Benoit Boissinot
|
r1402 | self.ui.debug(_("local deleted %s\n") % f) | ||
mpm@selenic.com
|
r1089 | |||
del mw, m1, m2, ma | ||||
if force: | ||||
for f in merge: | ||||
get[f] = merge[f][1] | ||||
merge = {} | ||||
if linear_path or force: | ||||
# we don't need to do any magic, just jump to the new rev | ||||
branch_merge = False | ||||
p1, p2 = p2, nullid | ||||
else: | ||||
if not allow: | ||||
Benoit Boissinot
|
r1402 | self.ui.status(_("this update spans a branch" | ||
" affecting the following files:\n")) | ||||
mpm@selenic.com
|
r1089 | fl = merge.keys() + get.keys() | ||
fl.sort() | ||||
for f in fl: | ||||
cf = "" | ||||
Thomas Arendsen Hein
|
r1615 | if f in merge: | ||
cf = _(" (resolve)") | ||||
mpm@selenic.com
|
r1089 | self.ui.status(" %s%s\n" % (f, cf)) | ||
Benoit Boissinot
|
r1402 | self.ui.warn(_("aborting update spanning branches!\n")) | ||
self.ui.status(_("(use update -m to merge across branches" | ||||
" or -C to lose changes)\n")) | ||||
mpm@selenic.com
|
r1089 | return 1 | ||
branch_merge = True | ||||
# get the files we don't need to change | ||||
files = get.keys() | ||||
files.sort() | ||||
for f in files: | ||||
Thomas Arendsen Hein
|
r1615 | if f[0] == "/": | ||
continue | ||||
Benoit Boissinot
|
r1402 | self.ui.note(_("getting %s\n") % f) | ||
mpm@selenic.com
|
r1089 | t = self.file(f).read(get[f]) | ||
Benoit Boissinot
|
r1477 | self.wwrite(f, t) | ||
mpm@selenic.com
|
r1089 | util.set_exec(self.wjoin(f), mf2[f]) | ||
if moddirstate: | ||||
if branch_merge: | ||||
self.dirstate.update([f], 'n', st_mtime=-1) | ||||
else: | ||||
self.dirstate.update([f], 'n') | ||||
# merge the tricky bits | ||||
files = merge.keys() | ||||
files.sort() | ||||
for f in files: | ||||
Benoit Boissinot
|
r1402 | self.ui.status(_("merging %s\n") % f) | ||
mpm@selenic.com
|
r1089 | my, other, flag = merge[f] | ||
Benoit Boissinot
|
r1663 | ret = self.merge3(f, my, other) | ||
if ret: | ||||
err = True | ||||
mpm@selenic.com
|
r1089 | util.set_exec(self.wjoin(f), flag) | ||
if moddirstate: | ||||
if branch_merge: | ||||
# We've done a branch merge, mark this file as merged | ||||
# so that we properly record the merger later | ||||
self.dirstate.update([f], 'm') | ||||
else: | ||||
# We've update-merged a locally modified file, so | ||||
# we set the dirstate to emulate a normal checkout | ||||
# of that file some time in the past. Thus our | ||||
# merge will appear as a normal local file | ||||
# modification. | ||||
f_len = len(self.file(f).read(other)) | ||||
self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1) | ||||
remove.sort() | ||||
for f in remove: | ||||
Benoit Boissinot
|
r1402 | self.ui.note(_("removing %s\n") % f) | ||
Thomas Arendsen Hein
|
r1835 | util.audit_path(f) | ||
mpm@selenic.com
|
r1089 | try: | ||
Benoit Boissinot
|
r1415 | util.unlink(self.wjoin(f)) | ||
mpm@selenic.com
|
r1089 | except OSError, inst: | ||
Vadim Gelfer
|
r1398 | if inst.errno != errno.ENOENT: | ||
Benoit Boissinot
|
r1402 | self.ui.warn(_("update failed to remove %s: %s!\n") % | ||
Vadim Gelfer
|
r1398 | (f, inst.strerror)) | ||
mpm@selenic.com
|
r1089 | if moddirstate: | ||
if branch_merge: | ||||
self.dirstate.update(remove, 'r') | ||||
else: | ||||
self.dirstate.forget(remove) | ||||
Matt Mackall
|
r1495 | if moddirstate: | ||
self.dirstate.setparents(p1, p2) | ||||
Benoit Boissinot
|
r1663 | return err | ||
Matt Mackall
|
r1495 | |||
mpm@selenic.com
|
r1089 | def merge3(self, fn, my, other): | ||
"""perform a 3-way merge in the working directory""" | ||||
def temp(prefix, node): | ||||
pre = "%s~%s." % (os.path.basename(fn), prefix) | ||||
(fd, name) = tempfile.mkstemp("", pre) | ||||
f = os.fdopen(fd, "wb") | ||||
self.wwrite(fn, fl.read(node), f) | ||||
f.close() | ||||
return name | ||||
fl = self.file(fn) | ||||
base = fl.ancestor(my, other) | ||||
a = self.wjoin(fn) | ||||
b = temp("base", base) | ||||
c = temp("other", other) | ||||
Benoit Boissinot
|
r1402 | self.ui.note(_("resolving %s\n") % fn) | ||
self.ui.debug(_("file %s: my %s other %s ancestor %s\n") % | ||||
Matt Mackall
|
r1349 | (fn, short(my), short(other), short(base))) | ||
mpm@selenic.com
|
r1089 | |||
cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge") | ||||
or "hgmerge") | ||||
michael.w.dales@intel.com
|
r1427 | r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c)) | ||
mpm@selenic.com
|
r1089 | if r: | ||
Benoit Boissinot
|
r1402 | self.ui.warn(_("merging %s failed!\n") % fn) | ||
mpm@selenic.com
|
r1089 | |||
os.unlink(b) | ||||
os.unlink(c) | ||||
Benoit Boissinot
|
r1663 | return r | ||
mpm@selenic.com
|
r1089 | |||
def verify(self): | ||||
filelinkrevs = {} | ||||
filenodes = {} | ||||
changesets = revisions = files = 0 | ||||
Matt Mackall
|
r1383 | errors = [0] | ||
Matt Mackall
|
r1382 | neededmanifests = {} | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r1383 | def err(msg): | ||
self.ui.warn(msg + "\n") | ||||
errors[0] += 1 | ||||
Matt Mackall
|
r1667 | def checksize(obj, name): | ||
d = obj.checksize() | ||||
if d[0]: | ||||
err(_("%s data length off by %d bytes") % (name, d[0])) | ||||
if d[1]: | ||||
err(_("%s index contains %d extra bytes") % (name, d[1])) | ||||
mpm@selenic.com
|
r1089 | seen = {} | ||
Benoit Boissinot
|
r1402 | self.ui.status(_("checking changesets\n")) | ||
Matt Mackall
|
r1667 | checksize(self.changelog, "changelog") | ||
mpm@selenic.com
|
r1089 | for i in range(self.changelog.count()): | ||
changesets += 1 | ||||
n = self.changelog.node(i) | ||||
Matt Mackall
|
r1382 | l = self.changelog.linkrev(n) | ||
if l != i: | ||||
Benoit Boissinot
|
r1402 | err(_("incorrect link (%d) for changeset revision %d") %(l, i)) | ||
mpm@selenic.com
|
r1089 | if n in seen: | ||
Benoit Boissinot
|
r1402 | err(_("duplicate changeset at revision %d") % i) | ||
mpm@selenic.com
|
r1089 | seen[n] = 1 | ||
for p in self.changelog.parents(n): | ||||
if p not in self.changelog.nodemap: | ||||
Benoit Boissinot
|
r1402 | err(_("changeset %s has unknown parent %s") % | ||
mpm@selenic.com
|
r1089 | (short(n), short(p))) | ||
try: | ||||
changes = self.changelog.read(n) | ||||
Matt Mackall
|
r1492 | except KeyboardInterrupt: | ||
self.ui.warn(_("interrupted")) | ||||
raise | ||||
mpm@selenic.com
|
r1089 | except Exception, inst: | ||
Benoit Boissinot
|
r1402 | err(_("unpacking changeset %s: %s") % (short(n), inst)) | ||
mpm@selenic.com
|
r1089 | |||
Matt Mackall
|
r1382 | neededmanifests[changes[0]] = n | ||
mpm@selenic.com
|
r1089 | for f in changes[3]: | ||
filelinkrevs.setdefault(f, []).append(i) | ||||
seen = {} | ||||
Benoit Boissinot
|
r1402 | self.ui.status(_("checking manifests\n")) | ||
Matt Mackall
|
r1667 | checksize(self.manifest, "manifest") | ||
mpm@selenic.com
|
r1089 | for i in range(self.manifest.count()): | ||
n = self.manifest.node(i) | ||||
Matt Mackall
|
r1382 | l = self.manifest.linkrev(n) | ||
if l < 0 or l >= self.changelog.count(): | ||||
Benoit Boissinot
|
r1402 | err(_("bad manifest link (%d) at revision %d") % (l, i)) | ||
Matt Mackall
|
r1382 | |||
if n in neededmanifests: | ||||
del neededmanifests[n] | ||||
mpm@selenic.com
|
r1089 | if n in seen: | ||
Benoit Boissinot
|
r1402 | err(_("duplicate manifest at revision %d") % i) | ||
Matt Mackall
|
r1383 | |||
mpm@selenic.com
|
r1089 | seen[n] = 1 | ||
for p in self.manifest.parents(n): | ||||
if p not in self.manifest.nodemap: | ||||
Benoit Boissinot
|
r1402 | err(_("manifest %s has unknown parent %s") % | ||
Matt Mackall
|
r1383 | (short(n), short(p))) | ||
mpm@selenic.com
|
r1089 | |||
try: | ||||
delta = mdiff.patchtext(self.manifest.delta(n)) | ||||
except KeyboardInterrupt: | ||||
Benoit Boissinot
|
r1402 | self.ui.warn(_("interrupted")) | ||
mpm@selenic.com
|
r1097 | raise | ||
mpm@selenic.com
|
r1089 | except Exception, inst: | ||
Benoit Boissinot
|
r1402 | err(_("unpacking manifest %s: %s") % (short(n), inst)) | ||
mpm@selenic.com
|
r1089 | |||
ff = [ l.split('\0') for l in delta.splitlines() ] | ||||
for f, fn in ff: | ||||
filenodes.setdefault(f, {})[bin(fn[:40])] = 1 | ||||
Benoit Boissinot
|
r1402 | self.ui.status(_("crosschecking files in changesets and manifests\n")) | ||
Matt Mackall
|
r1382 | |||
Thomas Arendsen Hein
|
r1615 | for m, c in neededmanifests.items(): | ||
Benoit Boissinot
|
r1402 | err(_("Changeset %s refers to unknown manifest %s") % | ||
Matt Mackall
|
r1384 | (short(m), short(c))) | ||
Matt Mackall
|
r1382 | del neededmanifests | ||
mpm@selenic.com
|
r1089 | for f in filenodes: | ||
if f not in filelinkrevs: | ||||
Benoit Boissinot
|
r1402 | err(_("file %s in manifest but not in changesets") % f) | ||
mpm@selenic.com
|
r1089 | |||
for f in filelinkrevs: | ||||
if f not in filenodes: | ||||
Benoit Boissinot
|
r1402 | err(_("file %s in changeset but not in manifest") % f) | ||
mpm@selenic.com
|
r1089 | |||
Benoit Boissinot
|
r1402 | self.ui.status(_("checking files\n")) | ||
mpm@selenic.com
|
r1089 | ff = filenodes.keys() | ||
ff.sort() | ||||
for f in ff: | ||||
Thomas Arendsen Hein
|
r1615 | if f == "/dev/null": | ||
continue | ||||
mpm@selenic.com
|
r1089 | files += 1 | ||
fl = self.file(f) | ||||
Matt Mackall
|
r1667 | checksize(fl, f) | ||
Matt Mackall
|
r1493 | |||
Thomas Arendsen Hein
|
r1615 | nodes = {nullid: 1} | ||
mpm@selenic.com
|
r1089 | seen = {} | ||
for i in range(fl.count()): | ||||
revisions += 1 | ||||
n = fl.node(i) | ||||
if n in seen: | ||||
Benoit Boissinot
|
r1402 | err(_("%s: duplicate revision %d") % (f, i)) | ||
mpm@selenic.com
|
r1089 | if n not in filenodes[f]: | ||
Benoit Boissinot
|
r1402 | err(_("%s: %d:%s not in manifests") % (f, i, short(n))) | ||
mpm@selenic.com
|
r1089 | else: | ||
del filenodes[f][n] | ||||
flr = fl.linkrev(n) | ||||
if flr not in filelinkrevs[f]: | ||||
Benoit Boissinot
|
r1402 | err(_("%s:%s points to unexpected changeset %d") | ||
Matt Mackall
|
r1383 | % (f, short(n), flr)) | ||
mpm@selenic.com
|
r1089 | else: | ||
filelinkrevs[f].remove(flr) | ||||
# verify contents | ||||
try: | ||||
t = fl.read(n) | ||||
Matt Mackall
|
r1492 | except KeyboardInterrupt: | ||
self.ui.warn(_("interrupted")) | ||||
raise | ||||
mpm@selenic.com
|
r1089 | except Exception, inst: | ||
Benoit Boissinot
|
r1402 | err(_("unpacking file %s %s: %s") % (f, short(n), inst)) | ||
mpm@selenic.com
|
r1089 | |||
# verify parents | ||||
(p1, p2) = fl.parents(n) | ||||
if p1 not in nodes: | ||||
Benoit Boissinot
|
r1402 | err(_("file %s:%s unknown parent 1 %s") % | ||
Matt Mackall
|
r1383 | (f, short(n), short(p1))) | ||
mpm@selenic.com
|
r1089 | if p2 not in nodes: | ||
Benoit Boissinot
|
r1402 | err(_("file %s:%s unknown parent 2 %s") % | ||
mpm@selenic.com
|
r1089 | (f, short(n), short(p1))) | ||
nodes[n] = 1 | ||||
# cross-check | ||||
for node in filenodes[f]: | ||||
Benoit Boissinot
|
r1402 | err(_("node %s in manifests not in %s") % (hex(node), f)) | ||
mpm@selenic.com
|
r1089 | |||
Benoit Boissinot
|
r1402 | self.ui.status(_("%d files, %d changesets, %d total revisions\n") % | ||
mpm@selenic.com
|
r1089 | (files, changesets, revisions)) | ||
Matt Mackall
|
r1383 | if errors[0]: | ||
Benoit Boissinot
|
r1402 | self.ui.warn(_("%d integrity errors encountered!\n") % errors[0]) | ||
mpm@selenic.com
|
r1089 | return 1 | ||
mason@suse.com
|
r1806 | |||
def aftertrans(base):
    """Return a callback that renames journal files to undo files.

    Deliberately closes over only the path string (not the repository
    object) so no circular reference keeps destructors from running.
    """
    root = base
    def renamer():
        # order matters: the main journal first, then the dirstate journal
        for old, new in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(root, old), os.path.join(root, new))
    return renamer