# revlog.py - storage back-end for mercurial
#
# This provides efficient delta storage with O(1) retrieve and append
# and O(changes) merge between branches
#
# Copyright 2005 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
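
# Each revision is stored either as a compressed full snapshot or as
# a compressed delta against the previous revision.  An index entry
# records where each revision's data lives and the "base" snapshot
# its delta chain starts from, so reconstructing any revision reads
# one contiguous span of the data file and applies the deltas in
# order.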

import zlib, struct, sha, binascii, heapq
from mercurial import mdiff

def hex(node): return binascii.hexlify(node)
def bin(node): return binascii.unhexlify(node)
def short(node): return hex(node[:6])

def compress(text):
    if not text: return text
    if len(text) < 44:
        if text[0] == '\0': return text
        return 'u' + text
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return text
        return 'u' + text
    return bin

def decompress(bin):
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise ValueError("unknown compression type %s" % t)

def hash(text, p1, p2):
    l = [p1, p2]
    l.sort()
    s = sha.new(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()

nullid = "\0" * 20
indexformat = ">4l20s20s20s"
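
# Each index entry is struct.calcsize(indexformat) == 76 bytes: four
# big-endian longs (offset and size of the data, the base of the
# delta chain, and the linkrev of the owning changeset) followed by
# three 20-byte nodeids: the two parents and this revision.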

class lazyparser:
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data)/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def load(self, pos=None):
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            self.map[e[6]] = i
            i += 1
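
# load() parses the index in blocks of 1000 entries around the
# requested position, so a point lookup in a large index decodes only
# one block; called with no argument it parses everything and installs
# the plain index and nodemap directly on the owning revlog.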

class lazyindex:
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        self.p.load(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        return self.p.index[pos] or self.load(pos)
    def append(self, e):
        self.p.index.append(e)

class lazymap:
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        if self.p.all: return
        n = self.p.data.find(key)
        if n < 0: raise KeyError("node " + hex(key))
        pos = n / self.p.s
        self.p.load(pos)
    def __contains__(self, key):
        self.p.load()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                self.p.load(i)
                yield self.p.index[i][6]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
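
# lazymap.load() locates a node by searching the raw index data for
# its 20-byte string.  A match may land in some other field of an
# entry, so the position is only a hint for which block to parse;
# __getitem__ retries the real map afterwards and raises KeyError if
# the node still isn't present.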

class revlog:
    def __init__(self, opener, indexfile, datafile):
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        self.cache = None

        try:
            i = self.opener(self.indexfile).read()
        except IOError:
            i = ""

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l
            n = 0
            for f in xrange(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1
            self.nodemap = dict(m)
            self.nodemap[nullid] = -1
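
    # A minimal usage sketch (not part of the original file): "opener"
    # is any open()-like callable and "tr" stands in for a transaction
    # object providing add(file, offset):
    #
    #   def opener(path, mode="r"):
    #       return open(path, mode + "b")
    #   rl = revlog(opener, "test.i", "test.d")
    #   n1 = rl.addrevision("line 1\n", tr, 0)
    #   n2 = rl.addrevision("line 1\nline 2\n", tr, 1)
    #   assert rl.revision(n2) == "line 1\nline 2\n"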

    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node): return self.nodemap[node]
    def linkrev(self, node): return self.index[self.nodemap[node]][3]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        return self.index[self.nodemap[node]][4:6]

    def start(self, rev): return self.index[rev][0]
    def length(self, rev): return self.index[rev][1]
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]

    def heads(self):
        p = {}
        h = []
        for r in range(self.count() - 1, -1, -1):
            n = self.node(r)
            if n not in p:
                h.append(n)
            for pn in self.parents(n):
                p[pn] = 1
        return h

    def children(self, node):
        # find all revisions that list node as a parent
        c = []
        for r in range(self.rev(node) + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    break
        return c

    def lookup(self, id):
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise KeyError("Ambiguous identifier")
            if len(c) < 1: raise KeyError("No match found")
            return c[0]

    def diff(self, a, b):
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        return mdiff.patches(t, pl)

    def delta(self, node):
        r = self.rev(node)
        b = self.base(r)
        if r == b:
            return self.diff(self.revision(self.node(r - 1)),
                             self.revision(node))
        else:
            f = self.opener(self.datafile)
            f.seek(self.start(r))
            data = f.read(self.length(r))
            return decompress(data)

    def revision(self, node):
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        text = None
        rev = self.rev(node)
        start, length, base, link, p1, p2, node = self.index[rev]
        end = start + length
        if base != rev: start = self.start(base)

        # reuse a cached intermediate revision if it lies on this chain
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if text is None:
            last = self.length(base)
            text = decompress(data[:last])

        bins = []
        for r in xrange(base + 1, rev + 1):
            s = self.length(r)
            bins.append(decompress(data[last:last + s]))
            last = last + s

        text = mdiff.patches(text, bins)

        if node != hash(text, p1, p2):
            raise IOError("integrity check failed on %s:%d"
                          % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text
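
    # For example, if revision 7 has base 5, the data file holds a
    # full snapshot of 5 followed by deltas for 6 and 7; revision()
    # reads that span once, decompresses the snapshot, applies both
    # deltas, and verifies the result against the stored hash.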

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1
        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            dist = end - start + len(data)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
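        # (e.g. a 1000-byte text is snapshotted once its chain plus
        # the new delta spans more than 2000 bytes of data file)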
        if not n or dist > len(text) * 2:
            data = compress(text)
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)
        e = (offset, len(data), base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        transaction.add(self.datafile, e[0])
        self.opener(self.datafile, "a").write(data)
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)
        self.cache = (node, n, text)
        return node

    def ancestor(self, a, b):
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    r = self.rev(n)
                    yield (-d, r, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        x = ancestors(a)
        y = ancestors(b)
        lx = x.next()
        ly = y.next()
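
        # both walks yield (distance, rev, node) tuples in decreasing
        # order, so whichever side is currently greater is stepped
        # forward until the two sides produce the same tuple; that
        # tuple names the common ancestor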
        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            if lx == ly:
                return lx[2]
            elif lx < ly:
                ly = y.next()
            elif lx > ly:
                lx = x.next()

    def group(self, linkmap):
        # given a list of changeset revs, return a set of deltas and
        # metadata corresponding to nodes. the first delta is
        # parent(nodes[0]) -> nodes[0]. the receiver is guaranteed to
        # have this parent as it has all history before these
        # changesets. parent is parent[0].
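        #
        # each chunk on the wire is framed as a 4-byte big-endian
        # length (which counts itself), 80 bytes of metadata (node,
        # p1, p2, and the changeset to link it to), and the delta
        # itself; a chunk of length 0 terminates the group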

        revs = []
        needed = {}

        # find file nodes/revs that match changeset revs
        for i in xrange(0, self.count()):
            if self.index[i][3] in linkmap:
                revs.append(i)
                needed[i] = 1

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # for each delta that isn't contiguous in the log, we need to
        # reconstruct the base, reconstruct the result, and then
        # calculate the delta. We also need to do this where we've
        # stored a full version and not a delta
        for i in xrange(0, len(revs) - 1):
            a, b = revs[i], revs[i + 1]
            if a + 1 != b or self.base(b) == b:
                for j in xrange(self.base(a), a + 1):
                    needed[j] = 1
                for j in xrange(self.base(b), b + 1):
                    needed[j] = 1

        # calculate spans to retrieve from datafile
        needed = needed.keys()
        needed.sort()
        spans = []
        oo = -1
        ol = 0
        for n in needed:
            if n < 0: continue
            o = self.start(n)
            l = self.length(n)
            if oo + ol == o: # can we merge with the previous?
                nl = spans[-1][2]
                nl.append((n, l))
                ol += l
                spans[-1] = (oo, ol, nl)
            else:
                oo = o
                ol = l
                spans.append((oo, ol, [(n, l)]))

        # read spans in, divide up chunks
        chunks = {}
        for span in spans:
            # we reopen the file for each span to make http happy for now
            f = self.opener(self.datafile)
            f.seek(span[0])
            data = f.read(span[1])

            # divide up the span
            pos = 0
            for r, l in span[2]:
                chunks[r] = decompress(data[pos: pos + l])
                pos += l

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
            return mdiff.patches(text, bins)

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            n = self.node(b)

            # do we need to construct a new delta?
            if a + 1 != b or self.base(b) == b:
                if a >= 0:
                    base = self.base(a)
                    ta = chunks[self.base(a)]
                    ta = construct(ta, base, a)
                else:
                    ta = ""

                base = self.base(b)
                if a > base:
                    base = a
                    tb = ta
                else:
                    tb = chunks[self.base(b)]
                tb = construct(tb, base, b)
                d = self.diff(ta, tb)
            else:
                d = chunks[b]

            p = self.parents(n)
            meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        yield struct.pack(">l", 0)

    def addgroup(self, revs, linkmapper, transaction, unique=0):
        # given a set of deltas, add them to the revision log. the
        # first delta is against its parent, which should be in our
        # log, the rest are against the previous delta.
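        #
        # each chunk in revs mirrors the framing used by group():
        # 80 bytes of metadata (node, p1, p2, linked changeset)
        # followed by the delta, with the 4-byte length prefix
        # already removed by the caller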

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

        base = prev = -1
        start = end = measure = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                if unique:
                    raise ValueError("already have %s" % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if chain not in self.nodemap:
                    raise ValueError("unknown base %s" % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise ValueError("consistency error adding group")
                measure = len(text)
            else:
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node