revlog: remove delta function
Matt Mackall - r7362:6db4a2cc default
diff --git a/mercurial/manifest.py b/mercurial/manifest.py
@@ -1,196 +1,197 @@
# manifest.py - manifest revision class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid
from revlog import revlog, RevlogError
from i18n import _
import array, struct, mdiff, parsers, util

class manifestdict(dict):
    def __init__(self, mapping=None, flags=None):
        if mapping is None: mapping = {}
        if flags is None: flags = {}
        dict.__init__(self, mapping)
        self._flags = flags
    def flags(self, f):
        return self._flags.get(f, "")
    def set(self, f, flags):
        self._flags[f] = flags
    def copy(self):
        return manifestdict(dict.copy(self), dict.copy(self._flags))

class manifest(revlog):
    def __init__(self, opener):
        self.mapcache = None
        self.listcache = None
        revlog.__init__(self, opener, "00manifest.i")

    def parse(self, lines):
        mfdict = manifestdict()
        parsers.parse_manifest(mfdict, mfdict._flags, lines)
        return mfdict

    def readdelta(self, node):
-        return self.parse(mdiff.patchtext(self.delta(node)))
+        r = self.rev(node)
+        return self.parse(mdiff.patchtext(self.revdiff(r - 1, r)))
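    # the removed revlog.delta helper presumably matched this sketch, which
    # is reconstructed from the new call site above (an assumption, not the
    # verbatim removed code):
    #
    #     def delta(self, node):
    #         r = self.rev(node)
    #         return self.revdiff(r - 1, r)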

    def read(self, node):
        if node == nullid: return manifestdict() # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        self.listcache = array.array('c', text)
        mapping = self.parse(text)
        self.mapcache = (node, mapping)
        return mapping

    def _search(self, m, s, lo=0, hi=None):
        '''return a tuple (start, end) that says where to find s within m.

        If the string is found, m[start:end] is the line containing
        that string. If start == end, the string was not found and
        they indicate the proper sorted insertion point. This was
        taken from bisect_left, and modified to find line start/end as
        it goes along.

        m should be a buffer or a string
        s is a string'''
        def advance(i, c):
            while i < lenm and m[i] != c:
                i += 1
            return i
        lenm = len(m)
        if not hi:
            hi = lenm
        while lo < hi:
            mid = (lo + hi) // 2
            start = mid
            while start > 0 and m[start-1] != '\n':
                start -= 1
            end = advance(start, '\0')
            if m[start:end] < s:
                # we know that after the null there are 40 bytes of sha1
                # this translates to the bisect lo = mid + 1
                lo = advance(end + 40, '\n') + 1
            else:
                # this translates to the bisect hi = mid
                hi = start
        end = advance(lo, '\0')
        found = m[lo:end]
        if cmp(s, found) == 0:
            # we know that after the null there are 40 bytes of sha1
            end = advance(end + 40, '\n')
            return (lo, end+1)
        else:
            return (lo, lo)
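    # usage sketch with hypothetical data: manifest text is a series of
    # "<file>\0<40 hex chars of sha><flags>\n" lines sorted by file name
    #
    #     m = "bar\0" + "a" * 40 + "\nfoo\0" + "b" * 40 + "x\n"
    #     start, end = mf._search(m, "foo")  # m[start:end] is foo's line
    #     start, end = mf._search(m, "baz")  # start == end: not found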

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        if self.mapcache and node == self.mapcache[0]:
            return self.mapcache[1].get(f), self.mapcache[1].flags(f)
        text = self.revision(node)
        start, end = self._search(text, f)
        if start == end:
            return None, None
        l = text[start:end]
        f, n = l.split('\0')
        return bin(n[:40]), n[40:-1]

    def add(self, map, transaction, link, p1=None, p2=None,
            changed=None):
        # apply the changes collected during the bisect loop to our addlist
        # return a delta suitable for addrevision
        def addlistdelta(addlist, x):
            # start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(x)
            while i > 0:
                i -= 1
                start = x[i][0]
                end = x[i][1]
                if x[i][2]:
                    addlist[start:end] = array.array('c', x[i][2])
                else:
                    del addlist[start:end]
            return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2]
                            for d in x ])
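        # each chunk of the returned delta is packed as ">lll" (start, end,
        # len(data)) followed by the data itself; e.g. replacing bytes 5..9
        # of the old text with "abc" packs as struct.pack(">lll", 5, 9, 3)
        # + "abc"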

        def checkforbidden(l):
            for f in l:
                if '\n' in f or '\r' in f:
                    raise RevlogError(_("'\\n' and '\\r' disallowed in filenames"))

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not (changed and self.listcache and p1 and self.mapcache[0] == p1):
            files = util.sort(map)
            checkforbidden(files)

            # if this is changed to support newlines in filenames,
            # be sure to check the templates/ dir again (especially *-raw.tmpl)
            text = ["%s\000%s%s\n" % (f, hex(map[f]), map.flags(f))
                    for f in files]
            self.listcache = array.array('c', "".join(text))
            cachedelta = None
        else:
            addlist = self.listcache

            checkforbidden(changed[0])
            # combine the changed lists into one list for sorting
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            dstart = None
            dend = None
            dline = [""]
            start = 0
            # zero copy representation of addlist as a buffer
            addbuf = buffer(addlist)

            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                start, end = self._search(addbuf, f, start)
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]), map.flags(f))
                else:
                    l = ""
                if start == end and w[1] == 1:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                if dstart != None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart != None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart != None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the addlist, and get a delta for addrevision
            cachedelta = addlistdelta(addlist, delta)

            # the delta is only valid if we've been processing the tip revision
            if self.mapcache[0] != self.tip():
                cachedelta = None
            self.listcache = addlist

        n = self.addrevision(buffer(self.listcache), transaction, link,
                             p1, p2, cachedelta)
        self.mapcache = (n, map)

        return n
diff --git a/mercurial/revlog.py b/mercurial/revlog.py
@@ -1,1374 +1,1369 @@
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import bin, hex, nullid, nullrev, short
13 from node import bin, hex, nullid, nullrev, short
14 from i18n import _
14 from i18n import _
15 import changegroup, errno, ancestor, mdiff, parsers
15 import changegroup, errno, ancestor, mdiff, parsers
16 import struct, util, zlib
16 import struct, util, zlib
17
17
18 _pack = struct.pack
18 _pack = struct.pack
19 _unpack = struct.unpack
19 _unpack = struct.unpack
20 _compress = zlib.compress
20 _compress = zlib.compress
21 _decompress = zlib.decompress
21 _decompress = zlib.decompress
22 _sha = util.sha1
22 _sha = util.sha1
23
23
24 # revlog flags
24 # revlog flags
25 REVLOGV0 = 0
25 REVLOGV0 = 0
26 REVLOGNG = 1
26 REVLOGNG = 1
27 REVLOGNGINLINEDATA = (1 << 16)
27 REVLOGNGINLINEDATA = (1 << 16)
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31
31
32 class RevlogError(Exception):
32 class RevlogError(Exception):
33 pass
33 pass
34
34
35 class LookupError(RevlogError, KeyError):
35 class LookupError(RevlogError, KeyError):
36 def __init__(self, name, index, message):
36 def __init__(self, name, index, message):
37 self.name = name
37 self.name = name
38 if isinstance(name, str) and len(name) == 20:
38 if isinstance(name, str) and len(name) == 20:
39 name = short(name)
39 name = short(name)
40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
41
41
42 def __str__(self):
42 def __str__(self):
43 return RevlogError.__str__(self)
43 return RevlogError.__str__(self)
44
44
45 def getoffset(q):
45 def getoffset(q):
46 return int(q >> 16)
46 return int(q >> 16)
47
47
48 def gettype(q):
48 def gettype(q):
49 return int(q & 0xFFFF)
49 return int(q & 0xFFFF)
50
50
51 def offset_type(offset, type):
51 def offset_type(offset, type):
52 return long(long(offset) << 16 | type)
52 return long(long(offset) << 16 | type)
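# worked example: offset_type(0x1234, 1) == 0x12340001, which the two
# accessors invert: getoffset(0x12340001) == 0x1234 and
# gettype(0x12340001) == 1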

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    l = [p1, p2]
    l.sort()
    s = _sha(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()
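# equivalent sketch: nodeid = sha1(min(p1, p2) + max(p1, p2) + text), so the
# same text stored with different parents yields a different nodeid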

def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    l = len(text)
    bin = None
    if l < 44:
        pass
    elif l > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so let's do this in pieces
        z = zlib.compressobj()
        p = []
        pos = 0
        while pos < l:
            pos2 = pos + 2**20
            p.append(z.compress(text[pos:pos2]))
            pos = pos2
        p.append(z.flush())
        if sum(map(len, p)) < l:
            bin = "".join(p)
    else:
        bin = _compress(text)
    if bin is None or len(bin) > l:
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        return _decompress(bin)
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)
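# round trip: decompress("".join(compress(t))) == t for any string t. Chunks
# whose first byte is '\0' are stored raw, 'u' marks text left uncompressed
# because zlib would not have shrunk it, and 'x' is the leading byte of a
# zlib stream.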

class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices
    """

    # lazyparser is not safe to use on windows if win32 extensions not
    # available. it keeps a file handle open, which makes it impossible
    # to break hardlinks on local cloned repos.

    def __init__(self, dataf, size):
        self.dataf = dataf
        self.s = struct.calcsize(indexformatng)
        self.datasize = size
        self.l = size/self.s
        self.index = [None] * self.l
        self.map = {nullid: nullrev}
        self.allmap = 0
        self.all = 0
        self.mapfind_count = 0

    def loadmap(self):
        """
        during a commit, we need to make sure the rev being added is
        not a duplicate. This requires loading the entire index,
        which is fairly slow. loadmap can load up just the node map,
        which takes much less time.
        """
        if self.allmap:
            return
        end = self.datasize
        self.allmap = 1
        cur = 0
        count = 0
        blocksize = self.s * 256
        self.dataf.seek(0)
        while cur < end:
            data = self.dataf.read(blocksize)
            off = 0
            for x in xrange(256):
                n = data[off + ngshaoffset:off + ngshaoffset + 20]
                self.map[n] = count
                count += 1
                if count >= self.l:
                    break
                off += self.s
            cur += blocksize

    def loadblock(self, blockstart, blocksize, data=None):
        if self.all:
            return
        if data is None:
            self.dataf.seek(blockstart)
            if blockstart + blocksize > self.datasize:
                # the revlog may have grown since we've started running,
                # but we don't have space in self.index for more entries.
                # limit blocksize so that we don't get too much data.
                blocksize = max(self.datasize - blockstart, 0)
            data = self.dataf.read(blocksize)
        lend = len(data) / self.s
        i = blockstart / self.s
        off = 0
        # lazyindex supports __delitem__
        if lend > len(self.index) - i:
            lend = len(self.index) - i
        for x in xrange(lend):
            if self.index[i + x] == None:
                b = data[off : off + self.s]
                self.index[i + x] = b
                n = b[ngshaoffset:ngshaoffset + 20]
                self.map[n] = i + x
            off += self.s

    def findnode(self, node):
        """search backwards through the index file for a specific node"""
        if self.allmap:
            return None

        # hg log will cause many many searches for the manifest
        # nodes. After we get called a few times, just load the whole
        # thing.
        if self.mapfind_count > 8:
            self.loadmap()
            if node in self.map:
                return node
            return None
        self.mapfind_count += 1
        last = self.l - 1
        while self.index[last] != None:
            if last == 0:
                self.all = 1
                self.allmap = 1
                return None
            last -= 1
        end = (last + 1) * self.s
        blocksize = self.s * 256
        while end >= 0:
            start = max(end - blocksize, 0)
            self.dataf.seek(start)
            data = self.dataf.read(end - start)
            findend = end - start
            while True:
                # we're searching backwards, so we have to make sure
                # we don't find a changeset where this node is a parent
                off = data.find(node, 0, findend)
                findend = off
                if off >= 0:
                    i = off / self.s
                    off = i * self.s
                    n = data[off + ngshaoffset:off + ngshaoffset + 20]
                    if n == node:
                        self.map[n] = i + start / self.s
                        return node
                else:
                    break
            end -= blocksize
        return None

    def loadindex(self, i=None, end=None):
        if self.all:
            return
        all = False
        if i == None:
            blockstart = 0
            blocksize = (65536 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                blockstart = (i & ~1023) * self.s
                blocksize = self.s * 1024
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all:
            self.all = True

class lazyindex(object):
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        if pos < 0:
            pos += len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
    def __setitem__(self, pos, item):
        self.p.index[pos] = _pack(indexformatng, *item)
    def __delitem__(self, pos):
        del self.p.index[pos]
    def insert(self, pos, e):
        self.p.index.insert(pos, _pack(indexformatng, *e))
    def append(self, e):
        self.p.index.append(_pack(indexformatng, *e))

class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        n = self.p.findnode(key)
        if n == None:
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            ret = self.p.index[i]
            if not ret:
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                ret = _unpack(indexformatng, ret)
            yield ret[7]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]

indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56

class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, fp, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        data = fp.read()
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"
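# worked example: struct.calcsize(indexformatng) == 64; the 6-byte offset and
# 2-byte flags travel together in the leading Q (packed by offset_type above),
# and the trailing 12x pads the 20-byte nodeid out to 32 bytes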

class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, fp, inline):
        try:
            size = util.fstat(fp).st_size
        except AttributeError:
            size = 0

        if util.openhardlinks() and not inline and size > 1000000:
            # big index, let's parse it on demand
            parser = lazyparser(fp, size)
            index = lazyindex(parser)
            nodemap = lazymap(parser)
            e = list(index[0])
            type = gettype(e[0])
            e[0] = offset_type(0, type)
            index[0] = e
            return index, nodemap, None

        data = fp.read()
        # call the C implementation to parse the index data
        index, nodemap, cache = parsers.parse_index(data, inline)
        return index, nodemap, cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p
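    # note: for rev 0 the first 4 bytes of the packed entry are overwritten
    # with the version word (versionformat ">I"), which is why revlog.__init__
    # below sniffs the format by reading the first 4 bytes of the index file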

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._chunkcache = None
        self.nodemap = {nullid: nullrev}
        self.index = []

        v = REVLOG_DEFAULT_VERSION
        if hasattr(opener, "defversion"):
            v = opener.defversion
            if v & REVLOGNG:
                v |= REVLOGNGINLINEDATA

        i = ""
        try:
            f = self.opener(self.indexfile)
            i = f.read(4)
            f.seek(0)
            if len(i) > 0:
                v = struct.unpack(versionformat, i)[0]
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        if i:
            d = self._io.parseindex(f, self._inline)
            self.index, self.nodemap, self._chunkcache = d

        # add the magic null revision at -1 (if it hasn't been done already)
        if (self.index == [] or isinstance(self.index, lazyindex) or
            self.index[-1][7] != nullid) :
            self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    def _loadindex(self, start, end):
        """load a block of indexes all at once from the lazy parser"""
        if isinstance(self.index, lazyindex):
            self.index.p.loadindex(start, end)

    def _loadindexmap(self):
        """loads both the map and the index from the lazy parser"""
        if isinstance(self.index, lazyindex):
            p = self.index.p
            p.loadindex()
            self.nodemap = p.map

    def _loadmap(self):
        """loads the map from the lazy parser"""
        if isinstance(self.nodemap, lazymap):
            self.nodemap.p.loadmap()
            self.nodemap = self.nodemap.p.map

    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        for i in xrange(len(self)):
            yield i
    def rev(self, node):
        try:
            return self.nodemap[node]
        except KeyError:
            raise LookupError(node, self.indexfile, _('no node'))
    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        d = self.index[self.rev(node)][5:7]
        return (self.node(d[0]), self.node(d[1]))
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def base(self, rev):
        return self.index[rev][3]

    def size(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)

        # alternate implementation. The advantage of this code is that it
        # will be faster for a single revision. But, the results are not
        # cached, so finding the size of every revision will be slower.
        """
        if self.cache and self.cache[1] == rev:
            return len(self.cache[2])

        base = self.base(rev)
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.revision(self.node(base))

        l = len(text)
        for x in xrange(base + 1, rev + 1):
            l = mdiff.patchedsize(l, self.chunk(x))
        return l
        """

    def reachable(self, node, stop=None):
        """return a hash of all nodes ancestral to a given node, including
        the node itself, stopping when stop is matched"""
        reachable = {}
        visit = [node]
        reachable[node] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable

    def ancestors(self, *revs):
        'Generate the ancestors of revs using a breadth-first visit'
        visit = list(revs)
        seen = util.set([nullrev])
        while visit:
            for parent in self.parentrevs(visit.pop(0)):
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    def descendants(self, *revs):
        'Generate the descendants of revs in topological order'
        seen = util.set(revs)
        for i in xrange(min(revs) + 1, len(self)):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findmissing(self, common=None, heads=None):
        '''
        returns the topologically sorted list of nodes from the set:
        missing = (ancestors(heads) \ ancestors(common))

        where ancestors() is the set of ancestors from heads, heads included

        if heads is None, the heads of the revlog are used
        if common is None, nullid is assumed to be a common node
        '''
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = dict.fromkeys(self.ancestors(*common))
        has[nullrev] = None
        for r in common:
            has[r] = None

        # take all ancestors from heads that aren't in has
        missing = {}
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing[r] = None
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = missing.keys()
        missing.sort()
        return [self.node(r) for r in missing]
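    # worked example for findmissing: on a linear history 0-1-2-3 with
    # common=[node(1)] and heads=[node(3)], ancestors(heads) is {0,1,2,3},
    # ancestors(common) is {0,1}, and the result is [node(2), node(3)]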
632
632
633 def nodesbetween(self, roots=None, heads=None):
633 def nodesbetween(self, roots=None, heads=None):
634 """Return a tuple containing three elements. Elements 1 and 2 contain
634 """Return a tuple containing three elements. Elements 1 and 2 contain
635 a final list bases and heads after all the unreachable ones have been
635 a final list bases and heads after all the unreachable ones have been
636 pruned. Element 0 contains a topologically sorted list of all
636 pruned. Element 0 contains a topologically sorted list of all
637
637
638 nodes that satisfy these constraints:
638 nodes that satisfy these constraints:
639 1. All nodes must be descended from a node in roots (the nodes on
639 1. All nodes must be descended from a node in roots (the nodes on
640 roots are considered descended from themselves).
640 roots are considered descended from themselves).
641 2. All nodes must also be ancestors of a node in heads (the nodes in
641 2. All nodes must also be ancestors of a node in heads (the nodes in
642 heads are considered to be their own ancestors).
642 heads are considered to be their own ancestors).
643
643
644 If roots is unspecified, nullid is assumed as the only root.
644 If roots is unspecified, nullid is assumed as the only root.
645 If heads is unspecified, it is taken to be the output of the
645 If heads is unspecified, it is taken to be the output of the
646 heads method (i.e. a list of all nodes in the repository that
646 heads method (i.e. a list of all nodes in the repository that
647 have no children)."""
647 have no children)."""
648 nonodes = ([], [], [])
648 nonodes = ([], [], [])
649 if roots is not None:
649 if roots is not None:
650 roots = list(roots)
650 roots = list(roots)
651 if not roots:
651 if not roots:
652 return nonodes
652 return nonodes
653 lowestrev = min([self.rev(n) for n in roots])
653 lowestrev = min([self.rev(n) for n in roots])
654 else:
654 else:
655 roots = [nullid] # Everybody's a descendent of nullid
655 roots = [nullid] # Everybody's a descendent of nullid
656 lowestrev = nullrev
656 lowestrev = nullrev
657 if (lowestrev == nullrev) and (heads is None):
657 if (lowestrev == nullrev) and (heads is None):
658 # We want _all_ the nodes!
658 # We want _all_ the nodes!
659 return ([self.node(r) for r in self], [nullid], list(self.heads()))
659 return ([self.node(r) for r in self], [nullid], list(self.heads()))
660 if heads is None:
660 if heads is None:
661 # All nodes are ancestors, so the latest ancestor is the last
661 # All nodes are ancestors, so the latest ancestor is the last
662 # node.
662 # node.
663 highestrev = len(self) - 1
663 highestrev = len(self) - 1
664 # Set ancestors to None to signal that every node is an ancestor.
664 # Set ancestors to None to signal that every node is an ancestor.
665 ancestors = None
665 ancestors = None
666 # Set heads to an empty dictionary for later discovery of heads
666 # Set heads to an empty dictionary for later discovery of heads
667 heads = {}
667 heads = {}
668 else:
668 else:
669 heads = list(heads)
669 heads = list(heads)
670 if not heads:
670 if not heads:
671 return nonodes
671 return nonodes
672 ancestors = {}
672 ancestors = {}
673 # Turn heads into a dictionary so we can remove 'fake' heads.
673 # Turn heads into a dictionary so we can remove 'fake' heads.
674 # Also, later we will be using it to filter out the heads we can't
674 # Also, later we will be using it to filter out the heads we can't
675 # find from roots.
675 # find from roots.
676 heads = dict.fromkeys(heads, 0)
676 heads = dict.fromkeys(heads, 0)
677 # Start at the top and keep marking parents until we're done.
677 # Start at the top and keep marking parents until we're done.
678 nodestotag = heads.keys()
678 nodestotag = heads.keys()
679 # Remember where the top was so we can use it as a limit later.
679 # Remember where the top was so we can use it as a limit later.
680 highestrev = max([self.rev(n) for n in nodestotag])
680 highestrev = max([self.rev(n) for n in nodestotag])
681 while nodestotag:
681 while nodestotag:
682 # grab a node to tag
682 # grab a node to tag
683 n = nodestotag.pop()
683 n = nodestotag.pop()
684 # Never tag nullid
684 # Never tag nullid
685 if n == nullid:
685 if n == nullid:
686 continue
686 continue
687 # A node's revision number represents its place in a
687 # A node's revision number represents its place in a
688 # topologically sorted list of nodes.
688 # topologically sorted list of nodes.
689 r = self.rev(n)
689 r = self.rev(n)
690 if r >= lowestrev:
690 if r >= lowestrev:
691 if n not in ancestors:
691 if n not in ancestors:
692 # If we are possibly a descendent of one of the roots
692 # If we are possibly a descendent of one of the roots
693 # and we haven't already been marked as an ancestor
693 # and we haven't already been marked as an ancestor
694 ancestors[n] = 1 # Mark as ancestor
694 ancestors[n] = 1 # Mark as ancestor
695 # Add non-nullid parents to list of nodes to tag.
695 # Add non-nullid parents to list of nodes to tag.
696 nodestotag.extend([p for p in self.parents(n) if
696 nodestotag.extend([p for p in self.parents(n) if
697 p != nullid])
697 p != nullid])
698 elif n in heads: # We've seen it before, is it a fake head?
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev:  # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the descendents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

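    # A hedged usage sketch for the sweep above (this is the tail of
    # revlog's nodesbetween; `rl`, `root` and `head` are assumed names,
    # with both nodes already present in the revlog):
    #   orderedout, outroots, outheads = rl.nodesbetween([root], [head])
    # Revisions are visited in increasing rev order, which is already a
    # topological order, so each node only needs its parents checked
    # against the descendents set built so far.
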
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            count = len(self)
            if not count:
                return [nullid]
            ishead = [1] * (count + 1)
            index = self.index
            for r in xrange(count):
                e = index[r]
                ishead[e[5]] = ishead[e[6]] = 0
            return [self.node(r) for r in xrange(count) if ishead[r]]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = dict.fromkeys([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev: 1}
        heads = {startrev: 1}

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable[r] = 1
                        heads[r] = 1
                    if p in heads and p not in stoprevs:
                        del heads[p]

        return [self.node(r) for r in heads]

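    # Hedged sketch of the fast path above: index entry fields 5 and 6
    # hold the parent revs, so any rev named as a parent gets its ishead
    # bit cleared. With a revlog `rl` (name assumed):
    #   tips = rl.heads()
    # returns exactly the nodes that no other revision lists as a parent.
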
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, len(self)):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

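    # A child always has a larger rev than its parent, which is why the
    # scan above starts at rev(node) + 1. Hedged usage (revlog `rl`
    # assumed):
    #   kids = rl.children(rl.node(0))   # nodes whose parent is rev 0
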
    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                r = self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                r = self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        if len(id) < 40:
            try:
                # hex(node)[:...]
                bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
                node = None
                for n in self.nodemap:
                    if n.startswith(bin_id) and hex(n).startswith(id):
                        if node is not None:
                            raise LookupError(id, self.indexfile,
                                              _('ambiguous identifier'))
                        node = n
                if node is not None:
                    return node
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

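    # Hedged sketch of the forms lookup accepts (revlog `rl` assumed):
    #   rl.lookup(0)              # a revision number
    #   rl.lookup('0')            # str(revision number)
    #   rl.lookup(node)           # a 20-byte binary nodeid
    #   rl.lookup(hex(node))      # a 40-digit hex nodeid
    #   rl.lookup(hex(node)[:8])  # an unambiguous hex prefix
    # Ambiguous prefixes and unknown ids raise LookupError.
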
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored at node
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def chunk(self, rev, df=None):
        def loadcache(df):
            if not df:
                if self._inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self._chunkcache = (start, df.read(cache_length))

        start, length = self.start(rev), self.length(rev)
        if self._inline:
            start += (rev + 1) * self._io.size
        end = start + length

        offset = 0
        if not self._chunkcache:
            cache_length = max(65536, length)
            loadcache(df)
        else:
            cache_start = self._chunkcache[0]
            cache_length = len(self._chunkcache[1])
            cache_end = cache_start + cache_length
            if start >= cache_start and end <= cache_end:
                # it is cached
                offset = start - cache_start
            else:
                cache_length = max(65536, length)
                loadcache(df)

        # avoid copying large chunks
        c = self._chunkcache[1]
        if cache_length != length:
            c = c[offset:offset + length]

        return decompress(c)

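    # The cache above always reads at least 64KB (max(65536, length)), so
    # walking a delta chain usually stays within one cached read. Hedged
    # offset sketch for an inline revlog, where data chunks are interleaved
    # with index entries:
    #   seekpos = rl.start(rev) + (rev + 1) * rl._io.size   # rl assumed
    # i.e. the logical data offset plus one entry-size for each of the
    # rev + 1 index entries that precede the chunk.
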
    # Removed in this changeset: the delta(node) wrapper shown below;
    # callers now use revdiff directly.
    # def delta(self, node):
    #     """return or calculate a delta between a node and its predecessor"""
    #     r = self.rev(node)
    #     return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
            return self.chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

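    # Hedged sketch: when rev2 directly follows rev1 in the same delta
    # chain (same base), the stored chunk for rev2 already *is* the delta
    # against rev1, so it is returned without rebuilding either text:
    #   d = rl.revdiff(3, 4)                       # rl assumed
    #   text4 = mdiff.patches(rl.revision(rl.node(3)), [d])
    # Otherwise both revisions are reconstructed and diffed with
    # mdiff.textdiff.
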
    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid:
            return ""
        if self._cache and self._cache[0] == node:
            return str(self._cache[2])

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # check rev flags
        if self.index[rev][0] & 0xFFFF:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.index[rev][0] & 0xFFFF))

        df = None

        # do we have useful data cached?
        if self._cache and self._cache[1] >= base and self._cache[1] < rev:
            base = self._cache[1]
            text = str(self._cache[2])
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base + 1:
                df = self.opener(self.datafile)
        else:
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base:
                df = self.opener(self.datafile)
            text = self.chunk(base, df=df)

        bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
        text = mdiff.patches(text, bins)
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self._cache = (node, rev, text)
        return text

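    # Hedged reconstruction sketch: a revision is its chain base plus
    # every delta up to rev, verified against the nodeid (rl assumed):
    #   base = rl.base(rev)
    #   text = rl.chunk(base)
    #   bins = [rl.chunk(r) for r in xrange(base + 1, rev + 1)]
    #   text = mdiff.patches(text, bins)
    #   assert hash(text, p1, p2) == rl.node(rev)
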
    def checkinlinesize(self, tr, fp=None):
        if not self._inline:
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
            fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        try:
            calc = self._io.size
            for r in self:
                start = self.start(r) + (r + 1) * calc
                length = self.length(r)
                fp.seek(start)
                d = fp.read(length)
                df.write(d)
        finally:
            df.close()

        fp.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self._chunkcache = None

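    # Once an inline revlog passes 128KB (131072 bytes), its data chunks
    # are copied out into a separate data file and the index is rewritten
    # without the interleaved chunks. Hedged arithmetic for the copy loop
    # above: rev r's inline data sits at
    #   start(r) + (r + 1) * entrysize
    # which is its logical data offset shifted past the index entries.
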
    def addrevision(self, text, transaction, link, p1, p2, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        curr = len(self)
        prev = curr - 1
        base = self.base(prev)
        offset = self.end(prev)

        if curr:
            if not d:
                ptext = self.revision(self.node(prev))
                d = mdiff.textdiff(ptext, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = l + offset - self.start(base)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not curr or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, 0), l, len(text),
             base, link, self.rev(p1), self.rev(p2), node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        self._cache = (node, curr, text)
        return node

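    # Hedged worked example of the snapshot heuristic above: if the chain
    # from the current base would cost more than twice the plain text
    # (dist > len(text) * 2), a full compressed version is stored and this
    # rev becomes its own base. E.g. a 10KB text whose accumulated chain
    # already spans 25KB is stored whole rather than as one more delta,
    # which bounds the reconstruction work for later reads.
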
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
        if c is None:
            return nullid

        return self.node(c)

    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. The parent used is parents[0].
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            if a == -1:
                d = self.revision(nb)
                meta += mdiff.trivialdiffheader(len(d))
            else:
                d = self.revdiff(a, b)
            yield changegroup.chunkheader(len(meta) + len(d))
            yield meta
            if len(d) > 2**20:
                pos = 0
                while pos < len(d):
                    pos2 = pos + 2 ** 18
                    yield d[pos:pos2]
                    pos = pos2
            else:
                yield d

        yield changegroup.closechunk()

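    # Hedged wire-format sketch of one chunk yielded above:
    #   chunkheader(len) | node (20) | p1 (20) | p2 (20) | cs (20) | delta
    # Deltas over 1MB (2**20 bytes) are re-yielded in 256KB (2**18)
    # slices so a consumer never has to buffer one huge string at once.
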
    def addgroup(self, revs, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = len(self)
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            for chunk in revs:
                node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue
                delta = buffer(chunk, 80)
                del chunk

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile, _('unknown parent'))

                if not chain:
                    # retrieve the parent revision of the delta chain
                    chain = p1
                    if chain not in self.nodemap:
                        raise LookupError(chain, self.indexfile, _('unknown base'))

                # full versions are inserted when the needed deltas become
                # comparable to the uncompressed text or when the previous
                # version is not the one we have a delta against. We use
                # the size of the previous full rev as a proxy for the
                # current size.

                if chain == prev:
                    cdelta = compress(delta)
                    cdeltalen = len(cdelta[0]) + len(cdelta[1])
                    textlen = mdiff.patchedsize(textlen, delta)

                if chain != prev or (end - start + cdeltalen) > textlen * 2:
                    # flush our writes here so we can read it in revision
                    if dfh:
                        dfh.flush()
                    ifh.flush()
                    text = self.revision(chain)
                    if len(text) == 0:
                        # skip over trivial delta header
                        text = buffer(delta, 12)
                    else:
                        text = mdiff.patches(text, [delta])
                    del delta
                    chk = self._addrevision(text, transaction, link, p1, p2, None,
                                            ifh, dfh)
                    if not dfh and not self._inline:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    if chk != node:
                        raise RevlogError(_("consistency error adding group"))
                    textlen = len(text)
                else:
                    e = (offset_type(end, 0), cdeltalen, textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                    self.index.insert(-1, e)
                    self.nodemap[node] = r
                    entry = self._io.packentry(e, self.node, self.version, r)
                    if self._inline:
                        ifh.write(entry)
                        ifh.write(cdelta[0])
                        ifh.write(cdelta[1])
                        self.checkinlinesize(transaction, ifh)
                        if not self._inline:
                            dfh = self.opener(self.datafile, "a")
                            ifh = self.opener(self.indexfile, "a")
                    else:
                        dfh.write(cdelta[0])
                        dfh.write(cdelta[1])
                        ifh.write(entry)

                t, r, chain, prev = r, r + 1, node, node
                base = self.base(t)
                start = self.start(base)
                end = self.end(t)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

    def strip(self, minlink):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        if isinstance(self.index, lazyindex):
            self._loadindexmap()

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkcache = None
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = self._io.size
            i = max(0, actual / s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

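    # Hedged interpretation of the return value: dd is the surplus (or,
    # if negative, missing) bytes in the data file relative to what the
    # index implies, and di is the same for the index file:
    #   dd, di = rl.checksize()   # rl assumed
    #   ok = (dd, di) == (0, 0)
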
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res