# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev
from i18n import _
import ancestor, mdiff, parsers, error, util, dagutil
import struct, zlib, errno

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags
REVIDX_KNOWN_FLAGS = 0

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    return long(long(offset) << 16 | type)

nullhash = _sha(nullid)

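# The index stores a revision's data-file offset and its flags packed into
# a single 64-bit field: offset in the high 48 bits, flags in the low 16.
# For example, offset_type(1024, 0) == 1024 << 16 == 67108864, and
# getoffset()/gettype() recover the two halves.
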
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = nullhash.copy()
        s.update(p1)
    else:
        # neither of the parent nodes is nullid
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()

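# A revision's nodeid is thus sha1(min(p1, p2) + max(p1, p2) + text); the
# nullid fast path above yields the same digest because nullid (twenty
# zero bytes) always sorts first.
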
def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        try:
            return _decompress(bin)
        except zlib.error, e:
            raise RevlogError(_("revlog decompress error: %s") % str(e))
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)

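# The leading byte of each stored chunk acts as a type tag: '\0' means the
# chunk is stored literally, 'x' is the first byte of a zlib stream, and
# 'u' prefixes text that was stored uncompressed; empty chunks pass
# through untouched.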
|
# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56

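# A v0 index record is therefore struct.calcsize(indexformatv0) == 76
# bytes, and v0shaoffset (4 * 4 + 20 + 20 == 56) is where a record's own
# nodeid begins.
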
class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid (only 20 bytes used; the rest is padding)
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"

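# An ng index record is struct.calcsize(indexformatng) == 64 bytes: the
# leading ">Q" holds the offset/flags pair produced by offset_type(), and
# ngshaoffset (8 + 6 * 4 == 32) locates the nodeid within a record.
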
class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p

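# Note the version-header trick in packentry() above: the first record's
# offset field is always zero, so when rev == 0 its first four bytes are
# overwritten with the ">I" version word that revlog.__init__ reads back.
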
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._basecache = (0, 0)
        self._chunkcache = (0, '')
        self.index = []
        self._pcache = {}
        self._nodecache = {nullid: nullrev}
        self._nodepos = None

        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= REVLOGGENERALDELTA
            else:
                v = 0

        i = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            i = f.read()
            f.close()
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
                self._initempty = False
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(i, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()

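    # (illustrative) a revlog is normally constructed through a repository
    # opener, e.g. revlog(repo.sopener, "00changelog.i"); __init__ above
    # derives the matching "00changelog.d" data file name from the index
    # file name.
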
    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        return iter(xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
        else:
            stop = len(self)
        return xrange(start, stop, step)

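    # len(self.index) counts the sentinel nullid entry that parseindex
    # appends at position -1, so __len__ subtracts one and tip() addresses
    # the last real revision at len(self.index) - 2.
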
    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def clearcaches(self):
        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except RevlogError:
            # parsers.c radix tree lookup failed
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))

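    # The pure-python fallback above scans the index backwards from the
    # last uncached position, memoizing every nodeid it passes, so repeated
    # misses amortize to a single pass over the index.
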
    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def chainbase(self, rev):
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize

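    # For reference, each in-memory index entry is the 8-tuple
    # (offset_flags, compressed_len, uncompressed_len, base_rev, link_rev,
    #  p1_rev, p2_rev, nodeid) that the accessors above index into.
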
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        return ancestor.lazyancestors(self, revs, stoprev=stoprev,
                                      inclusive=inclusive)

    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = util.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        return ancestor.missingancestors(heads, common, self.parentrevs)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        return [self.node(r) for r in
                ancestor.missingancestors(heads, common, self.parentrevs)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered revs so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

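    # A single pass suffices: every revision starts flagged as a head and
    # each entry clears the flag on its parents (revision numbers are
    # topologically ordered, so parents always precede their children);
    # the extra slot in ishead absorbs nullrev (-1) parents.
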
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants([start]):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # fast path, check if it is a descendant
        a, b = self.rev(a), self.rev(b)
        start, end = sorted((a, b))
        if self.descendant(start, end):
            return self.node(start)

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(a, b, parents)
        if c is None:
            return nullid

        return self.node(c)

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        try:
            return self.index.partialmatch(id)
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            raise LookupError(id, self.indexfile, _("ambiguous identifier"))
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

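    # Illustrative sketch (not part of the original module): the identifier
    # forms lookup() accepts, per _match() and _partialmatch() above. ``rl``
    # stands for any revlog instance; the hex prefix is hypothetical.
    #
    #   node = rl.lookup('0')          # revision number as a string
    #   node = rl.lookup('-1')         # negative revs count back from tip
    #   node = rl.lookup('1f0e5c')     # unambiguous hex nodeid prefix
    #   node = rl.lookup(hex(node))    # full 40-character hex nodeid
    #
    # An unknown id raises LookupError ('no match found'); a prefix shared
    # by several nodes raises LookupError ('ambiguous identifier').
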
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        readahead = max(65536, length)
        df.seek(offset)
        d = df.read(readahead)
        df.close()
        self._addchunk(offset, d)
        if readahead > length:
            return util.buffer(d, 0, length)
        return d

    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._loadchunk(offset, length)

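    # Illustrative sketch (an assumption about typical access patterns, not
    # original code): the chunk cache holds one contiguous window of the
    # data, so sequential delta reads are served from memory.
    #
    #   rl._chunkclear()          # cache becomes (0, '')
    #   rl._getchunk(0, 100)      # miss: _loadchunk reads >= 64KB ahead
    #   rl._getchunk(100, 50)     # hit: sliced out of the cached buffer
    #
    # A read outside the cached window falls through to _loadchunk(), which
    # re-seeds or extends the cache via _addchunk().
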
    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        length = self.end(endrev) - start
        if self._inline:
            start += (startrev + 1) * self._io.size
        return self._getchunk(start, length)

    def _chunk(self, rev):
        return decompress(self._chunkraw(rev, rev))

    def _chunkbase(self, rev):
        return self._chunk(rev)

    def _chunkclear(self):
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

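    # Illustrative note (not original code): without generaldelta, deltas
    # are implicitly chained against the previous revision; with it, the
    # index entry records the real base, which may be any earlier rev.
    #
    #   rl.deltaparent(0)   # -> nullrev: rev 0 stores a full snapshot
    #   rl.deltaparent(5)   # -> 4 (classic) or index[5][3] (generaldelta)
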
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return str(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1),
                              self.revision(rev2))

    def revision(self, nodeorrev):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                return self._cache[2]
            cachedrev = self._cache[1]

        # look up what we need to read
        text = None
        if rev is None:
            rev = self.rev(node)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # build delta chain
        chain = []
        index = self.index # for performance
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != cachedrev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]
        chain.reverse()
        base = iterrev

        if iterrev == cachedrev:
            # cache hit
            text = self._cache[2]

        # drop cache to save memory
        self._cache = None

        self._chunkraw(base, rev)
        if text is None:
            text = str(self._chunkbase(base))

        bins = [self._chunk(r) for r in chain]
        text = mdiff.patches(text, bins)

        text = self._checkhash(text, node, rev)

        self._cache = (node, rev, text)
        return text

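    # Worked sketch (hypothetical revision numbers, not original code) of
    # the delta chain walked above in a non-generaldelta revlog where the
    # chain base of rev 7 is rev 4:
    #
    #   chain == [5, 6, 7]                 # deltas to apply, oldest first
    #   text = str(self._chunkbase(4))     # full text stored at the base
    #   text = mdiff.patches(text, [self._chunk(r) for r in chain])
    #
    # If the cached revision lies on the chain, the walk stops there and
    # patching starts from the cached text instead of the base.
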
    def _checkhash(self, text, node, rev):
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.indexfile, rev))
        return text

    def checkinlinesize(self, tr, fp=None):
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call close, the temp file will never replace the
        # real index
        fp.close()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        """
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(node, text, transaction, link, p1, p2,
                                     cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

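    # Hedged usage sketch (hypothetical caller, not original code): a
    # revision is added inside an open transaction so a failure can roll
    # the index and data files back to their recorded sizes.
    #
    #   node = rl.addrevision(text, tr, linkrev, p1node, p2node)
    #   rl.addrevision(text, tr, linkrev, p1node, p2node)  # no-op: known node
    #
    # ``tr`` is assumed to be a transaction object with the add()/find()/
    # replace() methods used by checkinlinesize() above.
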
    def compress(self, text):
        """ generate a possibly-compressed representation of text """
        if not text:
            return ("", text)
        l = len(text)
        bin = None
        if l < 44:
            pass
        elif l > 1000000:
            # zlib makes an internal copy, thus doubling memory usage for
            # large files, so let's do this in pieces
            z = zlib.compressobj()
            p = []
            pos = 0
            while pos < l:
                pos2 = pos + 2**20
                p.append(z.compress(text[pos:pos2]))
                pos = pos2
            p.append(z.flush())
            if sum(map(len, p)) < l:
                bin = "".join(p)
        else:
            bin = _compress(text)
        if bin is None or len(bin) > l:
            if text[0] == '\0':
                return ("", text)
            return ('u', text)
        return ("", bin)

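    # Standalone sketch of the piecewise compression used above (an
    # illustration, not part of the module): feeding zlib 1MB slices keeps
    # peak memory bounded, and the result decompresses to the same text.
    #
    #   z = zlib.compressobj()
    #   pieces = []
    #   for pos in xrange(0, len(text), 2 ** 20):
    #       pieces.append(z.compress(text[pos:pos + 2 ** 20]))
    #   pieces.append(z.flush())
    #   assert zlib.decompress("".join(pieces)) == text
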
    def _addrevision(self, node, text, transaction, link, p1, p2,
                     cachedelta, ifh, dfh):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.
        invariants:
        - text is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        btext = [text]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            # flush any pending writes here so we can read it in revision
            if dfh:
                dfh.flush()
            ifh.flush()
            basetext = self.revision(self.node(cachedelta[0]))
            btext[0] = mdiff.patch(basetext, cachedelta[1])
            chk = hash(btext[0], p1, p2)
            if chk != node:
                raise RevlogError(_("consistency error in delta"))
            return btext[0]

        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                ptext = self.revision(self.node(rev))
                delta = mdiff.textdiff(ptext, t)
            data = self.compress(delta)
            l = len(data[1]) + len(data[0])
            if basecache[0] == rev:
                chainbase = basecache[1]
            else:
                chainbase = self.chainbase(rev)
            dist = l + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            return dist, l, data, base, chainbase

        curr = len(self)
        prev = curr - 1
        base = chainbase = curr
        offset = self.end(prev)
        flags = 0
        d = None
        basecache = self._basecache
        p1r, p2r = self.rev(p1), self.rev(p2)

        # should we try to build a delta?
        if prev != nullrev:
            if self._generaldelta:
                if p1r >= basecache[1]:
                    d = builddelta(p1r)
                elif p2r >= basecache[1]:
                    d = builddelta(p2r)
                else:
                    d = builddelta(prev)
            else:
                d = builddelta(prev)
            dist, l, data, base, chainbase = d

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)
        if d is None or dist > textlen * 2:
            text = buildtext()
            data = self.compress(text)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, chainbase)
        return node

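    # Illustrative arithmetic (assumed figures, not original code) for the
    # "dist > textlen * 2" cutoff above:
    #
    #   dist = l + offset - self.start(chainbase)   # bytes spanned on disk
    #   # e.g. textlen == 1000 and dist == 2400 -> store a full version
    #
    # This bounds how much compressed data revision() must read and patch
    # to reconstruct any single revision.
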
    def group(self, nodelist, bundler, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. If firstparent is nullrev, the changegroup
        starts with a full revision.
        """

        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield bundler.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (self._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(self)
            revs = set(self.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([self.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = self.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for r in xrange(len(revs) - 1):
            prev, curr = revs[r], revs[r + 1]
            for c in bundler.revchunk(self, curr, prev):
                yield c

        yield bundler.close()

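    # Hedged usage sketch (hypothetical bundler and output, not original
    # code): group() yields finished changegroup chunks, so callers only
    # stream them out.
    #
    #   for chunk in rl.group(nodes, bundler):  # bundler: revchunk()/close()
    #       out.write(chunk)
    #
    # reorder=False keeps revlog order even for generaldelta revlogs;
    # reorder=True forces DAG linearization.
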
    def addgroup(self, bundle, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        content = []
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            while True:
                chunkdata = bundle.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                content.append(node)

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, (baserev, delta), ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return content

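    # Hedged usage sketch (hypothetical names, not original code): the
    # bundle must expose deltachunk(chain) returning the dict keys unpacked
    # above, and linkmapper maps a changeset node to the linkrev to store.
    #
    #   seen = rl.addgroup(bundle, cl.rev, tr)
    #   # ``seen`` lists every node carried by the group, including ones
    #   # this revlog already had
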
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

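    # Illustrative note (not original code): checksize() reports how the
    # on-disk sizes diverge from what the index implies.
    #
    #   dd, di = rl.checksize()
    #   # dd: surplus (+) or missing (-) bytes in the data file
    #   # di: leftover trailing bytes in the index file
    #   # (0, 0) means both files match the expected sizes
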
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res