@@ -1,814 +1,816 @@
|
1 | 1 | """ |
|
2 | 2 | revlog.py - storage back-end for mercurial |
|
3 | 3 | |
|
4 | 4 | This provides efficient delta storage with O(1) retrieve and append |
|
5 | 5 | and O(changes) merge between branches |
|
6 | 6 | |
|
7 | 7 | Copyright 2005 Matt Mackall <mpm@selenic.com> |
|
8 | 8 | |
|
9 | 9 | This software may be used and distributed according to the terms |
|
10 | 10 | of the GNU General Public License, incorporated herein by reference. |
|
11 | 11 | """ |
|
12 | 12 | |
|
13 | 13 | from node import * |
|
14 | 14 | from demandload import demandload |
|
15 | 15 | demandload(globals(), "binascii errno heapq mdiff sha struct zlib") |
|
16 | 16 | |
|
17 | 17 | def hash(text, p1, p2): |
|
18 | 18 | """generate a hash from the given text and its parent hashes |
|
19 | 19 | |
|
20 | 20 | This hash combines both the current file contents and its history |
|
21 | 21 | in a manner that makes it easy to distinguish nodes with the same |
|
22 | 22 | content in the revision graph. |
|
23 | 23 | """ |
|
24 | 24 | l = [p1, p2] |
|
25 | 25 | l.sort() |
|
26 | 26 | s = sha.new(l[0]) |
|
27 | 27 | s.update(l[1]) |
|
28 | 28 | s.update(text) |
|
29 | 29 | return s.digest() |
|
30 | 30 | |
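
A quick sketch of the property this docstring describes, assuming node.py and revlog.py from this tree are importable (Python 2, like the code itself): the same text hashed with different parents yields a different nodeid, and parent order is irrelevant because the parents are sorted before hashing.

    # Sketch (Python 2), assuming this tree's modules are importable.
    from node import nullid
    from revlog import hash

    root = hash("hello\n", nullid, nullid)
    child = hash("hello\n", root, nullid)    # same text, new history
    assert root != child
    # parents are sorted before hashing, so their order is irrelevant
    assert hash("x", root, child) == hash("x", child, root)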
|
31 | 31 | def compress(text): |
|
32 | 32 | """ generate a possibly-compressed representation of text """ |
|
33 | 33 | if not text: return text |
|
34 | 34 | if len(text) < 44: |
|
35 | 35 | if text[0] == '\0': return text |
|
36 | 36 | return 'u' + text |
|
37 | 37 | bin = zlib.compress(text) |
|
38 | 38 | if len(bin) > len(text): |
|
39 | 39 | if text[0] == '\0': return text |
|
40 | 40 | return 'u' + text |
|
41 | 41 | return bin |
|
42 | 42 | |
|
43 | 43 | def decompress(bin): |
|
44 | 44 | """ decompress the given input """ |
|
45 | 45 | if not bin: return bin |
|
46 | 46 | t = bin[0] |
|
47 | 47 | if t == '\0': return bin |
|
48 | 48 | if t == 'x': return zlib.decompress(bin) |
|
49 | 49 | if t == 'u': return bin[1:] |
|
50 | 50 | raise RevlogError("unknown compression type %s" % t) |
|
51 | 51 | |
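
To make the three encodings concrete, here is a round-trip sketch, assuming compress and decompress from this module are in scope: empty and NUL-prefixed text passes through untouched, short or incompressible text is stored behind a 'u' marker, and everything else is zlib data, whose streams happen to begin with 'x'.

    # Round-trip sketch: decompress(compress(t)) == t in every case.
    for t in ["", "short", "\0binary-ish", "abc" * 1000]:
        assert decompress(compress(t)) == t
    assert compress("short") == "ushort"     # < 44 bytes: stored raw
    assert compress("abc" * 1000)[0] == "x"  # zlib stream first byte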
|
52 | 52 | indexformat = ">4l20s20s20s" |
|
53 | 53 | |
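
Decoded, the format string says each index record is four big-endian 32-bit integers (offset, size, base, linkrev) followed by three 20-byte binary nodes (p1, p2, nodeid), 76 bytes in all; the fixed record size is what makes random access into the index cheap. A small sketch with made-up values:

    import struct

    indexformat = ">4l20s20s20s"
    assert struct.calcsize(indexformat) == 76    # fixed-size records
    rec = struct.pack(indexformat, 0, 11, 0, 0,
                      "\0" * 20, "\0" * 20, "N" * 20)
    offset, size, base, linkrev, p1, p2, node = \
        struct.unpack(indexformat, rec)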
|
54 | 54 | class lazyparser: |
|
55 | 55 | """ |
|
56 | 56 | this class avoids the need to parse the entirety of large indices |
|
57 | 57 | |
|
58 | 58 | By default we parse and load 1000 entries at a time. |
|
59 | 59 | |
|
60 | 60 | If no position is specified, we load the whole index, and replace |
|
61 | 61 | the lazy objects in revlog with the underlying objects for |
|
62 | 62 | efficiency in cases where we look at most of the nodes. |
|
63 | 63 | """ |
|
64 | 64 | def __init__(self, data, revlog): |
|
65 | 65 | self.data = data |
|
66 | 66 | self.s = struct.calcsize(indexformat) |
|
67 | 67 | self.l = len(data)/self.s |
|
68 | 68 | self.index = [None] * self.l |
|
69 | 69 | self.map = {nullid: -1} |
|
70 | 70 | self.all = 0 |
|
71 | 71 | self.revlog = revlog |
|
72 | 72 | |
|
73 | 73 | def load(self, pos=None): |
|
74 | 74 | if self.all: return |
|
75 | 75 | if pos is not None: |
|
76 | 76 | block = pos / 1000 |
|
77 | 77 | i = block * 1000 |
|
78 | 78 | end = min(self.l, i + 1000) |
|
79 | 79 | else: |
|
80 | 80 | self.all = 1 |
|
81 | 81 | i = 0 |
|
82 | 82 | end = self.l |
|
83 | 83 | self.revlog.index = self.index |
|
84 | 84 | self.revlog.nodemap = self.map |
|
85 | 85 | |
|
86 | 86 | while i < end: |
|
87 | 87 | d = self.data[i * self.s: (i + 1) * self.s] |
|
88 | 88 | e = struct.unpack(indexformat, d) |
|
89 | 89 | self.index[i] = e |
|
90 | 90 | self.map[e[6]] = i |
|
91 | 91 | i += 1 |
|
92 | 92 | |
|
93 | 93 | class lazyindex: |
|
94 | 94 | """a lazy version of the index array""" |
|
95 | 95 | def __init__(self, parser): |
|
96 | 96 | self.p = parser |
|
97 | 97 | def __len__(self): |
|
98 | 98 | return len(self.p.index) |
|
99 | 99 | def load(self, pos): |
|
100 | 100 | self.p.load(pos) |
|
101 | 101 | return self.p.index[pos] |
|
102 | 102 | def __getitem__(self, pos): |
|
103 | 103 | return self.p.index[pos] or self.load(pos) |
|
104 | 104 | def append(self, e): |
|
105 | 105 | self.p.index.append(e) |
|
106 | 106 | |
|
107 | 107 | class lazymap: |
|
108 | 108 | """a lazy version of the node map""" |
|
109 | 109 | def __init__(self, parser): |
|
110 | 110 | self.p = parser |
|
111 | 111 | def load(self, key): |
|
112 | 112 | if self.p.all: return |
|
113 | 113 | n = self.p.data.find(key) |
|
114 | 114 | if n < 0: |
|
115 | 115 | raise KeyError(key) |
|
116 | 116 | pos = n / self.p.s |
|
117 | 117 | self.p.load(pos) |
|
118 | 118 | def __contains__(self, key): |
|
119 | 119 | self.p.load() |
|
120 | 120 | return key in self.p.map |
|
121 | 121 | def __iter__(self): |
|
122 | 122 | yield nullid |
|
123 | 123 | for i in xrange(self.p.l): |
|
124 | 124 | try: |
|
125 | 125 | yield self.p.index[i][6] |
|
126 | 126 | except: |
|
127 | 127 | self.p.load(i) |
|
128 | 128 | yield self.p.index[i][6] |
|
129 | 129 | def __getitem__(self, key): |
|
130 | 130 | try: |
|
131 | 131 | return self.p.map[key] |
|
132 | 132 | except KeyError: |
|
133 | 133 | try: |
|
134 | 134 | self.load(key) |
|
135 | 135 | return self.p.map[key] |
|
136 | 136 | except KeyError: |
|
137 | 137 | raise KeyError("node " + hex(key)) |
|
138 | 138 | def __setitem__(self, key, val): |
|
139 | 139 | self.p.map[key] = val |
|
140 | 140 | |
|
141 | 141 | class RevlogError(Exception): pass |
|
142 | 142 | |
|
143 | 143 | class revlog: |
|
144 | 144 | """ |
|
145 | 145 | the underlying revision storage object |
|
146 | 146 | |
|
147 | 147 | A revlog consists of two parts, an index and the revision data. |
|
148 | 148 | |
|
149 | 149 | The index is a file with a fixed record size containing |
|
150 | 150 | information on each revision, including its nodeid (hash), the |
|
151 | 151 | nodeids of its parents, the position and offset of its data within |
|
152 | 152 | the data file, and the revision it's based on. Finally, each entry |
|
153 | 153 | contains a linkrev entry that can serve as a pointer to external |
|
154 | 154 | data. |
|
155 | 155 | |
|
156 | 156 | The revision data itself is a linear collection of data chunks. |
|
157 | 157 | Each chunk represents a revision and is usually represented as a |
|
158 | 158 | delta against the previous chunk. To bound lookup time, runs of |
|
159 | 159 | deltas are limited to about 2 times the length of the original |
|
160 | 160 | version data. This makes retrieval of a version proportional to |
|
161 | 161 | its size, or O(1) relative to the number of revisions. |
|
162 | 162 | |
|
163 | 163 | Both pieces of the revlog are written to in an append-only |
|
164 | 164 | fashion, which means we never need to rewrite a file to insert or |
|
165 | 165 | remove data, and can use some simple techniques to avoid the need |
|
166 | 166 | for locking while reading. |
|
167 | 167 | """ |
|
168 | 168 | def __init__(self, opener, indexfile, datafile): |
|
169 | 169 | """ |
|
170 | 170 | create a revlog object |
|
171 | 171 | |
|
172 | 172 | opener is a function that abstracts the file opening operation |
|
173 | 173 | and can be used to implement COW semantics or the like. |
|
174 | 174 | """ |
|
175 | 175 | self.indexfile = indexfile |
|
176 | 176 | self.datafile = datafile |
|
177 | 177 | self.opener = opener |
|
178 | 178 | self.cache = None |
|
179 | 179 | |
|
180 | 180 | try: |
|
181 | 181 | i = self.opener(self.indexfile).read() |
|
182 | 182 | except IOError, inst: |
|
183 | 183 | if inst.errno != errno.ENOENT: |
|
184 | 184 | raise |
|
185 | 185 | i = "" |
|
186 | 186 | |
|
187 | 187 | if len(i) > 10000: |
|
188 | 188 | # big index, let's parse it on demand |
|
189 | 189 | parser = lazyparser(i, self) |
|
190 | 190 | self.index = lazyindex(parser) |
|
191 | 191 | self.nodemap = lazymap(parser) |
|
192 | 192 | else: |
|
193 | 193 | s = struct.calcsize(indexformat) |
|
194 | 194 | l = len(i) / s |
|
195 | 195 | self.index = [None] * l |
|
196 | 196 | m = [None] * l |
|
197 | 197 | |
|
198 | 198 | n = 0 |
|
199 | 199 | for f in xrange(0, len(i), s): |
|
200 | 200 | # offset, size, base, linkrev, p1, p2, nodeid |
|
201 | 201 | e = struct.unpack(indexformat, i[f:f + s]) |
|
202 | 202 | m[n] = (e[6], n) |
|
203 | 203 | self.index[n] = e |
|
204 | 204 | n += 1 |
|
205 | 205 | |
|
206 | 206 | self.nodemap = dict(m) |
|
207 | 207 | self.nodemap[nullid] = -1 |
|
208 | 208 | |
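
As a minimal illustration of the opener abstraction mentioned in the constructor docstring, the sketch below just opens plain binary files relative to the current directory; Mercurial's real openers layer path handling and copy-on-write semantics on top of the same signature. The file names are hypothetical.

    # Minimal opener sketch (hypothetical file names).
    def opener(path, mode="r"):
        return open(path, mode + "b")

    log = revlog(opener, "test.i", "test.d")
    print log.count()    # 0 for a brand-new (not yet written) revlog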
|
209 | 209 | def tip(self): return self.node(len(self.index) - 1) |
|
210 | 210 | def count(self): return len(self.index) |
|
211 | 211 | def node(self, rev): return (rev < 0) and nullid or self.index[rev][6] |
|
212 | 212 | def rev(self, node): |
|
213 | 213 | try: |
|
214 | 214 | return self.nodemap[node] |
|
215 | 215 | except KeyError: |
|
216 | 216 | raise RevlogError('%s: no node %s' % (self.indexfile, hex(node))) |
|
217 | 217 | def linkrev(self, node): return self.index[self.rev(node)][3] |
|
218 | 218 | def parents(self, node): |
|
219 | 219 | if node == nullid: return (nullid, nullid) |
|
220 | 220 | return self.index[self.rev(node)][4:6] |
|
221 | 221 | |
|
222 | 222 | def start(self, rev): return self.index[rev][0] |
|
223 | 223 | def length(self, rev): return self.index[rev][1] |
|
224 | 224 | def end(self, rev): return self.start(rev) + self.length(rev) |
|
225 | 225 | def base(self, rev): return self.index[rev][2] |
|
226 | 226 | |
|
227 | 227 | def reachable(self, rev, stop=None): |
|
228 | 228 | reachable = {} |
|
229 | 229 | visit = [rev] |
|
230 | 230 | reachable[rev] = 1 |
|
231 | 231 | if stop: |
|
232 | 232 | stopn = self.rev(stop) |
|
233 | 233 | else: |
|
234 | 234 | stopn = 0 |
|
235 | 235 | while visit: |
|
236 | 236 | n = visit.pop(0) |
|
237 | 237 | if n == stop: |
|
238 | 238 | continue |
|
239 | 239 | if n == nullid: |
|
240 | 240 | continue |
|
241 | 241 | for p in self.parents(n): |
|
242 | 242 | if self.rev(p) < stopn: |
|
243 | 243 | continue |
|
244 | 244 | if p not in reachable: |
|
245 | 245 | reachable[p] = 1 |
|
246 | 246 | visit.append(p) |
|
247 | 247 | return reachable |
|
248 | 248 | |
|
249 | 249 | def nodesbetween(self, roots=None, heads=None): |
|
250 | 250 | """Return a tuple containing three elements. Elements 1 and 2 contain |
|
251 | 251 | a final list of bases and heads after all the unreachable ones have been |
|
252 | 252 | pruned. Element 0 contains a topologically sorted list of all |
|
253 | 253 | |
|
254 | 254 | nodes that satisfy these constraints: |
|
255 | 255 | 1. All nodes must be descended from a node in roots (the nodes on |
|
256 | 256 | roots are considered descended from themselves). |
|
257 | 257 | 2. All nodes must also be ancestors of a node in heads (the nodes in |
|
258 | 258 | heads are considered to be their own ancestors). |
|
259 | 259 | |
|
260 | 260 | If roots is unspecified, nullid is assumed as the only root. |
|
261 | 261 | If heads is unspecified, it is taken to be the output of the |
|
262 | 262 | heads method (i.e. a list of all nodes in the repository that |
|
263 | 263 | have no children).""" |
|
264 | 264 | if roots is not None: |
|
265 | 265 | roots = list(roots) |
|
266 | 266 | lowestrev = min([self.rev(n) for n in roots]) |
|
267 | 267 | else: |
|
268 | 268 | roots = [nullid] # Everybody's a descendent of nullid |
|
269 | 269 | lowestrev = -1 |
|
270 | 270 | if (lowestrev == -1) and (heads is None): |
|
271 | 271 | # We want _all_ the nodes! |
|
272 | 272 | return ([self.node(r) for r in xrange(0, self.count())], |
|
273 | 273 | [nullid], list(self.heads())) |
|
274 | 274 | if heads is None: |
|
275 | 275 | # All nodes are ancestors, so the latest ancestor is the last |
|
276 | 276 | # node. |
|
277 | 277 | highestrev = self.count() - 1 |
|
278 | 278 | # Set ancestors to None to signal that every node is an ancestor. |
|
279 | 279 | ancestors = None |
|
280 | 280 | # Set heads to an empty dictionary for later discovery of heads |
|
281 | 281 | heads = {} |
|
282 | 282 | else: |
|
283 | 283 | ancestors = {} |
|
284 | 284 | # Start at the top and keep marking parents until we're done. |
|
285 | 285 | nodestotag = list(heads) |
|
286 | 286 | # Turn heads into a dictionary so we can remove 'fake' heads. |
|
287 | 287 | # Also, later we will be using it to filter out the heads we can't |
|
288 | 288 | # find from roots. |
|
289 | 289 | heads = dict.fromkeys(heads, 0) |
|
290 | 290 | # Remember where the top was so we can use it as a limit later. |
|
291 | 291 | highestrev = max([self.rev(n) for n in nodestotag]) |
|
292 | 292 | while nodestotag: |
|
293 | 293 | # grab a node to tag |
|
294 | 294 | n = nodestotag.pop() |
|
295 | 295 | # Never tag nullid |
|
296 | 296 | if n == nullid: |
|
297 | 297 | continue |
|
298 | 298 | # A node's revision number represents its place in a |
|
299 | 299 | # topologically sorted list of nodes. |
|
300 | 300 | r = self.rev(n) |
|
301 | 301 | if r >= lowestrev: |
|
302 | 302 | if n not in ancestors: |
|
303 | 303 | # If we are possibly a descendent of one of the roots |
|
304 | 304 | # and we haven't already been marked as an ancestor |
|
305 | 305 | ancestors[n] = 1 # Mark as ancestor |
|
306 | 306 | # Add non-nullid parents to list of nodes to tag. |
|
307 | 307 | nodestotag.extend([p for p in self.parents(n) if |
|
308 | 308 | p != nullid]) |
|
309 | 309 | elif n in heads: # We've seen it before, is it a fake head? |
|
310 | 310 | # So it is; real heads should not be the ancestors of |
|
311 | 311 | # any other heads. |
|
312 | 312 | heads.pop(n) |
|
313 | if not ancestors: | |
|
314 | return ([], [], []) | |
|
313 | 315 | # Now that we have our set of ancestors, we want to remove any |
|
314 | 316 | # roots that are not ancestors. |
|
315 | 317 | |
|
316 | 318 | # If one of the roots was nullid, everything is included anyway. |
|
317 | 319 | if lowestrev > -1: |
|
318 | 320 | # But since nullid wasn't a root, recompute the lowest rev to |
|
319 | 321 | # exclude roots that aren't ancestors. |
|
320 | 322 | |
|
321 | 323 | # Filter out roots that aren't ancestors of heads |
|
322 | 324 | roots = [n for n in roots if n in ancestors] |
|
323 | 325 | # Recompute the lowest revision |
|
324 | 326 | if roots: |
|
325 | 327 | lowestrev = min([self.rev(n) for n in roots]) |
|
326 | 328 | else: |
|
327 | 329 | # No more roots? Return empty list |
|
328 | 330 | return ([], [], []) |
|
329 | 331 | else: |
|
330 | 332 | # We are descending from nullid, and don't need to care about |
|
331 | 333 | # any other roots. |
|
332 | 334 | lowestrev = -1 |
|
333 | 335 | roots = [nullid] |
|
334 | 336 | # Transform our roots list into a 'set' (i.e. a dictionary where the |
|
335 | 337 | # values don't matter). |
|
336 | 338 | descendents = dict.fromkeys(roots, 1) |
|
337 | 339 | # Also, keep the original roots so we can filter out roots that aren't |
|
338 | 340 | # 'real' roots (i.e. are descended from other roots). |
|
339 | 341 | roots = descendents.copy() |
|
340 | 342 | # Our topologically sorted list of output nodes. |
|
341 | 343 | orderedout = [] |
|
342 | 344 | # Don't start at nullid since we don't want nullid in our output list, |
|
343 | 345 | # and if nullid shows up in descendents, empty parents will look like |
|
344 | 346 | # they're descendents. |
|
345 | 347 | for r in xrange(max(lowestrev, 0), highestrev + 1): |
|
346 | 348 | n = self.node(r) |
|
347 | 349 | isdescendent = False |
|
348 | 350 | if lowestrev == -1: # Everybody is a descendent of nullid |
|
349 | 351 | isdescendent = True |
|
350 | 352 | elif n in descendents: |
|
351 | 353 | # n is already a descendent |
|
352 | 354 | isdescendent = True |
|
353 | 355 | # This check only needs to be done here because all the roots |
|
354 | 356 | # will start being marked as descendents before the loop. |
|
355 | 357 | if n in roots: |
|
356 | 358 | # If n was a root, check if it's a 'real' root. |
|
357 | 359 | p = tuple(self.parents(n)) |
|
358 | 360 | # If any of its parents are descendents, it's not a root. |
|
359 | 361 | if (p[0] in descendents) or (p[1] in descendents): |
|
360 | 362 | roots.pop(n) |
|
361 | 363 | else: |
|
362 | 364 | p = tuple(self.parents(n)) |
|
363 | 365 | # A node is a descendent if either of its parents are |
|
364 | 366 | # descendents. (We seeded the descendents list with the roots |
|
365 | 367 | # up there, remember?) |
|
366 | 368 | if (p[0] in descendents) or (p[1] in descendents): |
|
367 | 369 | descendents[n] = 1 |
|
368 | 370 | isdescendent = True |
|
369 | 371 | if isdescendent and ((ancestors is None) or (n in ancestors)): |
|
370 | 372 | # Only include nodes that are both descendents and ancestors. |
|
371 | 373 | orderedout.append(n) |
|
372 | 374 | if (ancestors is not None) and (n in heads): |
|
373 | 375 | # We're trying to figure out which heads are reachable |
|
374 | 376 | # from roots. |
|
375 | 377 | # Mark this head as having been reached |
|
376 | 378 | heads[n] = 1 |
|
377 | 379 | elif ancestors is None: |
|
378 | 380 | # Otherwise, we're trying to discover the heads. |
|
379 | 381 | # Assume this is a head because if it isn't, the next step |
|
380 | 382 | # will eventually remove it. |
|
381 | 383 | heads[n] = 1 |
|
382 | 384 | # But, obviously its parents aren't. |
|
383 | 385 | for p in self.parents(n): |
|
384 | 386 | heads.pop(p, None) |
|
385 | 387 | heads = [n for n in heads.iterkeys() if heads[n] != 0] |
|
386 | 388 | roots = roots.keys() |
|
387 | 389 | assert orderedout |
|
388 | 390 | assert roots |
|
389 | 391 | assert heads |
|
390 | 392 | return (orderedout, roots, heads) |
|
391 | 393 | |
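
A usage sketch with hypothetical nodes a, b and c forming the linear history a -> b -> c in an existing revlog log:

    # Hypothetical: a -> b -> c is a linear history in 'log'.
    nodes, bases, heads = log.nodesbetween(roots=[b], heads=[c])
    # nodes == [b, c]  (topologically sorted, endpoints included)
    # bases == [b]     (roots that actually reach some head survive)
    # heads == [c]     (heads actually reachable from the roots)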
|
392 | 394 | def heads(self, stop=None): |
|
393 | 395 | """return the list of all nodes that have no children""" |
|
394 | 396 | p = {} |
|
395 | 397 | h = [] |
|
396 | 398 | stoprev = 0 |
|
397 | 399 | if stop and stop in self.nodemap: |
|
398 | 400 | stoprev = self.rev(stop) |
|
399 | 401 | |
|
400 | 402 | for r in range(self.count() - 1, -1, -1): |
|
401 | 403 | n = self.node(r) |
|
402 | 404 | if n not in p: |
|
403 | 405 | h.append(n) |
|
404 | 406 | if n == stop: |
|
405 | 407 | break |
|
406 | 408 | if r < stoprev: |
|
407 | 409 | break |
|
408 | 410 | for pn in self.parents(n): |
|
409 | 411 | p[pn] = 1 |
|
410 | 412 | return h |
|
411 | 413 | |
|
412 | 414 | def children(self, node): |
|
413 | 415 | """find the children of a given node""" |
|
414 | 416 | c = [] |
|
415 | 417 | p = self.rev(node) |
|
416 | 418 | for r in range(p + 1, self.count()): |
|
417 | 419 | n = self.node(r) |
|
418 | 420 | for pn in self.parents(n): |
|
419 | 421 | if pn == node: |
|
420 | 422 | c.append(n) |
|
421 | 423 | continue |
|
422 | 424 | elif pn == nullid: |
|
423 | 425 | continue |
|
424 | 426 | return c |
|
425 | 427 | |
|
426 | 428 | def lookup(self, id): |
|
427 | 429 | """locate a node based on revision number or prefix of hex nodeid""" |
|
428 | 430 | try: |
|
429 | 431 | rev = int(id) |
|
430 | 432 | if str(rev) != id: raise ValueError |
|
431 | 433 | if rev < 0: rev = self.count() + rev |
|
432 | 434 | if rev < 0 or rev >= self.count(): raise ValueError |
|
433 | 435 | return self.node(rev) |
|
434 | 436 | except (ValueError, OverflowError): |
|
435 | 437 | c = [] |
|
436 | 438 | for n in self.nodemap: |
|
437 | 439 | if hex(n).startswith(id): |
|
438 | 440 | c.append(n) |
|
439 | 441 | if len(c) > 1: raise KeyError("Ambiguous identifier") |
|
440 | 442 | if len(c) < 1: raise KeyError("No match found") |
|
441 | 443 | return c[0] |
|
442 | 444 | |
|
443 | 445 | return None |
|
444 | 446 | |
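
The three spellings lookup accepts, sketched against a hypothetical revlog log (the hex prefix shown is made up):

    log.lookup("0")         # decimal revision number
    log.lookup("-1")        # negative revs count back from the tip
    log.lookup("1e4e1b8f")  # unique prefix of a hex nodeid (made up)
    # a prefix matching several nodes, or none, raises KeyError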
|
445 | 447 | def diff(self, a, b): |
|
446 | 448 | """return a delta between two revisions""" |
|
447 | 449 | return mdiff.textdiff(a, b) |
|
448 | 450 | |
|
449 | 451 | def patches(self, t, pl): |
|
450 | 452 | """apply a list of patches to a string""" |
|
451 | 453 | return mdiff.patches(t, pl) |
|
452 | 454 | |
|
453 | 455 | def delta(self, node): |
|
454 | 456 | """return or calculate a delta between a node and its predecessor""" |
|
455 | 457 | r = self.rev(node) |
|
456 | 458 | b = self.base(r) |
|
457 | 459 | if r == b: |
|
458 | 460 | return self.diff(self.revision(self.node(r - 1)), |
|
459 | 461 | self.revision(node)) |
|
460 | 462 | else: |
|
461 | 463 | f = self.opener(self.datafile) |
|
462 | 464 | f.seek(self.start(r)) |
|
463 | 465 | data = f.read(self.length(r)) |
|
464 | 466 | return decompress(data) |
|
465 | 467 | |
|
466 | 468 | def revision(self, node): |
|
467 | 469 | """return an uncompressed revision of a given node""" |
|
468 | 470 | if node == nullid: return "" |
|
469 | 471 | if self.cache and self.cache[0] == node: return self.cache[2] |
|
470 | 472 | |
|
471 | 473 | # look up what we need to read |
|
472 | 474 | text = None |
|
473 | 475 | rev = self.rev(node) |
|
474 | 476 | start, length, base, link, p1, p2, node = self.index[rev] |
|
475 | 477 | end = start + length |
|
476 | 478 | if base != rev: start = self.start(base) |
|
477 | 479 | |
|
478 | 480 | # do we have useful data cached? |
|
479 | 481 | if self.cache and self.cache[1] >= base and self.cache[1] < rev: |
|
480 | 482 | base = self.cache[1] |
|
481 | 483 | start = self.start(base + 1) |
|
482 | 484 | text = self.cache[2] |
|
483 | 485 | last = 0 |
|
484 | 486 | |
|
485 | 487 | f = self.opener(self.datafile) |
|
486 | 488 | f.seek(start) |
|
487 | 489 | data = f.read(end - start) |
|
488 | 490 | |
|
489 | 491 | if text is None: |
|
490 | 492 | last = self.length(base) |
|
491 | 493 | text = decompress(data[:last]) |
|
492 | 494 | |
|
493 | 495 | bins = [] |
|
494 | 496 | for r in xrange(base + 1, rev + 1): |
|
495 | 497 | s = self.length(r) |
|
496 | 498 | bins.append(decompress(data[last:last + s])) |
|
497 | 499 | last = last + s |
|
498 | 500 | |
|
499 | 501 | text = mdiff.patches(text, bins) |
|
500 | 502 | |
|
501 | 503 | if node != hash(text, p1, p2): |
|
502 | 504 | raise RevlogError("integrity check failed on %s:%d" |
|
503 | 505 | % (self.datafile, rev)) |
|
504 | 506 | |
|
505 | 507 | self.cache = (node, rev, text) |
|
506 | 508 | return text |
|
507 | 509 | |
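
Stripped of the caching and file handling, revision() reduces to the loop below: take the full text stored at base(rev), then patch forward through every stored delta up to rev. The chunk() argument is hypothetical shorthand for "read and decompress revision r's span of the data file"; mdiff is the module demand-loaded at the top of this file.

    # The reconstruction idea in isolation (chunk() is hypothetical).
    def reconstruct(log, rev, chunk):
        base = log.base(rev)
        text = chunk(base)                    # stored full version
        bins = [chunk(r) for r in xrange(base + 1, rev + 1)]
        return mdiff.patches(text, bins)      # apply deltas in order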
|
508 | 510 | def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): |
|
509 | 511 | """add a revision to the log |
|
510 | 512 | |
|
511 | 513 | text - the revision data to add |
|
512 | 514 | transaction - the transaction object used for rollback |
|
513 | 515 | link - the linkrev data to add |
|
514 | 516 | p1, p2 - the parent nodeids of the revision |
|
515 | 517 | d - an optional precomputed delta |
|
516 | 518 | """ |
|
517 | 519 | if text is None: text = "" |
|
518 | 520 | if p1 is None: p1 = self.tip() |
|
519 | 521 | if p2 is None: p2 = nullid |
|
520 | 522 | |
|
521 | 523 | node = hash(text, p1, p2) |
|
522 | 524 | |
|
523 | 525 | if node in self.nodemap: |
|
524 | 526 | return node |
|
525 | 527 | |
|
526 | 528 | n = self.count() |
|
527 | 529 | t = n - 1 |
|
528 | 530 | |
|
529 | 531 | if n: |
|
530 | 532 | base = self.base(t) |
|
531 | 533 | start = self.start(base) |
|
532 | 534 | end = self.end(t) |
|
533 | 535 | if not d: |
|
534 | 536 | prev = self.revision(self.tip()) |
|
535 | 537 | d = self.diff(prev, text) |
|
536 | 538 | data = compress(d) |
|
537 | 539 | dist = end - start + len(data) |
|
538 | 540 | |
|
539 | 541 | # full versions are inserted when the needed deltas |
|
540 | 542 | # become comparable to the uncompressed text |
|
541 | 543 | if not n or dist > len(text) * 2: |
|
542 | 544 | data = compress(text) |
|
543 | 545 | base = n |
|
544 | 546 | else: |
|
545 | 547 | base = self.base(t) |
|
546 | 548 | |
|
547 | 549 | offset = 0 |
|
548 | 550 | if t >= 0: |
|
549 | 551 | offset = self.end(t) |
|
550 | 552 | |
|
551 | 553 | e = (offset, len(data), base, link, p1, p2, node) |
|
552 | 554 | |
|
553 | 555 | self.index.append(e) |
|
554 | 556 | self.nodemap[node] = n |
|
555 | 557 | entry = struct.pack(indexformat, *e) |
|
556 | 558 | |
|
557 | 559 | transaction.add(self.datafile, e[0]) |
|
558 | 560 | self.opener(self.datafile, "a").write(data) |
|
559 | 561 | transaction.add(self.indexfile, n * len(entry)) |
|
560 | 562 | self.opener(self.indexfile, "a").write(entry) |
|
561 | 563 | |
|
562 | 564 | self.cache = (node, n, text) |
|
563 | 565 | return node |
|
564 | 566 | |
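
The decision at the heart of addrevision, restated on its own: dist spans from the chain's full base through the proposed new delta, and once it exceeds twice the new text a fresh full version is stored instead of another delta.

    # The snapshot rule in isolation (hypothetical helper name).
    def needs_snapshot(dist, textlen):
        return dist > textlen * 2

    assert needs_snapshot(2100, 1000)      # chain too long: snapshot
    assert not needs_snapshot(1900, 1000)  # keep appending deltas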
|
565 | 567 | def ancestor(self, a, b): |
|
566 | 568 | """calculate the least common ancestor of nodes a and b""" |
|
567 | 569 | # calculate the distance of every node from root |
|
568 | 570 | dist = {nullid: 0} |
|
569 | 571 | for i in xrange(self.count()): |
|
570 | 572 | n = self.node(i) |
|
571 | 573 | p1, p2 = self.parents(n) |
|
572 | 574 | dist[n] = max(dist[p1], dist[p2]) + 1 |
|
573 | 575 | |
|
574 | 576 | # traverse ancestors in order of decreasing distance from root |
|
575 | 577 | def ancestors(node): |
|
576 | 578 | # we store negative distances because heap returns smallest member |
|
577 | 579 | h = [(-dist[node], node)] |
|
578 | 580 | seen = {} |
|
579 | 581 | earliest = self.count() |
|
580 | 582 | while h: |
|
581 | 583 | d, n = heapq.heappop(h) |
|
582 | 584 | if n not in seen: |
|
583 | 585 | seen[n] = 1 |
|
584 | 586 | r = self.rev(n) |
|
585 | 587 | yield (-d, n) |
|
586 | 588 | for p in self.parents(n): |
|
587 | 589 | heapq.heappush(h, (-dist[p], p)) |
|
588 | 590 | |
|
589 | 591 | def generations(node): |
|
590 | 592 | sg, s = None, {} |
|
591 | 593 | for g,n in ancestors(node): |
|
592 | 594 | if g != sg: |
|
593 | 595 | if sg: |
|
594 | 596 | yield sg, s |
|
595 | 597 | sg, s = g, {n:1} |
|
596 | 598 | else: |
|
597 | 599 | s[n] = 1 |
|
598 | 600 | yield sg, s |
|
599 | 601 | |
|
600 | 602 | x = generations(a) |
|
601 | 603 | y = generations(b) |
|
602 | 604 | gx = x.next() |
|
603 | 605 | gy = y.next() |
|
604 | 606 | |
|
605 | 607 | # increment each ancestor list until it is closer to root than |
|
606 | 608 | # the other, or they match |
|
607 | 609 | while 1: |
|
608 | 610 | #print "ancestor gen %s %s" % (gx[0], gy[0]) |
|
609 | 611 | if gx[0] == gy[0]: |
|
610 | 612 | # find the intersection |
|
611 | 613 | i = [ n for n in gx[1] if n in gy[1] ] |
|
612 | 614 | if i: |
|
613 | 615 | return i[0] |
|
614 | 616 | else: |
|
615 | 617 | #print "next" |
|
616 | 618 | gy = y.next() |
|
617 | 619 | gx = x.next() |
|
618 | 620 | elif gx[0] < gy[0]: |
|
619 | 621 | #print "next y" |
|
620 | 622 | gy = y.next() |
|
621 | 623 | else: |
|
622 | 624 | #print "next x" |
|
623 | 625 | gx = x.next() |
|
624 | 626 | |
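
In use, ancestor() is what picks a merge base. A sketch with a hypothetical diamond history, where r is the common parent of two heads a and b in an existing revlog log:

    # Hypothetical: r -> a and r -> b in 'log'.
    assert log.ancestor(a, b) == r
    assert log.ancestor(a, a) == a   # a node is its own ancestor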
|
625 | 627 | def group(self, nodelist, lookup, infocollect = None): |
|
626 | 628 | """calculate a delta group |
|
627 | 629 | |
|
628 | 630 | Given a list of changeset revs, return a set of deltas and |
|
629 | 631 | metadata corresponding to nodes. The first delta is |
|
630 | 632 | parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to |
|
631 | 633 | have this parent, as it has all the history before these |
|
632 | 634 | changesets. The parent used is parents(nodes[0])[0]. |
|
633 | 635 | """ |
|
634 | 636 | revs = [self.rev(n) for n in nodelist] |
|
635 | 637 | needed = dict.fromkeys(revs, 1) |
|
636 | 638 | |
|
637 | 639 | # if we don't have any revisions touched by these changesets, bail |
|
638 | 640 | if not revs: |
|
639 | 641 | yield struct.pack(">l", 0) |
|
640 | 642 | return |
|
641 | 643 | |
|
642 | 644 | # add the parent of the first rev |
|
643 | 645 | p = self.parents(self.node(revs[0]))[0] |
|
644 | 646 | revs.insert(0, self.rev(p)) |
|
645 | 647 | |
|
646 | 648 | # for each delta that isn't contiguous in the log, we need to |
|
647 | 649 | # reconstruct the base, reconstruct the result, and then |
|
648 | 650 | # calculate the delta. We also need to do this where we've |
|
649 | 651 | # stored a full version and not a delta |
|
650 | 652 | for i in xrange(0, len(revs) - 1): |
|
651 | 653 | a, b = revs[i], revs[i + 1] |
|
652 | 654 | if a + 1 != b or self.base(b) == b: |
|
653 | 655 | for j in xrange(self.base(a), a + 1): |
|
654 | 656 | needed[j] = 1 |
|
655 | 657 | for j in xrange(self.base(b), b + 1): |
|
656 | 658 | needed[j] = 1 |
|
657 | 659 | |
|
658 | 660 | # calculate spans to retrieve from datafile |
|
659 | 661 | needed = needed.keys() |
|
660 | 662 | needed.sort() |
|
661 | 663 | spans = [] |
|
662 | 664 | oo = -1 |
|
663 | 665 | ol = 0 |
|
664 | 666 | for n in needed: |
|
665 | 667 | if n < 0: continue |
|
666 | 668 | o = self.start(n) |
|
667 | 669 | l = self.length(n) |
|
668 | 670 | if oo + ol == o: # can we merge with the previous? |
|
669 | 671 | nl = spans[-1][2] |
|
670 | 672 | nl.append((n, l)) |
|
671 | 673 | ol += l |
|
672 | 674 | spans[-1] = (oo, ol, nl) |
|
673 | 675 | else: |
|
674 | 676 | oo = o |
|
675 | 677 | ol = l |
|
676 | 678 | spans.append((oo, ol, [(n, l)])) |
|
677 | 679 | |
|
678 | 680 | # read spans in, divide up chunks |
|
679 | 681 | chunks = {} |
|
680 | 682 | for span in spans: |
|
681 | 683 | # we reopen the file for each span to make http happy for now |
|
682 | 684 | f = self.opener(self.datafile) |
|
683 | 685 | f.seek(span[0]) |
|
684 | 686 | data = f.read(span[1]) |
|
685 | 687 | |
|
686 | 688 | # divide up the span |
|
687 | 689 | pos = 0 |
|
688 | 690 | for r, l in span[2]: |
|
689 | 691 | chunks[r] = decompress(data[pos: pos + l]) |
|
690 | 692 | pos += l |
|
691 | 693 | |
|
692 | 694 | # helper to reconstruct intermediate versions |
|
693 | 695 | def construct(text, base, rev): |
|
694 | 696 | bins = [chunks[r] for r in xrange(base + 1, rev + 1)] |
|
695 | 697 | return mdiff.patches(text, bins) |
|
696 | 698 | |
|
697 | 699 | # build deltas |
|
698 | 700 | deltas = [] |
|
699 | 701 | for d in xrange(0, len(revs) - 1): |
|
700 | 702 | a, b = revs[d], revs[d + 1] |
|
701 | 703 | n = self.node(b) |
|
702 | 704 | |
|
703 | 705 | if infocollect is not None: |
|
704 | 706 | infocollect(n) |
|
705 | 707 | |
|
706 | 708 | # do we need to construct a new delta? |
|
707 | 709 | if a + 1 != b or self.base(b) == b: |
|
708 | 710 | if a >= 0: |
|
709 | 711 | base = self.base(a) |
|
710 | 712 | ta = chunks[self.base(a)] |
|
711 | 713 | ta = construct(ta, base, a) |
|
712 | 714 | else: |
|
713 | 715 | ta = "" |
|
714 | 716 | |
|
715 | 717 | base = self.base(b) |
|
716 | 718 | if a > base: |
|
717 | 719 | base = a |
|
718 | 720 | tb = ta |
|
719 | 721 | else: |
|
720 | 722 | tb = chunks[self.base(b)] |
|
721 | 723 | tb = construct(tb, base, b) |
|
722 | 724 | d = self.diff(ta, tb) |
|
723 | 725 | else: |
|
724 | 726 | d = chunks[b] |
|
725 | 727 | |
|
726 | 728 | p = self.parents(n) |
|
727 | 729 | meta = n + p[0] + p[1] + lookup(n) |
|
728 | 730 | l = struct.pack(">l", len(meta) + len(d) + 4) |
|
729 | 731 | yield l |
|
730 | 732 | yield meta |
|
731 | 733 | yield d |
|
732 | 734 | |
|
733 | 735 | yield struct.pack(">l", 0) |
|
734 | 736 | |
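
The stream group() produces is self-delimiting: each frame begins with a 4-byte big-endian length that counts itself, followed by 80 bytes of metadata (node, p1, p2, linked changeset) and then the delta, and a zero length closes the stream. A reader sketch, mirroring how addgroup() below slices its chunks:

    import struct

    def readgroup(f):
        """deframe a delta group from file-like object f (sketch)"""
        while 1:
            l = struct.unpack(">l", f.read(4))[0]
            if l <= 4:
                break                         # zero length: all done
            chunk = f.read(l - 4)
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            yield node, p1, p2, cs, chunk[80:]    # metadata + delta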
|
735 | 737 | def addgroup(self, revs, linkmapper, transaction, unique=0): |
|
736 | 738 | """ |
|
737 | 739 | add a delta group |
|
738 | 740 | |
|
739 | 741 | Given a set of deltas, add them to the revision log. The |
|
740 | 742 | first delta is against its parent, which should already be |
|
741 | 743 | in our log; the rest are against the previous delta. |
|
742 | 744 | """ |
|
743 | 745 | |
|
744 | 746 | # track the base of the current delta log |
|
745 | 747 | r = self.count() |
|
746 | 748 | t = r - 1 |
|
747 | 749 | node = nullid |
|
748 | 750 | |
|
749 | 751 | base = prev = -1 |
|
750 | 752 | start = end = measure = 0 |
|
751 | 753 | if r: |
|
752 | 754 | start = self.start(self.base(t)) |
|
753 | 755 | end = self.end(t) |
|
754 | 756 | measure = self.length(self.base(t)) |
|
755 | 757 | base = self.base(t) |
|
756 | 758 | prev = self.tip() |
|
757 | 759 | |
|
758 | 760 | transaction.add(self.datafile, end) |
|
759 | 761 | transaction.add(self.indexfile, r * struct.calcsize(indexformat)) |
|
760 | 762 | dfh = self.opener(self.datafile, "a") |
|
761 | 763 | ifh = self.opener(self.indexfile, "a") |
|
762 | 764 | |
|
763 | 765 | # loop through our set of deltas |
|
764 | 766 | chain = None |
|
765 | 767 | for chunk in revs: |
|
766 | 768 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) |
|
767 | 769 | link = linkmapper(cs) |
|
768 | 770 | if node in self.nodemap: |
|
769 | 771 | # this can happen if two branches make the same change |
|
770 | 772 | # if unique: |
|
771 | 773 | # raise RevlogError("already have %s" % hex(node[:4])) |
|
772 | 774 | chain = node |
|
773 | 775 | continue |
|
774 | 776 | delta = chunk[80:] |
|
775 | 777 | |
|
776 | 778 | if not chain: |
|
777 | 779 | # retrieve the parent revision of the delta chain |
|
778 | 780 | chain = p1 |
|
779 | 781 | if not chain in self.nodemap: |
|
780 | 782 | raise RevlogError("unknown base %s" % short(chain[:4])) |
|
781 | 783 | |
|
782 | 784 | # full versions are inserted when the needed deltas become |
|
783 | 785 | # comparable to the uncompressed text or when the previous |
|
784 | 786 | # version is not the one we have a delta against. We use |
|
785 | 787 | # the size of the previous full rev as a proxy for the |
|
786 | 788 | # current size. |
|
787 | 789 | |
|
788 | 790 | if chain == prev: |
|
789 | 791 | cdelta = compress(delta) |
|
790 | 792 | |
|
791 | 793 | if chain != prev or (end - start + len(cdelta)) > measure * 2: |
|
792 | 794 | # flush our writes here so we can read it in revision |
|
793 | 795 | dfh.flush() |
|
794 | 796 | ifh.flush() |
|
795 | 797 | text = self.revision(chain) |
|
796 | 798 | text = self.patches(text, [delta]) |
|
797 | 799 | chk = self.addrevision(text, transaction, link, p1, p2) |
|
798 | 800 | if chk != node: |
|
799 | 801 | raise RevlogError("consistency error adding group") |
|
800 | 802 | measure = len(text) |
|
801 | 803 | else: |
|
802 | 804 | e = (end, len(cdelta), self.base(t), link, p1, p2, node) |
|
803 | 805 | self.index.append(e) |
|
804 | 806 | self.nodemap[node] = r |
|
805 | 807 | dfh.write(cdelta) |
|
806 | 808 | ifh.write(struct.pack(indexformat, *e)) |
|
807 | 809 | |
|
808 | 810 | t, r, chain, prev = r, r + 1, node, node |
|
809 | 811 | start = self.start(self.base(t)) |
|
810 | 812 | end = self.end(t) |
|
811 | 813 | |
|
812 | 814 | dfh.close() |
|
813 | 815 | ifh.close() |
|
814 | 816 | return node |