lookup: speed up partial lookup
Matt Mackall
r7365:ec3aafa8 default
@@ -1,1370 +1,1370 @@
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import bin, hex, nullid, nullrev, short
14 14 from i18n import _
15 15 import changegroup, errno, ancestor, mdiff, parsers
16 16 import struct, util, zlib
17 17
18 18 _pack = struct.pack
19 19 _unpack = struct.unpack
20 20 _compress = zlib.compress
21 21 _decompress = zlib.decompress
22 22 _sha = util.sha1
23 23
24 24 # revlog flags
25 25 REVLOGV0 = 0
26 26 REVLOGNG = 1
27 27 REVLOGNGINLINEDATA = (1 << 16)
28 28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 29 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31 31
32 32 class RevlogError(Exception):
33 33 pass
34 34
35 35 class LookupError(RevlogError, KeyError):
36 36 def __init__(self, name, index, message):
37 37 self.name = name
38 38 if isinstance(name, str) and len(name) == 20:
39 39 name = short(name)
40 40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
41 41
42 42 def __str__(self):
43 43 return RevlogError.__str__(self)
44 44
45 45 def getoffset(q):
46 46 return int(q >> 16)
47 47
48 48 def gettype(q):
49 49 return int(q & 0xFFFF)
50 50
51 51 def offset_type(offset, type):
52 52 return long(long(offset) << 16 | type)
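# Illustrative sketch (not part of the original revlog.py): the first field
# of an index entry packs the data-file offset into the high 48 bits and the
# per-revision flags into the low 16 bits, so the three helpers above are
# inverses of one another. With hypothetical values:
#
#   packed = offset_type(4096, 0)      # offset 4096, flags 0
#   assert getoffset(packed) == 4096
#   assert gettype(packed) == 0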
53 53
54 54 def hash(text, p1, p2):
55 55 """generate a hash from the given text and its parent hashes
56 56
57 57 This hash combines both the current file contents and its history
58 58 in a manner that makes it easy to distinguish nodes with the same
59 59 content in the revision graph.
60 60 """
61 61 l = [p1, p2]
62 62 l.sort()
63 63 s = _sha(l[0])
64 64 s.update(l[1])
65 65 s.update(text)
66 66 return s.digest()
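# Illustrative sketch (not part of the original revlog.py): the nodeid is the
# SHA-1 of the two parent nodeids (sorted) followed by the revision text, so
# an equivalent standalone computation with hashlib would look like this
# (hash() above goes through util.sha1 instead of hashlib directly):
#
#   import hashlib
#   def nodeid(text, p1, p2):
#       a, b = sorted([p1, p2])
#       return hashlib.sha1(a + b + text).digest()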
67 67
68 68 def compress(text):
69 69 """ generate a possibly-compressed representation of text """
70 70 if not text:
71 71 return ("", text)
72 72 l = len(text)
73 73 bin = None
74 74 if l < 44:
75 75 pass
76 76 elif l > 1000000:
77 77 # zlib makes an internal copy, thus doubling memory usage for
78 78 # large files, so let's do this in pieces
79 79 z = zlib.compressobj()
80 80 p = []
81 81 pos = 0
82 82 while pos < l:
83 83 pos2 = pos + 2**20
84 84 p.append(z.compress(text[pos:pos2]))
85 85 pos = pos2
86 86 p.append(z.flush())
87 87 if sum(map(len, p)) < l:
88 88 bin = "".join(p)
89 89 else:
90 90 bin = _compress(text)
91 91 if bin is None or len(bin) > l:
92 92 if text[0] == '\0':
93 93 return ("", text)
94 94 return ('u', text)
95 95 return ("", bin)
96 96
97 97 def decompress(bin):
98 98 """ decompress the given input """
99 99 if not bin:
100 100 return bin
101 101 t = bin[0]
102 102 if t == '\0':
103 103 return bin
104 104 if t == 'x':
105 105 return _decompress(bin)
106 106 if t == 'u':
107 107 return bin[1:]
108 108 raise RevlogError(_("unknown compression type %r") % t)
109 109
110 110 class lazyparser(object):
111 111 """
112 112 this class avoids the need to parse the entirety of large indices
113 113 """
114 114
115 115 # lazyparser is not safe to use on windows if the win32 extensions are
116 116 # not available. It keeps the file handle open, which makes it impossible
117 117 # to break hardlinks on locally cloned repos.
118 118
119 119 def __init__(self, dataf, size):
120 120 self.dataf = dataf
121 121 self.s = struct.calcsize(indexformatng)
122 122 self.datasize = size
123 123 self.l = size/self.s
124 124 self.index = [None] * self.l
125 125 self.map = {nullid: nullrev}
126 126 self.allmap = 0
127 127 self.all = 0
128 128 self.mapfind_count = 0
129 129
130 130 def loadmap(self):
131 131 """
132 132 during a commit, we need to make sure the rev being added is
133 133 not a duplicate. This requires loading the entire index,
134 134 which is fairly slow. loadmap can load up just the node map,
135 135 which takes much less time.
136 136 """
137 137 if self.allmap:
138 138 return
139 139 end = self.datasize
140 140 self.allmap = 1
141 141 cur = 0
142 142 count = 0
143 143 blocksize = self.s * 256
144 144 self.dataf.seek(0)
145 145 while cur < end:
146 146 data = self.dataf.read(blocksize)
147 147 off = 0
148 148 for x in xrange(256):
149 149 n = data[off + ngshaoffset:off + ngshaoffset + 20]
150 150 self.map[n] = count
151 151 count += 1
152 152 if count >= self.l:
153 153 break
154 154 off += self.s
155 155 cur += blocksize
156 156
157 157 def loadblock(self, blockstart, blocksize, data=None):
158 158 if self.all:
159 159 return
160 160 if data is None:
161 161 self.dataf.seek(blockstart)
162 162 if blockstart + blocksize > self.datasize:
163 163 # the revlog may have grown since we've started running,
164 164 # but we don't have space in self.index for more entries.
165 165 # limit blocksize so that we don't get too much data.
166 166 blocksize = max(self.datasize - blockstart, 0)
167 167 data = self.dataf.read(blocksize)
168 168 lend = len(data) / self.s
169 169 i = blockstart / self.s
170 170 off = 0
171 171 # lazyindex supports __delitem__
172 172 if lend > len(self.index) - i:
173 173 lend = len(self.index) - i
174 174 for x in xrange(lend):
175 175 if self.index[i + x] == None:
176 176 b = data[off : off + self.s]
177 177 self.index[i + x] = b
178 178 n = b[ngshaoffset:ngshaoffset + 20]
179 179 self.map[n] = i + x
180 180 off += self.s
181 181
182 182 def findnode(self, node):
183 183 """search backwards through the index file for a specific node"""
184 184 if self.allmap:
185 185 return None
186 186
187 187 # hg log will cause many many searches for the manifest
188 188 # nodes. After we get called a few times, just load the whole
189 189 # thing.
190 190 if self.mapfind_count > 8:
191 191 self.loadmap()
192 192 if node in self.map:
193 193 return node
194 194 return None
195 195 self.mapfind_count += 1
196 196 last = self.l - 1
197 197 while self.index[last] != None:
198 198 if last == 0:
199 199 self.all = 1
200 200 self.allmap = 1
201 201 return None
202 202 last -= 1
203 203 end = (last + 1) * self.s
204 204 blocksize = self.s * 256
205 205 while end >= 0:
206 206 start = max(end - blocksize, 0)
207 207 self.dataf.seek(start)
208 208 data = self.dataf.read(end - start)
209 209 findend = end - start
210 210 while True:
211 211 # we're searching backwards, so we have to make sure
212 212 # we don't find a changeset where this node is a parent
213 213 off = data.find(node, 0, findend)
214 214 findend = off
215 215 if off >= 0:
216 216 i = off / self.s
217 217 off = i * self.s
218 218 n = data[off + ngshaoffset:off + ngshaoffset + 20]
219 219 if n == node:
220 220 self.map[n] = i + start / self.s
221 221 return node
222 222 else:
223 223 break
224 224 end -= blocksize
225 225 return None
226 226
227 227 def loadindex(self, i=None, end=None):
228 228 if self.all:
229 229 return
230 230 all = False
231 231 if i == None:
232 232 blockstart = 0
233 233 blocksize = (65536 / self.s) * self.s
234 234 end = self.datasize
235 235 all = True
236 236 else:
237 237 if end:
238 238 blockstart = i * self.s
239 239 end = end * self.s
240 240 blocksize = end - blockstart
241 241 else:
242 242 blockstart = (i & ~1023) * self.s
243 243 blocksize = self.s * 1024
244 244 end = blockstart + blocksize
245 245 while blockstart < end:
246 246 self.loadblock(blockstart, blocksize)
247 247 blockstart += blocksize
248 248 if all:
249 249 self.all = True
250 250
251 251 class lazyindex(object):
252 252 """a lazy version of the index array"""
253 253 def __init__(self, parser):
254 254 self.p = parser
255 255 def __len__(self):
256 256 return len(self.p.index)
257 257 def load(self, pos):
258 258 if pos < 0:
259 259 pos += len(self.p.index)
260 260 self.p.loadindex(pos)
261 261 return self.p.index[pos]
262 262 def __getitem__(self, pos):
263 263 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
264 264 def __setitem__(self, pos, item):
265 265 self.p.index[pos] = _pack(indexformatng, *item)
266 266 def __delitem__(self, pos):
267 267 del self.p.index[pos]
268 268 def insert(self, pos, e):
269 269 self.p.index.insert(pos, _pack(indexformatng, *e))
270 270 def append(self, e):
271 271 self.p.index.append(_pack(indexformatng, *e))
272 272
273 273 class lazymap(object):
274 274 """a lazy version of the node map"""
275 275 def __init__(self, parser):
276 276 self.p = parser
277 277 def load(self, key):
278 278 n = self.p.findnode(key)
279 279 if n == None:
280 280 raise KeyError(key)
281 281 def __contains__(self, key):
282 282 if key in self.p.map:
283 283 return True
284 284 self.p.loadmap()
285 285 return key in self.p.map
286 286 def __iter__(self):
287 287 yield nullid
288 288 for i in xrange(self.p.l):
289 289 ret = self.p.index[i]
290 290 if not ret:
291 291 self.p.loadindex(i)
292 292 ret = self.p.index[i]
293 293 if isinstance(ret, str):
294 294 ret = _unpack(indexformatng, ret)
295 295 yield ret[7]
296 296 def __getitem__(self, key):
297 297 try:
298 298 return self.p.map[key]
299 299 except KeyError:
300 300 try:
301 301 self.load(key)
302 302 return self.p.map[key]
303 303 except KeyError:
304 304 raise KeyError("node " + hex(key))
305 305 def __setitem__(self, key, val):
306 306 self.p.map[key] = val
307 307 def __delitem__(self, key):
308 308 del self.p.map[key]
309 309
310 310 indexformatv0 = ">4l20s20s20s"
311 311 v0shaoffset = 56
312 312
313 313 class revlogoldio(object):
314 314 def __init__(self):
315 315 self.size = struct.calcsize(indexformatv0)
316 316
317 317 def parseindex(self, fp, inline):
318 318 s = self.size
319 319 index = []
320 320 nodemap = {nullid: nullrev}
321 321 n = off = 0
322 322 data = fp.read()
323 323 l = len(data)
324 324 while off + s <= l:
325 325 cur = data[off:off + s]
326 326 off += s
327 327 e = _unpack(indexformatv0, cur)
328 328 # transform to revlogv1 format
329 329 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
330 330 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
331 331 index.append(e2)
332 332 nodemap[e[6]] = n
333 333 n += 1
334 334
335 335 return index, nodemap, None
336 336
337 337 def packentry(self, entry, node, version, rev):
338 338 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
339 339 node(entry[5]), node(entry[6]), entry[7])
340 340 return _pack(indexformatv0, *e2)
341 341
342 342 # index ng:
343 343 # 6 bytes offset
344 344 # 2 bytes flags
345 345 # 4 bytes compressed length
346 346 # 4 bytes uncompressed length
347 347 # 4 bytes: base rev
348 348 # 4 bytes link rev
349 349 # 4 bytes parent 1 rev
350 350 # 4 bytes parent 2 rev
351 351 # 32 bytes: nodeid
352 352 indexformatng = ">Qiiiiii20s12x"
353 353 ngshaoffset = 32
354 354 versionformat = ">I"
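# Illustrative sketch (not part of the original revlog.py): a revlogng index
# entry is a fixed 64-byte record in the layout described above. Packing and
# unpacking one entry with hypothetical values:
#
#   e = (offset_type(0, 0),   # offset and flags
#        11, 24,              # compressed and uncompressed lengths
#        0, 0,                # base rev and link rev
#        -1, -1,              # parent revs (nullrev)
#        "\0" * 20)           # nodeid
#   record = _pack(indexformatng, *e)
#   assert len(record) == struct.calcsize(indexformatng)   # 64 bytes
#   assert _unpack(indexformatng, record) == e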
355 355
356 356 class revlogio(object):
357 357 def __init__(self):
358 358 self.size = struct.calcsize(indexformatng)
359 359
360 360 def parseindex(self, fp, inline):
361 361 try:
362 362 size = util.fstat(fp).st_size
363 363 except AttributeError:
364 364 size = 0
365 365
366 366 if util.openhardlinks() and not inline and size > 1000000:
367 367 # big index, let's parse it on demand
368 368 parser = lazyparser(fp, size)
369 369 index = lazyindex(parser)
370 370 nodemap = lazymap(parser)
371 371 e = list(index[0])
372 372 type = gettype(e[0])
373 373 e[0] = offset_type(0, type)
374 374 index[0] = e
375 375 return index, nodemap, None
376 376
377 377 data = fp.read()
378 378 # call the C implementation to parse the index data
379 379 index, nodemap, cache = parsers.parse_index(data, inline)
380 380 return index, nodemap, cache
381 381
382 382 def packentry(self, entry, node, version, rev):
383 383 p = _pack(indexformatng, *entry)
384 384 if rev == 0:
385 385 p = _pack(versionformat, version) + p[4:]
386 386 return p
387 387
388 388 class revlog(object):
389 389 """
390 390 the underlying revision storage object
391 391
392 392 A revlog consists of two parts, an index and the revision data.
393 393
394 394 The index is a file with a fixed record size containing
395 395 information on each revision, including its nodeid (hash), the
396 396 nodeids of its parents, the position and offset of its data within
397 397 the data file, and the revision it's based on. Finally, each entry
398 398 contains a linkrev entry that can serve as a pointer to external
399 399 data.
400 400
401 401 The revision data itself is a linear collection of data chunks.
402 402 Each chunk represents a revision and is usually represented as a
403 403 delta against the previous chunk. To bound lookup time, runs of
404 404 deltas are limited to about 2 times the length of the original
405 405 version data. This makes retrieval of a version proportional to
406 406 its size, or O(1) relative to the number of revisions.
407 407
408 408 Both pieces of the revlog are written to in an append-only
409 409 fashion, which means we never need to rewrite a file to insert or
410 410 remove data, and can use some simple techniques to avoid the need
411 411 for locking while reading.
412 412 """
413 413 def __init__(self, opener, indexfile):
414 414 """
415 415 create a revlog object
416 416
417 417 opener is a function that abstracts the file opening operation
418 418 and can be used to implement COW semantics or the like.
419 419 """
420 420 self.indexfile = indexfile
421 421 self.datafile = indexfile[:-2] + ".d"
422 422 self.opener = opener
423 423 self._cache = None
424 424 self._chunkcache = None
425 425 self.nodemap = {nullid: nullrev}
426 426 self.index = []
427 427
428 428 v = REVLOG_DEFAULT_VERSION
429 429 if hasattr(opener, "defversion"):
430 430 v = opener.defversion
431 431 if v & REVLOGNG:
432 432 v |= REVLOGNGINLINEDATA
433 433
434 434 i = ""
435 435 try:
436 436 f = self.opener(self.indexfile)
437 437 i = f.read(4)
438 438 f.seek(0)
439 439 if len(i) > 0:
440 440 v = struct.unpack(versionformat, i)[0]
441 441 except IOError, inst:
442 442 if inst.errno != errno.ENOENT:
443 443 raise
444 444
445 445 self.version = v
446 446 self._inline = v & REVLOGNGINLINEDATA
447 447 flags = v & ~0xFFFF
448 448 fmt = v & 0xFFFF
449 449 if fmt == REVLOGV0 and flags:
450 450 raise RevlogError(_("index %s unknown flags %#04x for format v0")
451 451 % (self.indexfile, flags >> 16))
452 452 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
453 453 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
454 454 % (self.indexfile, flags >> 16))
455 455 elif fmt > REVLOGNG:
456 456 raise RevlogError(_("index %s unknown format %d")
457 457 % (self.indexfile, fmt))
458 458
459 459 self._io = revlogio()
460 460 if self.version == REVLOGV0:
461 461 self._io = revlogoldio()
462 462 if i:
463 463 d = self._io.parseindex(f, self._inline)
464 464 self.index, self.nodemap, self._chunkcache = d
465 465
466 466 # add the magic null revision at -1 (if it hasn't been done already)
467 467 if (self.index == [] or isinstance(self.index, lazyindex) or
468 468 self.index[-1][7] != nullid) :
469 469 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
470 470
471 471 def _loadindex(self, start, end):
472 472 """load a block of indexes all at once from the lazy parser"""
473 473 if isinstance(self.index, lazyindex):
474 474 self.index.p.loadindex(start, end)
475 475
476 476 def _loadindexmap(self):
477 477 """loads both the map and the index from the lazy parser"""
478 478 if isinstance(self.index, lazyindex):
479 479 p = self.index.p
480 480 p.loadindex()
481 481 self.nodemap = p.map
482 482
483 483 def _loadmap(self):
484 484 """loads the map from the lazy parser"""
485 485 if isinstance(self.nodemap, lazymap):
486 486 self.nodemap.p.loadmap()
487 487 self.nodemap = self.nodemap.p.map
488 488
489 489 def tip(self):
490 490 return self.node(len(self.index) - 2)
491 491 def __len__(self):
492 492 return len(self.index) - 1
493 493 def __iter__(self):
494 494 for i in xrange(len(self)):
495 495 yield i
496 496 def rev(self, node):
497 497 try:
498 498 return self.nodemap[node]
499 499 except KeyError:
500 500 raise LookupError(node, self.indexfile, _('no node'))
501 501 def node(self, rev):
502 502 return self.index[rev][7]
503 503 def linkrev(self, rev):
504 504 return self.index[rev][4]
505 505 def parents(self, node):
506 506 i = self.index
507 507 d = i[self.rev(node)]
508 508 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
509 509 def parentrevs(self, rev):
510 510 return self.index[rev][5:7]
511 511 def start(self, rev):
512 512 return int(self.index[rev][0] >> 16)
513 513 def end(self, rev):
514 514 return self.start(rev) + self.length(rev)
515 515 def length(self, rev):
516 516 return self.index[rev][1]
517 517 def base(self, rev):
518 518 return self.index[rev][3]
519 519
520 520 def size(self, rev):
521 521 """return the length of the uncompressed text for a given revision"""
522 522 l = self.index[rev][2]
523 523 if l >= 0:
524 524 return l
525 525
526 526 t = self.revision(self.node(rev))
527 527 return len(t)
528 528
529 529 # alternate implementation: the advantage of this code is that it
530 530 # will be faster for a single revision, but the results are not
531 531 # cached, so finding the size of every revision will be slower.
532 532 """
533 533 if self.cache and self.cache[1] == rev:
534 534 return len(self.cache[2])
535 535
536 536 base = self.base(rev)
537 537 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
538 538 base = self.cache[1]
539 539 text = self.cache[2]
540 540 else:
541 541 text = self.revision(self.node(base))
542 542
543 543 l = len(text)
544 544 for x in xrange(base + 1, rev + 1):
545 545 l = mdiff.patchedsize(l, self.chunk(x))
546 546 return l
547 547 """
548 548
549 549 def reachable(self, node, stop=None):
550 550 """return a hash of all nodes ancestral to a given node, including
551 551 the node itself, stopping when stop is matched"""
552 552 reachable = {}
553 553 visit = [node]
554 554 reachable[node] = 1
555 555 if stop:
556 556 stopn = self.rev(stop)
557 557 else:
558 558 stopn = 0
559 559 while visit:
560 560 n = visit.pop(0)
561 561 if n == stop:
562 562 continue
563 563 if n == nullid:
564 564 continue
565 565 for p in self.parents(n):
566 566 if self.rev(p) < stopn:
567 567 continue
568 568 if p not in reachable:
569 569 reachable[p] = 1
570 570 visit.append(p)
571 571 return reachable
572 572
573 573 def ancestors(self, *revs):
574 574 'Generate the ancestors of revs using a breadth-first visit'
575 575 visit = list(revs)
576 576 seen = util.set([nullrev])
577 577 while visit:
578 578 for parent in self.parentrevs(visit.pop(0)):
579 579 if parent not in seen:
580 580 visit.append(parent)
581 581 seen.add(parent)
582 582 yield parent
583 583
584 584 def descendants(self, *revs):
585 585 'Generate the descendants of revs in topological order'
586 586 seen = util.set(revs)
587 587 for i in xrange(min(revs) + 1, len(self)):
588 588 for x in self.parentrevs(i):
589 589 if x != nullrev and x in seen:
590 590 seen.add(i)
591 591 yield i
592 592 break
593 593
594 594 def findmissing(self, common=None, heads=None):
595 595 '''
596 596 returns the topologically sorted list of nodes from the set:
597 597 missing = (ancestors(heads) \ ancestors(common))
598 598
599 599 where ancestors() is the set of ancestors from heads, heads included
600 600
601 601 if heads is None, the heads of the revlog are used
602 602 if common is None, nullid is assumed to be a common node
603 603 '''
604 604 if common is None:
605 605 common = [nullid]
606 606 if heads is None:
607 607 heads = self.heads()
608 608
609 609 common = [self.rev(n) for n in common]
610 610 heads = [self.rev(n) for n in heads]
611 611
612 612 # we want the ancestors, but inclusive
613 613 has = dict.fromkeys(self.ancestors(*common))
614 614 has[nullrev] = None
615 615 for r in common:
616 616 has[r] = None
617 617
618 618 # take all ancestors from heads that aren't in has
619 619 missing = {}
620 620 visit = [r for r in heads if r not in has]
621 621 while visit:
622 622 r = visit.pop(0)
623 623 if r in missing:
624 624 continue
625 625 else:
626 626 missing[r] = None
627 627 for p in self.parentrevs(r):
628 628 if p not in has:
629 629 visit.append(p)
630 630 missing = missing.keys()
631 631 missing.sort()
632 632 return [self.node(r) for r in missing]
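# Illustrative sketch (not part of the original revlog.py): the same
# "missing = ancestors(heads) \ ancestors(common)" arithmetic on a tiny
# hypothetical history, using plain Python sets instead of a revlog. With a
# linear history 0 <- 1 <- 2 <- 3 and common = [1]:
#
#   parents = {0: [], 1: [0], 2: [1], 3: [2]}
#   def ancestors(rev):
#       seen, visit = set(), [rev]
#       while visit:
#           r = visit.pop()
#           if r not in seen:
#               seen.add(r)
#               visit.extend(parents[r])
#       return seen
#   missing = ancestors(3) - ancestors(1)    # set([2, 3])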
633 633
634 634 def nodesbetween(self, roots=None, heads=None):
635 635 """Return a tuple containing three elements. Elements 1 and 2 contain
636 636 a final list of bases and heads after all the unreachable ones have been
637 637 pruned. Element 0 contains a topologically sorted list of all
638 638 nodes that satisfy these constraints:
639 639
640 640 1. All nodes must be descended from a node in roots (the nodes on
641 641 roots are considered descended from themselves).
642 642 2. All nodes must also be ancestors of a node in heads (the nodes in
643 643 heads are considered to be their own ancestors).
644 644
645 645 If roots is unspecified, nullid is assumed as the only root.
646 646 If heads is unspecified, it is taken to be the output of the
647 647 heads method (i.e. a list of all nodes in the repository that
648 648 have no children)."""
649 649 nonodes = ([], [], [])
650 650 if roots is not None:
651 651 roots = list(roots)
652 652 if not roots:
653 653 return nonodes
654 654 lowestrev = min([self.rev(n) for n in roots])
655 655 else:
656 656 roots = [nullid] # Everybody's a descendent of nullid
657 657 lowestrev = nullrev
658 658 if (lowestrev == nullrev) and (heads is None):
659 659 # We want _all_ the nodes!
660 660 return ([self.node(r) for r in self], [nullid], list(self.heads()))
661 661 if heads is None:
662 662 # All nodes are ancestors, so the latest ancestor is the last
663 663 # node.
664 664 highestrev = len(self) - 1
665 665 # Set ancestors to None to signal that every node is an ancestor.
666 666 ancestors = None
667 667 # Set heads to an empty dictionary for later discovery of heads
668 668 heads = {}
669 669 else:
670 670 heads = list(heads)
671 671 if not heads:
672 672 return nonodes
673 673 ancestors = {}
674 674 # Turn heads into a dictionary so we can remove 'fake' heads.
675 675 # Also, later we will be using it to filter out the heads we can't
676 676 # find from roots.
677 677 heads = dict.fromkeys(heads, 0)
678 678 # Start at the top and keep marking parents until we're done.
679 679 nodestotag = heads.keys()
680 680 # Remember where the top was so we can use it as a limit later.
681 681 highestrev = max([self.rev(n) for n in nodestotag])
682 682 while nodestotag:
683 683 # grab a node to tag
684 684 n = nodestotag.pop()
685 685 # Never tag nullid
686 686 if n == nullid:
687 687 continue
688 688 # A node's revision number represents its place in a
689 689 # topologically sorted list of nodes.
690 690 r = self.rev(n)
691 691 if r >= lowestrev:
692 692 if n not in ancestors:
693 693 # If we are possibly a descendent of one of the roots
694 694 # and we haven't already been marked as an ancestor
695 695 ancestors[n] = 1 # Mark as ancestor
696 696 # Add non-nullid parents to list of nodes to tag.
697 697 nodestotag.extend([p for p in self.parents(n) if
698 698 p != nullid])
699 699 elif n in heads: # We've seen it before, is it a fake head?
700 700 # So it is, real heads should not be the ancestors of
701 701 # any other heads.
702 702 heads.pop(n)
703 703 if not ancestors:
704 704 return nonodes
705 705 # Now that we have our set of ancestors, we want to remove any
706 706 # roots that are not ancestors.
707 707
708 708 # If one of the roots was nullid, everything is included anyway.
709 709 if lowestrev > nullrev:
710 710 # But, since we weren't, let's recompute the lowest rev to not
711 711 # include roots that aren't ancestors.
712 712
713 713 # Filter out roots that aren't ancestors of heads
714 714 roots = [n for n in roots if n in ancestors]
715 715 # Recompute the lowest revision
716 716 if roots:
717 717 lowestrev = min([self.rev(n) for n in roots])
718 718 else:
719 719 # No more roots? Return empty list
720 720 return nonodes
721 721 else:
722 722 # We are descending from nullid, and don't need to care about
723 723 # any other roots.
724 724 lowestrev = nullrev
725 725 roots = [nullid]
726 726 # Transform our roots list into a 'set' (i.e. a dictionary where the
727 727 # values don't matter).
728 728 descendents = dict.fromkeys(roots, 1)
729 729 # Also, keep the original roots so we can filter out roots that aren't
730 730 # 'real' roots (i.e. are descended from other roots).
731 731 roots = descendents.copy()
732 732 # Our topologically sorted list of output nodes.
733 733 orderedout = []
734 734 # Don't start at nullid since we don't want nullid in our output list,
735 735 # and if nullid shows up in descendents, empty parents will look like
736 736 # they're descendents.
737 737 for r in xrange(max(lowestrev, 0), highestrev + 1):
738 738 n = self.node(r)
739 739 isdescendent = False
740 740 if lowestrev == nullrev: # Everybody is a descendent of nullid
741 741 isdescendent = True
742 742 elif n in descendents:
743 743 # n is already a descendent
744 744 isdescendent = True
745 745 # This check only needs to be done here because all the roots
746 746 # will start being marked as descendents before the loop.
747 747 if n in roots:
748 748 # If n was a root, check if it's a 'real' root.
749 749 p = tuple(self.parents(n))
750 750 # If any of its parents are descendents, it's not a root.
751 751 if (p[0] in descendents) or (p[1] in descendents):
752 752 roots.pop(n)
753 753 else:
754 754 p = tuple(self.parents(n))
755 755 # A node is a descendent if either of its parents are
756 756 # descendents. (We seeded the descendents list with the roots
757 757 # up there, remember?)
758 758 if (p[0] in descendents) or (p[1] in descendents):
759 759 descendents[n] = 1
760 760 isdescendent = True
761 761 if isdescendent and ((ancestors is None) or (n in ancestors)):
762 762 # Only include nodes that are both descendents and ancestors.
763 763 orderedout.append(n)
764 764 if (ancestors is not None) and (n in heads):
765 765 # We're trying to figure out which heads are reachable
766 766 # from roots.
767 767 # Mark this head as having been reached
768 768 heads[n] = 1
769 769 elif ancestors is None:
770 770 # Otherwise, we're trying to discover the heads.
771 771 # Assume this is a head because if it isn't, the next step
772 772 # will eventually remove it.
773 773 heads[n] = 1
774 774 # But, obviously its parents aren't.
775 775 for p in self.parents(n):
776 776 heads.pop(p, None)
777 777 heads = [n for n in heads.iterkeys() if heads[n] != 0]
778 778 roots = roots.keys()
779 779 assert orderedout
780 780 assert roots
781 781 assert heads
782 782 return (orderedout, roots, heads)
783 783
784 784 def heads(self, start=None, stop=None):
785 785 """return the list of all nodes that have no children
786 786
787 787 if start is specified, only heads that are descendants of
788 788 start will be returned
789 789 if stop is specified, it will consider all the revs from stop
790 790 as if they had no children
791 791 """
792 792 if start is None and stop is None:
793 793 count = len(self)
794 794 if not count:
795 795 return [nullid]
796 796 ishead = [1] * (count + 1)
797 797 index = self.index
798 798 for r in xrange(count):
799 799 e = index[r]
800 800 ishead[e[5]] = ishead[e[6]] = 0
801 801 return [self.node(r) for r in xrange(count) if ishead[r]]
802 802
803 803 if start is None:
804 804 start = nullid
805 805 if stop is None:
806 806 stop = []
807 807 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
808 808 startrev = self.rev(start)
809 809 reachable = {startrev: 1}
810 810 heads = {startrev: 1}
811 811
812 812 parentrevs = self.parentrevs
813 813 for r in xrange(startrev + 1, len(self)):
814 814 for p in parentrevs(r):
815 815 if p in reachable:
816 816 if r not in stoprevs:
817 817 reachable[r] = 1
818 818 heads[r] = 1
819 819 if p in heads and p not in stoprevs:
820 820 del heads[p]
821 821
822 822 return [self.node(r) for r in heads]
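# Illustrative sketch (not part of the original revlog.py): the fast path
# above marks every rev as a head and then clears the mark for every rev
# that appears as somebody's parent. On a hypothetical three-rev log
# (rev 1's parent is 0, rev 2's parent is 1):
#
#   parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1)}
#   ishead = [1] * 3
#   for r in range(3):
#       for p in parentrevs[r]:
#           if p >= 0:
#               ishead[p] = 0
#   assert [r for r in range(3) if ishead[r]] == [2]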
823 823
824 824 def children(self, node):
825 825 """find the children of a given node"""
826 826 c = []
827 827 p = self.rev(node)
828 828 for r in range(p + 1, len(self)):
829 829 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
830 830 if prevs:
831 831 for pr in prevs:
832 832 if pr == p:
833 833 c.append(self.node(r))
834 834 elif p == nullrev:
835 835 c.append(self.node(r))
836 836 return c
837 837
838 838 def _match(self, id):
839 839 if isinstance(id, (long, int)):
840 840 # rev
841 841 return self.node(id)
842 842 if len(id) == 20:
843 843 # possibly a binary node
844 844 # odds of a binary node being all hex in ASCII are 1 in 10**25
845 845 try:
846 846 node = id
847 847 r = self.rev(node) # quick search the index
848 848 return node
849 849 except LookupError:
850 850 pass # may be partial hex id
851 851 try:
852 852 # str(rev)
853 853 rev = int(id)
854 854 if str(rev) != id:
855 855 raise ValueError
856 856 if rev < 0:
857 857 rev = len(self) + rev
858 858 if rev < 0 or rev >= len(self):
859 859 raise ValueError
860 860 return self.node(rev)
861 861 except (ValueError, OverflowError):
862 862 pass
863 863 if len(id) == 40:
864 864 try:
865 865 # a full hex nodeid?
866 866 node = bin(id)
867 867 r = self.rev(node)
868 868 return node
869 869 except (TypeError, LookupError):
870 870 pass
871 871
872 872 def _partialmatch(self, id):
873 873 if len(id) < 40:
874 874 try:
875 875 # hex(node)[:...]
876 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
877 node = None
878 for n in self.nodemap:
879 if n.startswith(bin_id) and hex(n).startswith(id):
880 if node is not None:
881 raise LookupError(id, self.indexfile,
882 _('ambiguous identifier'))
883 node = n
884 if node is not None:
885 return node
876 l = len(id) / 2 # grab an even number of digits
877 bin_id = bin(id[:l*2])
878 nl = [n for n in self.nodemap if n[:l] == bin_id]
879 nl = [n for n in nl if hex(n).startswith(id)]
880 if len(nl) > 0:
881 if len(nl) == 1:
882 return nl[0]
883 raise LookupError(id, self.indexfile,
884 _('ambiguous identifier'))
885 return None
886 886 except TypeError:
887 887 pass
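# Illustrative sketch (not part of the original revlog.py): the rewritten
# _partialmatch above (the subject of this changeset) replaces the old
# per-node loop with two list comprehensions: a cheap fixed-length slice
# comparison against the binary prefix first, then the exact hex-prefix
# check on the few survivors. With a hypothetical two-node map (bin and hex
# are the helpers imported from node at the top of this file):
#
#   nodemap = {"\xd3\xad\xbe\xef" + "\0" * 16: 0,
#              "\xd3\xad\x00\x00" + "\0" * 16: 1}
#   id = "d3adbe"                  # user-supplied hex prefix, 6 digits
#   l = len(id) / 2                # 3 whole bytes
#   bin_id = bin(id[:l * 2])       # "\xd3\xad\xbe"
#   nl = [n for n in nodemap if n[:l] == bin_id]     # one candidate left
#   nl = [n for n in nl if hex(n).startswith(id)]    # exact hex check
#   assert len(nl) == 1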
888 888
889 889 def lookup(self, id):
890 890 """locate a node based on:
891 891 - revision number or str(revision number)
892 892 - nodeid or subset of hex nodeid
893 893 """
894 894 n = self._match(id)
895 895 if n is not None:
896 896 return n
897 897 n = self._partialmatch(id)
898 898 if n:
899 899 return n
900 900
901 901 raise LookupError(id, self.indexfile, _('no match found'))
902 902
903 903 def cmp(self, node, text):
904 904 """compare text with a given file revision"""
905 905 p1, p2 = self.parents(node)
906 906 return hash(text, p1, p2) != node
907 907
908 908 def chunk(self, rev, df=None):
909 909 def loadcache(df):
910 910 if not df:
911 911 if self._inline:
912 912 df = self.opener(self.indexfile)
913 913 else:
914 914 df = self.opener(self.datafile)
915 915 df.seek(start)
916 916 self._chunkcache = (start, df.read(cache_length))
917 917
918 918 start, length = self.start(rev), self.length(rev)
919 919 if self._inline:
920 920 start += (rev + 1) * self._io.size
921 921 end = start + length
922 922
923 923 offset = 0
924 924 if not self._chunkcache:
925 925 cache_length = max(65536, length)
926 926 loadcache(df)
927 927 else:
928 928 cache_start = self._chunkcache[0]
929 929 cache_length = len(self._chunkcache[1])
930 930 cache_end = cache_start + cache_length
931 931 if start >= cache_start and end <= cache_end:
932 932 # it is cached
933 933 offset = start - cache_start
934 934 else:
935 935 cache_length = max(65536, length)
936 936 loadcache(df)
937 937
938 938 # avoid copying large chunks
939 939 c = self._chunkcache[1]
940 940 if cache_length != length:
941 941 c = c[offset:offset + length]
942 942
943 943 return decompress(c)
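# Illustrative sketch (not part of the original revlog.py): in an inline
# revlog the data chunks are interleaved with the 64-byte index records,
# which is why chunk() adds (rev + 1) * self._io.size above: the physical
# position of rev r's data is its logical offset plus one record for every
# entry up to and including r. With hypothetical numbers:
#
#   record = 64            # struct.calcsize(indexformatng)
#   logical_start = 300    # self.start(r)
#   r = 2
#   physical_start = logical_start + (r + 1) * record    # 492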
944 944
945 945 def revdiff(self, rev1, rev2):
946 946 """return or calculate a delta between two revisions"""
947 947 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
948 948 return self.chunk(rev2)
949 949
950 950 return mdiff.textdiff(self.revision(self.node(rev1)),
951 951 self.revision(self.node(rev2)))
952 952
953 953 def revision(self, node):
954 954 """return an uncompressed revision of a given node"""
955 955 if node == nullid:
956 956 return ""
957 957 if self._cache and self._cache[0] == node:
958 958 return str(self._cache[2])
959 959
960 960 # look up what we need to read
961 961 text = None
962 962 rev = self.rev(node)
963 963 base = self.base(rev)
964 964
965 965 # check rev flags
966 966 if self.index[rev][0] & 0xFFFF:
967 967 raise RevlogError(_('incompatible revision flag %x') %
968 968 (self.index[rev][0] & 0xFFFF))
969 969
970 970 df = None
971 971
972 972 # do we have useful data cached?
973 973 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
974 974 base = self._cache[1]
975 975 text = str(self._cache[2])
976 976 self._loadindex(base, rev + 1)
977 977 if not self._inline and rev > base + 1:
978 978 df = self.opener(self.datafile)
979 979 else:
980 980 self._loadindex(base, rev + 1)
981 981 if not self._inline and rev > base:
982 982 df = self.opener(self.datafile)
983 983 text = self.chunk(base, df=df)
984 984
985 985 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
986 986 text = mdiff.patches(text, bins)
987 987 p1, p2 = self.parents(node)
988 988 if node != hash(text, p1, p2):
989 989 raise RevlogError(_("integrity check failed on %s:%d")
990 990 % (self.datafile, rev))
991 991
992 992 self._cache = (node, rev, text)
993 993 return text
994 994
995 995 def checkinlinesize(self, tr, fp=None):
996 996 if not self._inline:
997 997 return
998 998 if not fp:
999 999 fp = self.opener(self.indexfile, 'r')
1000 1000 fp.seek(0, 2)
1001 1001 size = fp.tell()
1002 1002 if size < 131072:
1003 1003 return
1004 1004 trinfo = tr.find(self.indexfile)
1005 1005 if trinfo == None:
1006 1006 raise RevlogError(_("%s not found in the transaction")
1007 1007 % self.indexfile)
1008 1008
1009 1009 trindex = trinfo[2]
1010 1010 dataoff = self.start(trindex)
1011 1011
1012 1012 tr.add(self.datafile, dataoff)
1013 1013 df = self.opener(self.datafile, 'w')
1014 1014 try:
1015 1015 calc = self._io.size
1016 1016 for r in self:
1017 1017 start = self.start(r) + (r + 1) * calc
1018 1018 length = self.length(r)
1019 1019 fp.seek(start)
1020 1020 d = fp.read(length)
1021 1021 df.write(d)
1022 1022 finally:
1023 1023 df.close()
1024 1024
1025 1025 fp.close()
1026 1026 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1027 1027 self.version &= ~(REVLOGNGINLINEDATA)
1028 1028 self._inline = False
1029 1029 for i in self:
1030 1030 e = self._io.packentry(self.index[i], self.node, self.version, i)
1031 1031 fp.write(e)
1032 1032
1033 1033 # if we don't call rename, the temp file will never replace the
1034 1034 # real index
1035 1035 fp.rename()
1036 1036
1037 1037 tr.replace(self.indexfile, trindex * calc)
1038 1038 self._chunkcache = None
1039 1039
1040 1040 def addrevision(self, text, transaction, link, p1, p2, d=None):
1041 1041 """add a revision to the log
1042 1042
1043 1043 text - the revision data to add
1044 1044 transaction - the transaction object used for rollback
1045 1045 link - the linkrev data to add
1046 1046 p1, p2 - the parent nodeids of the revision
1047 1047 d - an optional precomputed delta
1048 1048 """
1049 1049 dfh = None
1050 1050 if not self._inline:
1051 1051 dfh = self.opener(self.datafile, "a")
1052 1052 ifh = self.opener(self.indexfile, "a+")
1053 1053 try:
1054 1054 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1055 1055 finally:
1056 1056 if dfh:
1057 1057 dfh.close()
1058 1058 ifh.close()
1059 1059
1060 1060 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1061 1061 node = hash(text, p1, p2)
1062 1062 if node in self.nodemap:
1063 1063 return node
1064 1064
1065 1065 curr = len(self)
1066 1066 prev = curr - 1
1067 1067 base = self.base(prev)
1068 1068 offset = self.end(prev)
1069 1069
1070 1070 if curr:
1071 1071 if not d:
1072 1072 ptext = self.revision(self.node(prev))
1073 1073 d = mdiff.textdiff(ptext, text)
1074 1074 data = compress(d)
1075 1075 l = len(data[1]) + len(data[0])
1076 1076 dist = l + offset - self.start(base)
1077 1077
1078 1078 # full versions are inserted when the needed deltas
1079 1079 # become comparable to the uncompressed text
1080 1080 if not curr or dist > len(text) * 2:
1081 1081 data = compress(text)
1082 1082 l = len(data[1]) + len(data[0])
1083 1083 base = curr
1084 1084
1085 1085 e = (offset_type(offset, 0), l, len(text),
1086 1086 base, link, self.rev(p1), self.rev(p2), node)
1087 1087 self.index.insert(-1, e)
1088 1088 self.nodemap[node] = curr
1089 1089
1090 1090 entry = self._io.packentry(e, self.node, self.version, curr)
1091 1091 if not self._inline:
1092 1092 transaction.add(self.datafile, offset)
1093 1093 transaction.add(self.indexfile, curr * len(entry))
1094 1094 if data[0]:
1095 1095 dfh.write(data[0])
1096 1096 dfh.write(data[1])
1097 1097 dfh.flush()
1098 1098 ifh.write(entry)
1099 1099 else:
1100 1100 offset += curr * self._io.size
1101 1101 transaction.add(self.indexfile, offset, curr)
1102 1102 ifh.write(entry)
1103 1103 ifh.write(data[0])
1104 1104 ifh.write(data[1])
1105 1105 self.checkinlinesize(transaction, ifh)
1106 1106
1107 1107 self._cache = (node, curr, text)
1108 1108 return node
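# Illustrative sketch (not part of the original revlog.py): the
# "dist > len(text) * 2" test above is what keeps delta chains to roughly
# twice the size of the plain text, as promised in the class docstring.
# With hypothetical numbers:
#
#   textlen = 1000        # size of the new revision's full text
#   chainstart = 50000    # self.start(base): where the current chain begins
#   offset = 52500        # end of the previous revision's data
#   l = 150               # compressed size of the new delta
#   dist = l + offset - chainstart       # 2650 bytes of chain so far
#   assert dist > textlen * 2            # so a full version is stored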
1109 1109
1110 1110 def ancestor(self, a, b):
1111 1111 """calculate the least common ancestor of nodes a and b"""
1112 1112
1113 1113 def parents(rev):
1114 1114 return [p for p in self.parentrevs(rev) if p != nullrev]
1115 1115
1116 1116 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1117 1117 if c is None:
1118 1118 return nullid
1119 1119
1120 1120 return self.node(c)
1121 1121
1122 1122 def group(self, nodelist, lookup, infocollect=None):
1123 1123 """calculate a delta group
1124 1124
1125 1125 Given a list of changeset revs, return a set of deltas and
1126 1126 metadata corresponding to nodes. The first delta is
1127 1127 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1128 1128 have this parent as it has all history before these
1129 1129 changesets. The parent used is parents()[0].
1130 1130 """
1131 1131 revs = [self.rev(n) for n in nodelist]
1132 1132
1133 1133 # if we don't have any revisions touched by these changesets, bail
1134 1134 if not revs:
1135 1135 yield changegroup.closechunk()
1136 1136 return
1137 1137
1138 1138 # add the parent of the first rev
1139 1139 p = self.parents(self.node(revs[0]))[0]
1140 1140 revs.insert(0, self.rev(p))
1141 1141
1142 1142 # build deltas
1143 1143 for d in xrange(0, len(revs) - 1):
1144 1144 a, b = revs[d], revs[d + 1]
1145 1145 nb = self.node(b)
1146 1146
1147 1147 if infocollect is not None:
1148 1148 infocollect(nb)
1149 1149
1150 1150 p = self.parents(nb)
1151 1151 meta = nb + p[0] + p[1] + lookup(nb)
1152 1152 if a == -1:
1153 1153 d = self.revision(nb)
1154 1154 meta += mdiff.trivialdiffheader(len(d))
1155 1155 else:
1156 1156 d = self.revdiff(a, b)
1157 1157 yield changegroup.chunkheader(len(meta) + len(d))
1158 1158 yield meta
1159 1159 if len(d) > 2**20:
1160 1160 pos = 0
1161 1161 while pos < len(d):
1162 1162 pos2 = pos + 2 ** 18
1163 1163 yield d[pos:pos2]
1164 1164 pos = pos2
1165 1165 else:
1166 1166 yield d
1167 1167
1168 1168 yield changegroup.closechunk()
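# Illustrative sketch (not part of the original revlog.py): each chunk
# yielded above starts with an 80-byte header of four 20-byte nodeids
# (node, p1, p2, linknode) followed by the delta; addgroup() below unpacks
# it with the matching format string:
#
#   node = p1 = p2 = linknode = "\0" * 20    # hypothetical nodeids
#   meta = node + p1 + p2 + linknode
#   assert len(meta) == 80
#   n, pa, pb, cs = struct.unpack("20s20s20s20s", meta)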
1169 1169
1170 1170 def addgroup(self, revs, linkmapper, transaction):
1171 1171 """
1172 1172 add a delta group
1173 1173
1174 1174 Given a set of deltas, add them to the revision log. The
1175 1175 first delta is against its parent, which should be in our
1176 1176 log; the rest are against the previous delta.
1177 1177 """
1178 1178
1179 1179 # track the base of the current delta log
1180 1180 r = len(self)
1181 1181 t = r - 1
1182 1182 node = None
1183 1183
1184 1184 base = prev = nullrev
1185 1185 start = end = textlen = 0
1186 1186 if r:
1187 1187 end = self.end(t)
1188 1188
1189 1189 ifh = self.opener(self.indexfile, "a+")
1190 1190 isize = r * self._io.size
1191 1191 if self._inline:
1192 1192 transaction.add(self.indexfile, end + isize, r)
1193 1193 dfh = None
1194 1194 else:
1195 1195 transaction.add(self.indexfile, isize, r)
1196 1196 transaction.add(self.datafile, end)
1197 1197 dfh = self.opener(self.datafile, "a")
1198 1198
1199 1199 try:
1200 1200 # loop through our set of deltas
1201 1201 chain = None
1202 1202 for chunk in revs:
1203 1203 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1204 1204 link = linkmapper(cs)
1205 1205 if node in self.nodemap:
1206 1206 # this can happen if two branches make the same change
1207 1207 chain = node
1208 1208 continue
1209 1209 delta = buffer(chunk, 80)
1210 1210 del chunk
1211 1211
1212 1212 for p in (p1, p2):
1213 1213 if not p in self.nodemap:
1214 1214 raise LookupError(p, self.indexfile, _('unknown parent'))
1215 1215
1216 1216 if not chain:
1217 1217 # retrieve the parent revision of the delta chain
1218 1218 chain = p1
1219 1219 if not chain in self.nodemap:
1220 1220 raise LookupError(chain, self.indexfile, _('unknown base'))
1221 1221
1222 1222 # full versions are inserted when the needed deltas become
1223 1223 # comparable to the uncompressed text or when the previous
1224 1224 # version is not the one we have a delta against. We use
1225 1225 # the size of the previous full rev as a proxy for the
1226 1226 # current size.
1227 1227
1228 1228 if chain == prev:
1229 1229 cdelta = compress(delta)
1230 1230 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1231 1231 textlen = mdiff.patchedsize(textlen, delta)
1232 1232
1233 1233 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1234 1234 # flush our writes here so we can read it in revision
1235 1235 if dfh:
1236 1236 dfh.flush()
1237 1237 ifh.flush()
1238 1238 text = self.revision(chain)
1239 1239 if len(text) == 0:
1240 1240 # skip over trivial delta header
1241 1241 text = buffer(delta, 12)
1242 1242 else:
1243 1243 text = mdiff.patches(text, [delta])
1244 1244 del delta
1245 1245 chk = self._addrevision(text, transaction, link, p1, p2, None,
1246 1246 ifh, dfh)
1247 1247 if not dfh and not self._inline:
1248 1248 # addrevision switched from inline to conventional
1249 1249 # reopen the index
1250 1250 dfh = self.opener(self.datafile, "a")
1251 1251 ifh = self.opener(self.indexfile, "a")
1252 1252 if chk != node:
1253 1253 raise RevlogError(_("consistency error adding group"))
1254 1254 textlen = len(text)
1255 1255 else:
1256 1256 e = (offset_type(end, 0), cdeltalen, textlen, base,
1257 1257 link, self.rev(p1), self.rev(p2), node)
1258 1258 self.index.insert(-1, e)
1259 1259 self.nodemap[node] = r
1260 1260 entry = self._io.packentry(e, self.node, self.version, r)
1261 1261 if self._inline:
1262 1262 ifh.write(entry)
1263 1263 ifh.write(cdelta[0])
1264 1264 ifh.write(cdelta[1])
1265 1265 self.checkinlinesize(transaction, ifh)
1266 1266 if not self._inline:
1267 1267 dfh = self.opener(self.datafile, "a")
1268 1268 ifh = self.opener(self.indexfile, "a")
1269 1269 else:
1270 1270 dfh.write(cdelta[0])
1271 1271 dfh.write(cdelta[1])
1272 1272 ifh.write(entry)
1273 1273
1274 1274 t, r, chain, prev = r, r + 1, node, node
1275 1275 base = self.base(t)
1276 1276 start = self.start(base)
1277 1277 end = self.end(t)
1278 1278 finally:
1279 1279 if dfh:
1280 1280 dfh.close()
1281 1281 ifh.close()
1282 1282
1283 1283 return node
1284 1284
1285 1285 def strip(self, minlink):
1286 1286 """truncate the revlog on the first revision with a linkrev >= minlink
1287 1287
1288 1288 This function is called when we're stripping revision minlink and
1289 1289 its descendants from the repository.
1290 1290
1291 1291 We have to remove all revisions with linkrev >= minlink, because
1292 1292 the equivalent changelog revisions will be renumbered after the
1293 1293 strip.
1294 1294
1295 1295 So we truncate the revlog on the first of these revisions, and
1296 1296 trust that the caller has saved the revisions that shouldn't be
1297 1297 removed and that it'll re-add them after this truncation.
1298 1298 """
1299 1299 if len(self) == 0:
1300 1300 return
1301 1301
1302 1302 if isinstance(self.index, lazyindex):
1303 1303 self._loadindexmap()
1304 1304
1305 1305 for rev in self:
1306 1306 if self.index[rev][4] >= minlink:
1307 1307 break
1308 1308 else:
1309 1309 return
1310 1310
1311 1311 # first truncate the files on disk
1312 1312 end = self.start(rev)
1313 1313 if not self._inline:
1314 1314 df = self.opener(self.datafile, "a")
1315 1315 df.truncate(end)
1316 1316 end = rev * self._io.size
1317 1317 else:
1318 1318 end += rev * self._io.size
1319 1319
1320 1320 indexf = self.opener(self.indexfile, "a")
1321 1321 indexf.truncate(end)
1322 1322
1323 1323 # then reset internal state in memory to forget those revisions
1324 1324 self._cache = None
1325 1325 self._chunkcache = None
1326 1326 for x in xrange(rev, len(self)):
1327 1327 del self.nodemap[self.node(x)]
1328 1328
1329 1329 del self.index[rev:-1]
1330 1330
1331 1331 def checksize(self):
1332 1332 expected = 0
1333 1333 if len(self):
1334 1334 expected = max(0, self.end(len(self) - 1))
1335 1335
1336 1336 try:
1337 1337 f = self.opener(self.datafile)
1338 1338 f.seek(0, 2)
1339 1339 actual = f.tell()
1340 1340 dd = actual - expected
1341 1341 except IOError, inst:
1342 1342 if inst.errno != errno.ENOENT:
1343 1343 raise
1344 1344 dd = 0
1345 1345
1346 1346 try:
1347 1347 f = self.opener(self.indexfile)
1348 1348 f.seek(0, 2)
1349 1349 actual = f.tell()
1350 1350 s = self._io.size
1351 1351 i = max(0, actual / s)
1352 1352 di = actual - (i * s)
1353 1353 if self._inline:
1354 1354 databytes = 0
1355 1355 for r in self:
1356 1356 databytes += max(0, self.length(r))
1357 1357 dd = 0
1358 1358 di = actual - len(self) * s - databytes
1359 1359 except IOError, inst:
1360 1360 if inst.errno != errno.ENOENT:
1361 1361 raise
1362 1362 di = 0
1363 1363
1364 1364 return (dd, di)
1365 1365
1366 1366 def files(self):
1367 1367 res = [ self.indexfile ]
1368 1368 if not self._inline:
1369 1369 res.append(self.datafile)
1370 1370 return res