revlog: don't silently discard revlog flags on revlogv0
Benoit Boissinot
r10395:ea52a2d4 default
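
This changeset adds two lines (346-347 on the new side) to revlogoldio.packentry. Per-entry flags live in the low 16 bits of an index entry's offset/flags field, and the v0 index format has no field to store them, so writing a flagged entry through the old-format writer now raises instead of silently dropping the flags. Below is a minimal standalone sketch of that behaviour; the helpers are simplified, the entry tuple is hypothetical, and ValueError stands in for the module's RevlogError.

# Sketch only: mirrors the guard added to revlogoldio.packentry in this
# changeset; not the actual Mercurial source.

def gettype(q):
    # flag bits live in the low 16 bits of the packed offset/flags value
    return int(q & 0xFFFF)

def offset_type(offset, type):
    # pack a data-file offset and 16 flag bits into a single integer
    return (offset << 16) | type

def packentry_v0(entry):
    # the v0 index format cannot represent per-entry flags, so refuse
    # to write the entry rather than discard the flags silently
    if gettype(entry[0]):
        raise ValueError("index entry flags need RevlogNG")
    # ... the real code packs the remaining fields with indexformatv0 ...
    return entry

flagged = (offset_type(0, 1), 0, 0, -1, 0, -1, -1, "\0" * 20)  # hypothetical entry
try:
    packentry_v0(flagged)
except ValueError as inst:
    print("refused to write v0 entry: %s" % inst)
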
@@ -1,1414 +1,1416 @@
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 16 from i18n import _
17 17 import changegroup, ancestor, mdiff, parsers, error, util
18 18 import struct, zlib, errno
19 19
20 20 _pack = struct.pack
21 21 _unpack = struct.unpack
22 22 _compress = zlib.compress
23 23 _decompress = zlib.decompress
24 24 _sha = util.sha1
25 25
26 26 # revlog flags
27 27 REVLOGV0 = 0
28 28 REVLOGNG = 1
29 29 REVLOGNGINLINEDATA = (1 << 16)
30 30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 31 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33 33
34 34 _prereadsize = 1048576
35 35
36 36 RevlogError = error.RevlogError
37 37 LookupError = error.LookupError
38 38
39 39 def getoffset(q):
40 40 return int(q >> 16)
41 41
42 42 def gettype(q):
43 43 return int(q & 0xFFFF)
44 44
45 45 def offset_type(offset, type):
46 46 return long(long(offset) << 16 | type)
47 47
48 48 nullhash = _sha(nullid)
49 49
50 50 def hash(text, p1, p2):
51 51 """generate a hash from the given text and its parent hashes
52 52
53 53 This hash combines both the current file contents and its history
54 54 in a manner that makes it easy to distinguish nodes with the same
55 55 content in the revision graph.
56 56 """
57 57 # As of now, if one of the parent node is null, p2 is null
58 58 if p2 == nullid:
59 59 # deep copy of a hash is faster than creating one
60 60 s = nullhash.copy()
61 61 s.update(p1)
62 62 else:
63 63 # none of the parent nodes are nullid
64 64 l = [p1, p2]
65 65 l.sort()
66 66 s = _sha(l[0])
67 67 s.update(l[1])
68 68 s.update(text)
69 69 return s.digest()
70 70
71 71 def compress(text):
72 72 """ generate a possibly-compressed representation of text """
73 73 if not text:
74 74 return ("", text)
75 75 l = len(text)
76 76 bin = None
77 77 if l < 44:
78 78 pass
79 79 elif l > 1000000:
80 80 # zlib makes an internal copy, thus doubling memory usage for
81 81 # large files, so lets do this in pieces
82 82 z = zlib.compressobj()
83 83 p = []
84 84 pos = 0
85 85 while pos < l:
86 86 pos2 = pos + 2**20
87 87 p.append(z.compress(text[pos:pos2]))
88 88 pos = pos2
89 89 p.append(z.flush())
90 90 if sum(map(len, p)) < l:
91 91 bin = "".join(p)
92 92 else:
93 93 bin = _compress(text)
94 94 if bin is None or len(bin) > l:
95 95 if text[0] == '\0':
96 96 return ("", text)
97 97 return ('u', text)
98 98 return ("", bin)
99 99
100 100 def decompress(bin):
101 101 """ decompress the given input """
102 102 if not bin:
103 103 return bin
104 104 t = bin[0]
105 105 if t == '\0':
106 106 return bin
107 107 if t == 'x':
108 108 return _decompress(bin)
109 109 if t == 'u':
110 110 return bin[1:]
111 111 raise RevlogError(_("unknown compression type %r") % t)
112 112
113 113 class lazyparser(object):
114 114 """
115 115 this class avoids the need to parse the entirety of large indices
116 116 """
117 117
118 118 # lazyparser is not safe to use on windows if win32 extensions not
119 119 # available. it keeps file handle open, which make it not possible
120 120 # to break hardlinks on local cloned repos.
121 121
122 122 def __init__(self, dataf):
123 123 try:
124 124 size = util.fstat(dataf).st_size
125 125 except AttributeError:
126 126 size = 0
127 127 self.dataf = dataf
128 128 self.s = struct.calcsize(indexformatng)
129 129 self.datasize = size
130 130 self.l = size / self.s
131 131 self.index = [None] * self.l
132 132 self.map = {nullid: nullrev}
133 133 self.allmap = 0
134 134 self.all = 0
135 135 self.mapfind_count = 0
136 136
137 137 def loadmap(self):
138 138 """
139 139 during a commit, we need to make sure the rev being added is
140 140 not a duplicate. This requires loading the entire index,
141 141 which is fairly slow. loadmap can load up just the node map,
142 142 which takes much less time.
143 143 """
144 144 if self.allmap:
145 145 return
146 146 end = self.datasize
147 147 self.allmap = 1
148 148 cur = 0
149 149 count = 0
150 150 blocksize = self.s * 256
151 151 self.dataf.seek(0)
152 152 while cur < end:
153 153 data = self.dataf.read(blocksize)
154 154 off = 0
155 155 for x in xrange(256):
156 156 n = data[off + ngshaoffset:off + ngshaoffset + 20]
157 157 self.map[n] = count
158 158 count += 1
159 159 if count >= self.l:
160 160 break
161 161 off += self.s
162 162 cur += blocksize
163 163
164 164 def loadblock(self, blockstart, blocksize, data=None):
165 165 if self.all:
166 166 return
167 167 if data is None:
168 168 self.dataf.seek(blockstart)
169 169 if blockstart + blocksize > self.datasize:
170 170 # the revlog may have grown since we've started running,
171 171 # but we don't have space in self.index for more entries.
172 172 # limit blocksize so that we don't get too much data.
173 173 blocksize = max(self.datasize - blockstart, 0)
174 174 data = self.dataf.read(blocksize)
175 175 lend = len(data) / self.s
176 176 i = blockstart / self.s
177 177 off = 0
178 178 # lazyindex supports __delitem__
179 179 if lend > len(self.index) - i:
180 180 lend = len(self.index) - i
181 181 for x in xrange(lend):
182 182 if self.index[i + x] is None:
183 183 b = data[off : off + self.s]
184 184 self.index[i + x] = b
185 185 n = b[ngshaoffset:ngshaoffset + 20]
186 186 self.map[n] = i + x
187 187 off += self.s
188 188
189 189 def findnode(self, node):
190 190 """search backwards through the index file for a specific node"""
191 191 if self.allmap:
192 192 return None
193 193
194 194 # hg log will cause many many searches for the manifest
195 195 # nodes. After we get called a few times, just load the whole
196 196 # thing.
197 197 if self.mapfind_count > 8:
198 198 self.loadmap()
199 199 if node in self.map:
200 200 return node
201 201 return None
202 202 self.mapfind_count += 1
203 203 last = self.l - 1
204 204 while self.index[last] != None:
205 205 if last == 0:
206 206 self.all = 1
207 207 self.allmap = 1
208 208 return None
209 209 last -= 1
210 210 end = (last + 1) * self.s
211 211 blocksize = self.s * 256
212 212 while end >= 0:
213 213 start = max(end - blocksize, 0)
214 214 self.dataf.seek(start)
215 215 data = self.dataf.read(end - start)
216 216 findend = end - start
217 217 while True:
218 218 # we're searching backwards, so we have to make sure
219 219 # we don't find a changeset where this node is a parent
220 220 off = data.find(node, 0, findend)
221 221 findend = off
222 222 if off >= 0:
223 223 i = off / self.s
224 224 off = i * self.s
225 225 n = data[off + ngshaoffset:off + ngshaoffset + 20]
226 226 if n == node:
227 227 self.map[n] = i + start / self.s
228 228 return node
229 229 else:
230 230 break
231 231 end -= blocksize
232 232 return None
233 233
234 234 def loadindex(self, i=None, end=None):
235 235 if self.all:
236 236 return
237 237 all = False
238 238 if i is None:
239 239 blockstart = 0
240 240 blocksize = (65536 / self.s) * self.s
241 241 end = self.datasize
242 242 all = True
243 243 else:
244 244 if end:
245 245 blockstart = i * self.s
246 246 end = end * self.s
247 247 blocksize = end - blockstart
248 248 else:
249 249 blockstart = (i & ~1023) * self.s
250 250 blocksize = self.s * 1024
251 251 end = blockstart + blocksize
252 252 while blockstart < end:
253 253 self.loadblock(blockstart, blocksize)
254 254 blockstart += blocksize
255 255 if all:
256 256 self.all = True
257 257
258 258 class lazyindex(object):
259 259 """a lazy version of the index array"""
260 260 def __init__(self, parser):
261 261 self.p = parser
262 262 def __len__(self):
263 263 return len(self.p.index)
264 264 def load(self, pos):
265 265 if pos < 0:
266 266 pos += len(self.p.index)
267 267 self.p.loadindex(pos)
268 268 return self.p.index[pos]
269 269 def __getitem__(self, pos):
270 270 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
271 271 def __setitem__(self, pos, item):
272 272 self.p.index[pos] = _pack(indexformatng, *item)
273 273 def __delitem__(self, pos):
274 274 del self.p.index[pos]
275 275 def insert(self, pos, e):
276 276 self.p.index.insert(pos, _pack(indexformatng, *e))
277 277 def append(self, e):
278 278 self.p.index.append(_pack(indexformatng, *e))
279 279
280 280 class lazymap(object):
281 281 """a lazy version of the node map"""
282 282 def __init__(self, parser):
283 283 self.p = parser
284 284 def load(self, key):
285 285 n = self.p.findnode(key)
286 286 if n is None:
287 287 raise KeyError(key)
288 288 def __contains__(self, key):
289 289 if key in self.p.map:
290 290 return True
291 291 self.p.loadmap()
292 292 return key in self.p.map
293 293 def __iter__(self):
294 294 yield nullid
295 295 for i in xrange(self.p.l):
296 296 ret = self.p.index[i]
297 297 if not ret:
298 298 self.p.loadindex(i)
299 299 ret = self.p.index[i]
300 300 if isinstance(ret, str):
301 301 ret = _unpack(indexformatng, ret)
302 302 yield ret[7]
303 303 def __getitem__(self, key):
304 304 try:
305 305 return self.p.map[key]
306 306 except KeyError:
307 307 try:
308 308 self.load(key)
309 309 return self.p.map[key]
310 310 except KeyError:
311 311 raise KeyError("node " + hex(key))
312 312 def __setitem__(self, key, val):
313 313 self.p.map[key] = val
314 314 def __delitem__(self, key):
315 315 del self.p.map[key]
316 316
317 317 indexformatv0 = ">4l20s20s20s"
318 318 v0shaoffset = 56
319 319
320 320 class revlogoldio(object):
321 321 def __init__(self):
322 322 self.size = struct.calcsize(indexformatv0)
323 323
324 324 def parseindex(self, fp, data, inline):
325 325 s = self.size
326 326 index = []
327 327 nodemap = {nullid: nullrev}
328 328 n = off = 0
329 329 if len(data) == _prereadsize:
330 330 data += fp.read() # read the rest
331 331 l = len(data)
332 332 while off + s <= l:
333 333 cur = data[off:off + s]
334 334 off += s
335 335 e = _unpack(indexformatv0, cur)
336 336 # transform to revlogv1 format
337 337 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
338 338 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
339 339 index.append(e2)
340 340 nodemap[e[6]] = n
341 341 n += 1
342 342
343 343 return index, nodemap, None
344 344
345 345 def packentry(self, entry, node, version, rev):
346 if gettype(entry[0]):
347 raise RevlogError(_("index entry flags need RevlogNG"))
346 348 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
347 349 node(entry[5]), node(entry[6]), entry[7])
348 350 return _pack(indexformatv0, *e2)
349 351
350 352 # index ng:
351 353 # 6 bytes offset
352 354 # 2 bytes flags
353 355 # 4 bytes compressed length
354 356 # 4 bytes uncompressed length
355 357 # 4 bytes: base rev
356 358 # 4 bytes link rev
357 359 # 4 bytes parent 1 rev
358 360 # 4 bytes parent 2 rev
359 361 # 32 bytes: nodeid
360 362 indexformatng = ">Qiiiiii20s12x"
361 363 ngshaoffset = 32
362 364 versionformat = ">I"
363 365
364 366 class revlogio(object):
365 367 def __init__(self):
366 368 self.size = struct.calcsize(indexformatng)
367 369
368 370 def parseindex(self, fp, data, inline):
369 371 if len(data) == _prereadsize:
370 372 if util.openhardlinks() and not inline:
371 373 # big index, let's parse it on demand
372 374 parser = lazyparser(fp)
373 375 index = lazyindex(parser)
374 376 nodemap = lazymap(parser)
375 377 e = list(index[0])
376 378 type = gettype(e[0])
377 379 e[0] = offset_type(0, type)
378 380 index[0] = e
379 381 return index, nodemap, None
380 382 else:
381 383 data += fp.read()
382 384
383 385 # call the C implementation to parse the index data
384 386 index, nodemap, cache = parsers.parse_index(data, inline)
385 387 return index, nodemap, cache
386 388
387 389 def packentry(self, entry, node, version, rev):
388 390 p = _pack(indexformatng, *entry)
389 391 if rev == 0:
390 392 p = _pack(versionformat, version) + p[4:]
391 393 return p
392 394
393 395 class revlog(object):
394 396 """
395 397 the underlying revision storage object
396 398
397 399 A revlog consists of two parts, an index and the revision data.
398 400
399 401 The index is a file with a fixed record size containing
400 402 information on each revision, including its nodeid (hash), the
401 403 nodeids of its parents, the position and offset of its data within
402 404 the data file, and the revision it's based on. Finally, each entry
403 405 contains a linkrev entry that can serve as a pointer to external
404 406 data.
405 407
406 408 The revision data itself is a linear collection of data chunks.
407 409 Each chunk represents a revision and is usually represented as a
408 410 delta against the previous chunk. To bound lookup time, runs of
409 411 deltas are limited to about 2 times the length of the original
410 412 version data. This makes retrieval of a version proportional to
411 413 its size, or O(1) relative to the number of revisions.
412 414
413 415 Both pieces of the revlog are written to in an append-only
414 416 fashion, which means we never need to rewrite a file to insert or
415 417 remove data, and can use some simple techniques to avoid the need
416 418 for locking while reading.
417 419 """
418 420 def __init__(self, opener, indexfile):
419 421 """
420 422 create a revlog object
421 423
422 424 opener is a function that abstracts the file opening operation
423 425 and can be used to implement COW semantics or the like.
424 426 """
425 427 self.indexfile = indexfile
426 428 self.datafile = indexfile[:-2] + ".d"
427 429 self.opener = opener
428 430 self._cache = None
429 431 self._chunkcache = (0, '')
430 432 self.nodemap = {nullid: nullrev}
431 433 self.index = []
432 434
433 435 v = REVLOG_DEFAULT_VERSION
434 436 if hasattr(opener, 'options') and 'defversion' in opener.options:
435 437 v = opener.options['defversion']
436 438 if v & REVLOGNG:
437 439 v |= REVLOGNGINLINEDATA
438 440
439 441 i = ''
440 442 try:
441 443 f = self.opener(self.indexfile)
442 444 i = f.read(_prereadsize)
443 445 if len(i) > 0:
444 446 v = struct.unpack(versionformat, i[:4])[0]
445 447 except IOError, inst:
446 448 if inst.errno != errno.ENOENT:
447 449 raise
448 450
449 451 self.version = v
450 452 self._inline = v & REVLOGNGINLINEDATA
451 453 flags = v & ~0xFFFF
452 454 fmt = v & 0xFFFF
453 455 if fmt == REVLOGV0 and flags:
454 456 raise RevlogError(_("index %s unknown flags %#04x for format v0")
455 457 % (self.indexfile, flags >> 16))
456 458 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
457 459 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
458 460 % (self.indexfile, flags >> 16))
459 461 elif fmt > REVLOGNG:
460 462 raise RevlogError(_("index %s unknown format %d")
461 463 % (self.indexfile, fmt))
462 464
463 465 self._io = revlogio()
464 466 if self.version == REVLOGV0:
465 467 self._io = revlogoldio()
466 468 if i:
467 469 try:
468 470 d = self._io.parseindex(f, i, self._inline)
469 471 except (ValueError, IndexError):
470 472 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
471 473 self.index, self.nodemap, self._chunkcache = d
472 474 if not self._chunkcache:
473 475 self._chunkclear()
474 476
475 477 # add the magic null revision at -1 (if it hasn't been done already)
476 478 if (self.index == [] or isinstance(self.index, lazyindex) or
477 479 self.index[-1][7] != nullid) :
478 480 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
479 481
480 482 def _loadindex(self, start, end):
481 483 """load a block of indexes all at once from the lazy parser"""
482 484 if isinstance(self.index, lazyindex):
483 485 self.index.p.loadindex(start, end)
484 486
485 487 def _loadindexmap(self):
486 488 """loads both the map and the index from the lazy parser"""
487 489 if isinstance(self.index, lazyindex):
488 490 p = self.index.p
489 491 p.loadindex()
490 492 self.nodemap = p.map
491 493
492 494 def _loadmap(self):
493 495 """loads the map from the lazy parser"""
494 496 if isinstance(self.nodemap, lazymap):
495 497 self.nodemap.p.loadmap()
496 498 self.nodemap = self.nodemap.p.map
497 499
498 500 def tip(self):
499 501 return self.node(len(self.index) - 2)
500 502 def __len__(self):
501 503 return len(self.index) - 1
502 504 def __iter__(self):
503 505 for i in xrange(len(self)):
504 506 yield i
505 507 def rev(self, node):
506 508 try:
507 509 return self.nodemap[node]
508 510 except KeyError:
509 511 raise LookupError(node, self.indexfile, _('no node'))
510 512 def node(self, rev):
511 513 return self.index[rev][7]
512 514 def linkrev(self, rev):
513 515 return self.index[rev][4]
514 516 def parents(self, node):
515 517 i = self.index
516 518 d = i[self.rev(node)]
517 519 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
518 520 def parentrevs(self, rev):
519 521 return self.index[rev][5:7]
520 522 def start(self, rev):
521 523 return int(self.index[rev][0] >> 16)
522 524 def end(self, rev):
523 525 return self.start(rev) + self.length(rev)
524 526 def length(self, rev):
525 527 return self.index[rev][1]
526 528 def base(self, rev):
527 529 return self.index[rev][3]
528 530
529 531 def size(self, rev):
530 532 """return the length of the uncompressed text for a given revision"""
531 533 l = self.index[rev][2]
532 534 if l >= 0:
533 535 return l
534 536
535 537 t = self.revision(self.node(rev))
536 538 return len(t)
537 539
538 540 # Alternate implementation. The advantage to this code is it
539 541 # will be faster for a single revision. However, the results
540 542 # are not cached, so finding the size of every revision will
541 543 # be slower.
542 544 #
543 545 # if self.cache and self.cache[1] == rev:
544 546 # return len(self.cache[2])
545 547 #
546 548 # base = self.base(rev)
547 549 # if self.cache and self.cache[1] >= base and self.cache[1] < rev:
548 550 # base = self.cache[1]
549 551 # text = self.cache[2]
550 552 # else:
551 553 # text = self.revision(self.node(base))
552 554 #
553 555 # l = len(text)
554 556 # for x in xrange(base + 1, rev + 1):
555 557 # l = mdiff.patchedsize(l, self._chunk(x))
556 558 # return l
557 559
558 560 def reachable(self, node, stop=None):
559 561 """return the set of all nodes ancestral to a given node, including
560 562 the node itself, stopping when stop is matched"""
561 563 reachable = set((node,))
562 564 visit = [node]
563 565 if stop:
564 566 stopn = self.rev(stop)
565 567 else:
566 568 stopn = 0
567 569 while visit:
568 570 n = visit.pop(0)
569 571 if n == stop:
570 572 continue
571 573 if n == nullid:
572 574 continue
573 575 for p in self.parents(n):
574 576 if self.rev(p) < stopn:
575 577 continue
576 578 if p not in reachable:
577 579 reachable.add(p)
578 580 visit.append(p)
579 581 return reachable
580 582
581 583 def ancestors(self, *revs):
582 584 """Generate the ancestors of 'revs' in reverse topological order.
583 585
584 586 Yield a sequence of revision numbers starting with the parents
585 587 of each revision in revs, i.e., each revision is *not* considered
586 588 an ancestor of itself. Results are in breadth-first order:
587 589 parents of each rev in revs, then parents of those, etc. Result
588 590 does not include the null revision."""
589 591 visit = list(revs)
590 592 seen = set([nullrev])
591 593 while visit:
592 594 for parent in self.parentrevs(visit.pop(0)):
593 595 if parent not in seen:
594 596 visit.append(parent)
595 597 seen.add(parent)
596 598 yield parent
597 599
598 600 def descendants(self, *revs):
599 601 """Generate the descendants of 'revs' in revision order.
600 602
601 603 Yield a sequence of revision numbers starting with a child of
602 604 some rev in revs, i.e., each revision is *not* considered a
603 605 descendant of itself. Results are ordered by revision number (a
604 606 topological sort)."""
605 607 seen = set(revs)
606 608 for i in xrange(min(revs) + 1, len(self)):
607 609 for x in self.parentrevs(i):
608 610 if x != nullrev and x in seen:
609 611 seen.add(i)
610 612 yield i
611 613 break
612 614
613 615 def findmissing(self, common=None, heads=None):
614 616 """Return the ancestors of heads that are not ancestors of common.
615 617
616 618 More specifically, return a list of nodes N such that every N
617 619 satisfies the following constraints:
618 620
619 621 1. N is an ancestor of some node in 'heads'
620 622 2. N is not an ancestor of any node in 'common'
621 623
622 624 The list is sorted by revision number, meaning it is
623 625 topologically sorted.
624 626
625 627 'heads' and 'common' are both lists of node IDs. If heads is
626 628 not supplied, uses all of the revlog's heads. If common is not
627 629 supplied, uses nullid."""
628 630 if common is None:
629 631 common = [nullid]
630 632 if heads is None:
631 633 heads = self.heads()
632 634
633 635 common = [self.rev(n) for n in common]
634 636 heads = [self.rev(n) for n in heads]
635 637
636 638 # we want the ancestors, but inclusive
637 639 has = set(self.ancestors(*common))
638 640 has.add(nullrev)
639 641 has.update(common)
640 642
641 643 # take all ancestors from heads that aren't in has
642 644 missing = set()
643 645 visit = [r for r in heads if r not in has]
644 646 while visit:
645 647 r = visit.pop(0)
646 648 if r in missing:
647 649 continue
648 650 else:
649 651 missing.add(r)
650 652 for p in self.parentrevs(r):
651 653 if p not in has:
652 654 visit.append(p)
653 655 missing = list(missing)
654 656 missing.sort()
655 657 return [self.node(r) for r in missing]
656 658
657 659 def nodesbetween(self, roots=None, heads=None):
658 660 """Return a topological path from 'roots' to 'heads'.
659 661
660 662 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
661 663 topologically sorted list of all nodes N that satisfy both of
662 664 these constraints:
663 665
664 666 1. N is a descendant of some node in 'roots'
665 667 2. N is an ancestor of some node in 'heads'
666 668
667 669 Every node is considered to be both a descendant and an ancestor
668 670 of itself, so every reachable node in 'roots' and 'heads' will be
669 671 included in 'nodes'.
670 672
671 673 'outroots' is the list of reachable nodes in 'roots', i.e., the
672 674 subset of 'roots' that is returned in 'nodes'. Likewise,
673 675 'outheads' is the subset of 'heads' that is also in 'nodes'.
674 676
675 677 'roots' and 'heads' are both lists of node IDs. If 'roots' is
676 678 unspecified, uses nullid as the only root. If 'heads' is
677 679 unspecified, uses list of all of the revlog's heads."""
678 680 nonodes = ([], [], [])
679 681 if roots is not None:
680 682 roots = list(roots)
681 683 if not roots:
682 684 return nonodes
683 685 lowestrev = min([self.rev(n) for n in roots])
684 686 else:
685 687 roots = [nullid] # Everybody's a descendent of nullid
686 688 lowestrev = nullrev
687 689 if (lowestrev == nullrev) and (heads is None):
688 690 # We want _all_ the nodes!
689 691 return ([self.node(r) for r in self], [nullid], list(self.heads()))
690 692 if heads is None:
691 693 # All nodes are ancestors, so the latest ancestor is the last
692 694 # node.
693 695 highestrev = len(self) - 1
694 696 # Set ancestors to None to signal that every node is an ancestor.
695 697 ancestors = None
696 698 # Set heads to an empty dictionary for later discovery of heads
697 699 heads = {}
698 700 else:
699 701 heads = list(heads)
700 702 if not heads:
701 703 return nonodes
702 704 ancestors = set()
703 705 # Turn heads into a dictionary so we can remove 'fake' heads.
704 706 # Also, later we will be using it to filter out the heads we can't
705 707 # find from roots.
706 708 heads = dict.fromkeys(heads, 0)
707 709 # Start at the top and keep marking parents until we're done.
708 710 nodestotag = set(heads)
709 711 # Remember where the top was so we can use it as a limit later.
710 712 highestrev = max([self.rev(n) for n in nodestotag])
711 713 while nodestotag:
712 714 # grab a node to tag
713 715 n = nodestotag.pop()
714 716 # Never tag nullid
715 717 if n == nullid:
716 718 continue
717 719 # A node's revision number represents its place in a
718 720 # topologically sorted list of nodes.
719 721 r = self.rev(n)
720 722 if r >= lowestrev:
721 723 if n not in ancestors:
722 724 # If we are possibly a descendent of one of the roots
723 725 # and we haven't already been marked as an ancestor
724 726 ancestors.add(n) # Mark as ancestor
725 727 # Add non-nullid parents to list of nodes to tag.
726 728 nodestotag.update([p for p in self.parents(n) if
727 729 p != nullid])
728 730 elif n in heads: # We've seen it before, is it a fake head?
729 731 # So it is, real heads should not be the ancestors of
730 732 # any other heads.
731 733 heads.pop(n)
732 734 if not ancestors:
733 735 return nonodes
734 736 # Now that we have our set of ancestors, we want to remove any
735 737 # roots that are not ancestors.
736 738
737 739 # If one of the roots was nullid, everything is included anyway.
738 740 if lowestrev > nullrev:
739 741 # But, since we weren't, let's recompute the lowest rev to not
740 742 # include roots that aren't ancestors.
741 743
742 744 # Filter out roots that aren't ancestors of heads
743 745 roots = [n for n in roots if n in ancestors]
744 746 # Recompute the lowest revision
745 747 if roots:
746 748 lowestrev = min([self.rev(n) for n in roots])
747 749 else:
748 750 # No more roots? Return empty list
749 751 return nonodes
750 752 else:
751 753 # We are descending from nullid, and don't need to care about
752 754 # any other roots.
753 755 lowestrev = nullrev
754 756 roots = [nullid]
755 757 # Transform our roots list into a set.
756 758 descendents = set(roots)
757 759 # Also, keep the original roots so we can filter out roots that aren't
758 760 # 'real' roots (i.e. are descended from other roots).
759 761 roots = descendents.copy()
760 762 # Our topologically sorted list of output nodes.
761 763 orderedout = []
762 764 # Don't start at nullid since we don't want nullid in our output list,
763 765 # and if nullid shows up in descedents, empty parents will look like
764 766 # they're descendents.
765 767 for r in xrange(max(lowestrev, 0), highestrev + 1):
766 768 n = self.node(r)
767 769 isdescendent = False
768 770 if lowestrev == nullrev: # Everybody is a descendent of nullid
769 771 isdescendent = True
770 772 elif n in descendents:
771 773 # n is already a descendent
772 774 isdescendent = True
773 775 # This check only needs to be done here because all the roots
774 776 # will start being marked is descendents before the loop.
775 777 if n in roots:
776 778 # If n was a root, check if it's a 'real' root.
777 779 p = tuple(self.parents(n))
778 780 # If any of its parents are descendents, it's not a root.
779 781 if (p[0] in descendents) or (p[1] in descendents):
780 782 roots.remove(n)
781 783 else:
782 784 p = tuple(self.parents(n))
783 785 # A node is a descendent if either of its parents are
784 786 # descendents. (We seeded the dependents list with the roots
785 787 # up there, remember?)
786 788 if (p[0] in descendents) or (p[1] in descendents):
787 789 descendents.add(n)
788 790 isdescendent = True
789 791 if isdescendent and ((ancestors is None) or (n in ancestors)):
790 792 # Only include nodes that are both descendents and ancestors.
791 793 orderedout.append(n)
792 794 if (ancestors is not None) and (n in heads):
793 795 # We're trying to figure out which heads are reachable
794 796 # from roots.
795 797 # Mark this head as having been reached
796 798 heads[n] = 1
797 799 elif ancestors is None:
798 800 # Otherwise, we're trying to discover the heads.
799 801 # Assume this is a head because if it isn't, the next step
800 802 # will eventually remove it.
801 803 heads[n] = 1
802 804 # But, obviously its parents aren't.
803 805 for p in self.parents(n):
804 806 heads.pop(p, None)
805 807 heads = [n for n in heads.iterkeys() if heads[n] != 0]
806 808 roots = list(roots)
807 809 assert orderedout
808 810 assert roots
809 811 assert heads
810 812 return (orderedout, roots, heads)
811 813
812 814 def heads(self, start=None, stop=None):
813 815 """return the list of all nodes that have no children
814 816
815 817 if start is specified, only heads that are descendants of
816 818 start will be returned
817 819 if stop is specified, it will consider all the revs from stop
818 820 as if they had no children
819 821 """
820 822 if start is None and stop is None:
821 823 count = len(self)
822 824 if not count:
823 825 return [nullid]
824 826 ishead = [1] * (count + 1)
825 827 index = self.index
826 828 for r in xrange(count):
827 829 e = index[r]
828 830 ishead[e[5]] = ishead[e[6]] = 0
829 831 return [self.node(r) for r in xrange(count) if ishead[r]]
830 832
831 833 if start is None:
832 834 start = nullid
833 835 if stop is None:
834 836 stop = []
835 837 stoprevs = set([self.rev(n) for n in stop])
836 838 startrev = self.rev(start)
837 839 reachable = set((startrev,))
838 840 heads = set((startrev,))
839 841
840 842 parentrevs = self.parentrevs
841 843 for r in xrange(startrev + 1, len(self)):
842 844 for p in parentrevs(r):
843 845 if p in reachable:
844 846 if r not in stoprevs:
845 847 reachable.add(r)
846 848 heads.add(r)
847 849 if p in heads and p not in stoprevs:
848 850 heads.remove(p)
849 851
850 852 return [self.node(r) for r in heads]
851 853
852 854 def children(self, node):
853 855 """find the children of a given node"""
854 856 c = []
855 857 p = self.rev(node)
856 858 for r in range(p + 1, len(self)):
857 859 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
858 860 if prevs:
859 861 for pr in prevs:
860 862 if pr == p:
861 863 c.append(self.node(r))
862 864 elif p == nullrev:
863 865 c.append(self.node(r))
864 866 return c
865 867
866 868 def _match(self, id):
867 869 if isinstance(id, (long, int)):
868 870 # rev
869 871 return self.node(id)
870 872 if len(id) == 20:
871 873 # possibly a binary node
872 874 # odds of a binary node being all hex in ASCII are 1 in 10**25
873 875 try:
874 876 node = id
875 877 self.rev(node) # quick search the index
876 878 return node
877 879 except LookupError:
878 880 pass # may be partial hex id
879 881 try:
880 882 # str(rev)
881 883 rev = int(id)
882 884 if str(rev) != id:
883 885 raise ValueError
884 886 if rev < 0:
885 887 rev = len(self) + rev
886 888 if rev < 0 or rev >= len(self):
887 889 raise ValueError
888 890 return self.node(rev)
889 891 except (ValueError, OverflowError):
890 892 pass
891 893 if len(id) == 40:
892 894 try:
893 895 # a full hex nodeid?
894 896 node = bin(id)
895 897 self.rev(node)
896 898 return node
897 899 except (TypeError, LookupError):
898 900 pass
899 901
900 902 def _partialmatch(self, id):
901 903 if len(id) < 40:
902 904 try:
903 905 # hex(node)[:...]
904 906 l = len(id) // 2 # grab an even number of digits
905 907 bin_id = bin(id[:l * 2])
906 908 nl = [n for n in self.nodemap if n[:l] == bin_id]
907 909 nl = [n for n in nl if hex(n).startswith(id)]
908 910 if len(nl) > 0:
909 911 if len(nl) == 1:
910 912 return nl[0]
911 913 raise LookupError(id, self.indexfile,
912 914 _('ambiguous identifier'))
913 915 return None
914 916 except TypeError:
915 917 pass
916 918
917 919 def lookup(self, id):
918 920 """locate a node based on:
919 921 - revision number or str(revision number)
920 922 - nodeid or subset of hex nodeid
921 923 """
922 924 n = self._match(id)
923 925 if n is not None:
924 926 return n
925 927 n = self._partialmatch(id)
926 928 if n:
927 929 return n
928 930
929 931 raise LookupError(id, self.indexfile, _('no match found'))
930 932
931 933 def cmp(self, node, text):
932 934 """compare text with a given file revision"""
933 935 p1, p2 = self.parents(node)
934 936 return hash(text, p1, p2) != node
935 937
936 938 def _addchunk(self, offset, data):
937 939 o, d = self._chunkcache
938 940 # try to add to existing cache
939 941 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
940 942 self._chunkcache = o, d + data
941 943 else:
942 944 self._chunkcache = offset, data
943 945
944 946 def _loadchunk(self, offset, length):
945 947 if self._inline:
946 948 df = self.opener(self.indexfile)
947 949 else:
948 950 df = self.opener(self.datafile)
949 951
950 952 readahead = max(65536, length)
951 953 df.seek(offset)
952 954 d = df.read(readahead)
953 955 self._addchunk(offset, d)
954 956 if readahead > length:
955 957 return d[:length]
956 958 return d
957 959
958 960 def _getchunk(self, offset, length):
959 961 o, d = self._chunkcache
960 962 l = len(d)
961 963
962 964 # is it in the cache?
963 965 cachestart = offset - o
964 966 cacheend = cachestart + length
965 967 if cachestart >= 0 and cacheend <= l:
966 968 if cachestart == 0 and cacheend == l:
967 969 return d # avoid a copy
968 970 return d[cachestart:cacheend]
969 971
970 972 return self._loadchunk(offset, length)
971 973
972 974 def _chunkraw(self, startrev, endrev):
973 975 start = self.start(startrev)
974 976 length = self.end(endrev) - start
975 977 if self._inline:
976 978 start += (startrev + 1) * self._io.size
977 979 return self._getchunk(start, length)
978 980
979 981 def _chunk(self, rev):
980 982 return decompress(self._chunkraw(rev, rev))
981 983
982 984 def _chunkclear(self):
983 985 self._chunkcache = (0, '')
984 986
985 987 def revdiff(self, rev1, rev2):
986 988 """return or calculate a delta between two revisions"""
987 989 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
988 990 return self._chunk(rev2)
989 991
990 992 return mdiff.textdiff(self.revision(self.node(rev1)),
991 993 self.revision(self.node(rev2)))
992 994
993 995 def revision(self, node):
994 996 """return an uncompressed revision of a given node"""
995 997 if node == nullid:
996 998 return ""
997 999 if self._cache and self._cache[0] == node:
998 1000 return self._cache[2]
999 1001
1000 1002 # look up what we need to read
1001 1003 text = None
1002 1004 rev = self.rev(node)
1003 1005 base = self.base(rev)
1004 1006
1005 1007 # check rev flags
1006 1008 if self.index[rev][0] & 0xFFFF:
1007 1009 raise RevlogError(_('incompatible revision flag %x') %
1008 1010 (self.index[rev][0] & 0xFFFF))
1009 1011
1010 1012 # do we have useful data cached?
1011 1013 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
1012 1014 base = self._cache[1]
1013 1015 text = self._cache[2]
1014 1016
1015 1017 self._loadindex(base, rev + 1)
1016 1018 self._chunkraw(base, rev)
1017 1019 if text is None:
1018 1020 text = self._chunk(base)
1019 1021
1020 1022 bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
1021 1023 text = mdiff.patches(text, bins)
1022 1024 p1, p2 = self.parents(node)
1023 1025 if node != hash(text, p1, p2):
1024 1026 raise RevlogError(_("integrity check failed on %s:%d")
1025 1027 % (self.indexfile, rev))
1026 1028
1027 1029 self._cache = (node, rev, text)
1028 1030 return text
1029 1031
1030 1032 def checkinlinesize(self, tr, fp=None):
1031 1033 if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
1032 1034 return
1033 1035
1034 1036 trinfo = tr.find(self.indexfile)
1035 1037 if trinfo is None:
1036 1038 raise RevlogError(_("%s not found in the transaction")
1037 1039 % self.indexfile)
1038 1040
1039 1041 trindex = trinfo[2]
1040 1042 dataoff = self.start(trindex)
1041 1043
1042 1044 tr.add(self.datafile, dataoff)
1043 1045
1044 1046 if fp:
1045 1047 fp.flush()
1046 1048 fp.close()
1047 1049
1048 1050 df = self.opener(self.datafile, 'w')
1049 1051 try:
1050 1052 for r in self:
1051 1053 df.write(self._chunkraw(r, r))
1052 1054 finally:
1053 1055 df.close()
1054 1056
1055 1057 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1056 1058 self.version &= ~(REVLOGNGINLINEDATA)
1057 1059 self._inline = False
1058 1060 for i in self:
1059 1061 e = self._io.packentry(self.index[i], self.node, self.version, i)
1060 1062 fp.write(e)
1061 1063
1062 1064 # if we don't call rename, the temp file will never replace the
1063 1065 # real index
1064 1066 fp.rename()
1065 1067
1066 1068 tr.replace(self.indexfile, trindex * self._io.size)
1067 1069 self._chunkclear()
1068 1070
1069 1071 def addrevision(self, text, transaction, link, p1, p2, d=None):
1070 1072 """add a revision to the log
1071 1073
1072 1074 text - the revision data to add
1073 1075 transaction - the transaction object used for rollback
1074 1076 link - the linkrev data to add
1075 1077 p1, p2 - the parent nodeids of the revision
1076 1078 d - an optional precomputed delta
1077 1079 """
1078 1080 dfh = None
1079 1081 if not self._inline:
1080 1082 dfh = self.opener(self.datafile, "a")
1081 1083 ifh = self.opener(self.indexfile, "a+")
1082 1084 try:
1083 1085 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1084 1086 finally:
1085 1087 if dfh:
1086 1088 dfh.close()
1087 1089 ifh.close()
1088 1090
1089 1091 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1090 1092 node = hash(text, p1, p2)
1091 1093 if node in self.nodemap:
1092 1094 return node
1093 1095
1094 1096 curr = len(self)
1095 1097 prev = curr - 1
1096 1098 base = self.base(prev)
1097 1099 offset = self.end(prev)
1098 1100
1099 1101 if curr:
1100 1102 if not d:
1101 1103 ptext = self.revision(self.node(prev))
1102 1104 d = mdiff.textdiff(ptext, text)
1103 1105 data = compress(d)
1104 1106 l = len(data[1]) + len(data[0])
1105 1107 dist = l + offset - self.start(base)
1106 1108
1107 1109 # full versions are inserted when the needed deltas
1108 1110 # become comparable to the uncompressed text
1109 1111 if not curr or dist > len(text) * 2:
1110 1112 data = compress(text)
1111 1113 l = len(data[1]) + len(data[0])
1112 1114 base = curr
1113 1115
1114 1116 e = (offset_type(offset, 0), l, len(text),
1115 1117 base, link, self.rev(p1), self.rev(p2), node)
1116 1118 self.index.insert(-1, e)
1117 1119 self.nodemap[node] = curr
1118 1120
1119 1121 entry = self._io.packentry(e, self.node, self.version, curr)
1120 1122 if not self._inline:
1121 1123 transaction.add(self.datafile, offset)
1122 1124 transaction.add(self.indexfile, curr * len(entry))
1123 1125 if data[0]:
1124 1126 dfh.write(data[0])
1125 1127 dfh.write(data[1])
1126 1128 dfh.flush()
1127 1129 ifh.write(entry)
1128 1130 else:
1129 1131 offset += curr * self._io.size
1130 1132 transaction.add(self.indexfile, offset, curr)
1131 1133 ifh.write(entry)
1132 1134 ifh.write(data[0])
1133 1135 ifh.write(data[1])
1134 1136 self.checkinlinesize(transaction, ifh)
1135 1137
1136 1138 if type(text) == str: # only accept immutable objects
1137 1139 self._cache = (node, curr, text)
1138 1140 return node
1139 1141
1140 1142 def descendant(self, start, end):
1141 1143 for i in self.descendants(start):
1142 1144 if i == end:
1143 1145 return True
1144 1146 elif i > end:
1145 1147 break
1146 1148 return False
1147 1149
1148 1150 def ancestor(self, a, b):
1149 1151 """calculate the least common ancestor of nodes a and b"""
1150 1152
1151 1153 # fast path, check if it is a descendant
1152 1154 a, b = self.rev(a), self.rev(b)
1153 1155 start, end = sorted((a, b))
1154 1156 if self.descendant(start, end):
1155 1157 return self.node(start)
1156 1158
1157 1159 def parents(rev):
1158 1160 return [p for p in self.parentrevs(rev) if p != nullrev]
1159 1161
1160 1162 c = ancestor.ancestor(a, b, parents)
1161 1163 if c is None:
1162 1164 return nullid
1163 1165
1164 1166 return self.node(c)
1165 1167
1166 1168 def group(self, nodelist, lookup, infocollect=None):
1167 1169 """Calculate a delta group, yielding a sequence of changegroup chunks
1168 1170 (strings).
1169 1171
1170 1172 Given a list of changeset revs, return a set of deltas and
1171 1173 metadata corresponding to nodes. the first delta is
1172 1174 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1173 1175 have this parent as it has all history before these
1174 1176 changesets. parent is parent[0]
1175 1177 """
1176 1178
1177 1179 revs = [self.rev(n) for n in nodelist]
1178 1180
1179 1181 # if we don't have any revisions touched by these changesets, bail
1180 1182 if not revs:
1181 1183 yield changegroup.closechunk()
1182 1184 return
1183 1185
1184 1186 # add the parent of the first rev
1185 1187 p = self.parentrevs(revs[0])[0]
1186 1188 revs.insert(0, p)
1187 1189
1188 1190 # build deltas
1189 1191 for d in xrange(len(revs) - 1):
1190 1192 a, b = revs[d], revs[d + 1]
1191 1193 nb = self.node(b)
1192 1194
1193 1195 if infocollect is not None:
1194 1196 infocollect(nb)
1195 1197
1196 1198 p = self.parents(nb)
1197 1199 meta = nb + p[0] + p[1] + lookup(nb)
1198 1200 if a == -1:
1199 1201 d = self.revision(nb)
1200 1202 meta += mdiff.trivialdiffheader(len(d))
1201 1203 else:
1202 1204 d = self.revdiff(a, b)
1203 1205 yield changegroup.chunkheader(len(meta) + len(d))
1204 1206 yield meta
1205 1207 if len(d) > 2**20:
1206 1208 pos = 0
1207 1209 while pos < len(d):
1208 1210 pos2 = pos + 2 ** 18
1209 1211 yield d[pos:pos2]
1210 1212 pos = pos2
1211 1213 else:
1212 1214 yield d
1213 1215
1214 1216 yield changegroup.closechunk()
1215 1217
1216 1218 def addgroup(self, revs, linkmapper, transaction):
1217 1219 """
1218 1220 add a delta group
1219 1221
1220 1222 given a set of deltas, add them to the revision log. the
1221 1223 first delta is against its parent, which should be in our
1222 1224 log, the rest are against the previous delta.
1223 1225 """
1224 1226
1225 1227 #track the base of the current delta log
1226 1228 r = len(self)
1227 1229 t = r - 1
1228 1230 node = None
1229 1231
1230 1232 base = prev = nullrev
1231 1233 start = end = textlen = 0
1232 1234 if r:
1233 1235 end = self.end(t)
1234 1236
1235 1237 ifh = self.opener(self.indexfile, "a+")
1236 1238 isize = r * self._io.size
1237 1239 if self._inline:
1238 1240 transaction.add(self.indexfile, end + isize, r)
1239 1241 dfh = None
1240 1242 else:
1241 1243 transaction.add(self.indexfile, isize, r)
1242 1244 transaction.add(self.datafile, end)
1243 1245 dfh = self.opener(self.datafile, "a")
1244 1246
1245 1247 try:
1246 1248 # loop through our set of deltas
1247 1249 chain = None
1248 1250 for chunk in revs:
1249 1251 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1250 1252 link = linkmapper(cs)
1251 1253 if node in self.nodemap:
1252 1254 # this can happen if two branches make the same change
1253 1255 chain = node
1254 1256 continue
1255 1257 delta = buffer(chunk, 80)
1256 1258 del chunk
1257 1259
1258 1260 for p in (p1, p2):
1259 1261 if not p in self.nodemap:
1260 1262 raise LookupError(p, self.indexfile, _('unknown parent'))
1261 1263
1262 1264 if not chain:
1263 1265 # retrieve the parent revision of the delta chain
1264 1266 chain = p1
1265 1267 if not chain in self.nodemap:
1266 1268 raise LookupError(chain, self.indexfile, _('unknown base'))
1267 1269
1268 1270 # full versions are inserted when the needed deltas become
1269 1271 # comparable to the uncompressed text or when the previous
1270 1272 # version is not the one we have a delta against. We use
1271 1273 # the size of the previous full rev as a proxy for the
1272 1274 # current size.
1273 1275
1274 1276 if chain == prev:
1275 1277 cdelta = compress(delta)
1276 1278 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1277 1279 textlen = mdiff.patchedsize(textlen, delta)
1278 1280
1279 1281 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1280 1282 # flush our writes here so we can read it in revision
1281 1283 if dfh:
1282 1284 dfh.flush()
1283 1285 ifh.flush()
1284 1286 text = self.revision(chain)
1285 1287 if len(text) == 0:
1286 1288 # skip over trivial delta header
1287 1289 text = buffer(delta, 12)
1288 1290 else:
1289 1291 text = mdiff.patches(text, [delta])
1290 1292 del delta
1291 1293 chk = self._addrevision(text, transaction, link, p1, p2, None,
1292 1294 ifh, dfh)
1293 1295 if not dfh and not self._inline:
1294 1296 # addrevision switched from inline to conventional
1295 1297 # reopen the index
1296 1298 dfh = self.opener(self.datafile, "a")
1297 1299 ifh = self.opener(self.indexfile, "a")
1298 1300 if chk != node:
1299 1301 raise RevlogError(_("consistency error adding group"))
1300 1302 textlen = len(text)
1301 1303 else:
1302 1304 e = (offset_type(end, 0), cdeltalen, textlen, base,
1303 1305 link, self.rev(p1), self.rev(p2), node)
1304 1306 self.index.insert(-1, e)
1305 1307 self.nodemap[node] = r
1306 1308 entry = self._io.packentry(e, self.node, self.version, r)
1307 1309 if self._inline:
1308 1310 ifh.write(entry)
1309 1311 ifh.write(cdelta[0])
1310 1312 ifh.write(cdelta[1])
1311 1313 self.checkinlinesize(transaction, ifh)
1312 1314 if not self._inline:
1313 1315 dfh = self.opener(self.datafile, "a")
1314 1316 ifh = self.opener(self.indexfile, "a")
1315 1317 else:
1316 1318 dfh.write(cdelta[0])
1317 1319 dfh.write(cdelta[1])
1318 1320 ifh.write(entry)
1319 1321
1320 1322 t, r, chain, prev = r, r + 1, node, node
1321 1323 base = self.base(t)
1322 1324 start = self.start(base)
1323 1325 end = self.end(t)
1324 1326 finally:
1325 1327 if dfh:
1326 1328 dfh.close()
1327 1329 ifh.close()
1328 1330
1329 1331 return node
1330 1332
1331 1333 def strip(self, minlink, transaction):
1332 1334 """truncate the revlog on the first revision with a linkrev >= minlink
1333 1335
1334 1336 This function is called when we're stripping revision minlink and
1335 1337 its descendants from the repository.
1336 1338
1337 1339 We have to remove all revisions with linkrev >= minlink, because
1338 1340 the equivalent changelog revisions will be renumbered after the
1339 1341 strip.
1340 1342
1341 1343 So we truncate the revlog on the first of these revisions, and
1342 1344 trust that the caller has saved the revisions that shouldn't be
1343 1345 removed and that it'll readd them after this truncation.
1344 1346 """
1345 1347 if len(self) == 0:
1346 1348 return
1347 1349
1348 1350 if isinstance(self.index, lazyindex):
1349 1351 self._loadindexmap()
1350 1352
1351 1353 for rev in self:
1352 1354 if self.index[rev][4] >= minlink:
1353 1355 break
1354 1356 else:
1355 1357 return
1356 1358
1357 1359 # first truncate the files on disk
1358 1360 end = self.start(rev)
1359 1361 if not self._inline:
1360 1362 transaction.add(self.datafile, end)
1361 1363 end = rev * self._io.size
1362 1364 else:
1363 1365 end += rev * self._io.size
1364 1366
1365 1367 transaction.add(self.indexfile, end)
1366 1368
1367 1369 # then reset internal state in memory to forget those revisions
1368 1370 self._cache = None
1369 1371 self._chunkclear()
1370 1372 for x in xrange(rev, len(self)):
1371 1373 del self.nodemap[self.node(x)]
1372 1374
1373 1375 del self.index[rev:-1]
1374 1376
1375 1377 def checksize(self):
1376 1378 expected = 0
1377 1379 if len(self):
1378 1380 expected = max(0, self.end(len(self) - 1))
1379 1381
1380 1382 try:
1381 1383 f = self.opener(self.datafile)
1382 1384 f.seek(0, 2)
1383 1385 actual = f.tell()
1384 1386 dd = actual - expected
1385 1387 except IOError, inst:
1386 1388 if inst.errno != errno.ENOENT:
1387 1389 raise
1388 1390 dd = 0
1389 1391
1390 1392 try:
1391 1393 f = self.opener(self.indexfile)
1392 1394 f.seek(0, 2)
1393 1395 actual = f.tell()
1394 1396 s = self._io.size
1395 1397 i = max(0, actual // s)
1396 1398 di = actual - (i * s)
1397 1399 if self._inline:
1398 1400 databytes = 0
1399 1401 for r in self:
1400 1402 databytes += max(0, self.length(r))
1401 1403 dd = 0
1402 1404 di = actual - len(self) * s - databytes
1403 1405 except IOError, inst:
1404 1406 if inst.errno != errno.ENOENT:
1405 1407 raise
1406 1408 di = 0
1407 1409
1408 1410 return (dd, di)
1409 1411
1410 1412 def files(self):
1411 1413 res = [self.indexfile]
1412 1414 if not self._inline:
1413 1415 res.append(self.datafile)
1414 1416 return res