revlog: fix up previously stupid API change
Dirkjan Ochtman
r10329:ae0ae869 default
@@ -1,1414 +1,1414 @@
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 16 from i18n import _
17 17 import changegroup, ancestor, mdiff, parsers, error, util
18 18 import struct, zlib, errno
19 19
20 20 _pack = struct.pack
21 21 _unpack = struct.unpack
22 22 _compress = zlib.compress
23 23 _decompress = zlib.decompress
24 24 _sha = util.sha1
25 25
26 26 # revlog flags
27 27 REVLOGV0 = 0
28 28 REVLOGNG = 1
29 29 REVLOGNGINLINEDATA = (1 << 16)
30 30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 31 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33 33
34 34 _prereadsize = 1048576
35 35
36 36 RevlogError = error.RevlogError
37 37 LookupError = error.LookupError
38 38
39 39 def getoffset(q):
40 40 return int(q >> 16)
41 41
42 42 def gettype(q):
43 43 return int(q & 0xFFFF)
44 44
45 45 def offset_type(offset, type):
46 46 return long(long(offset) << 16 | type)
47 47
48 48 nullhash = _sha(nullid)
49 49
50 50 def hash(text, p1, p2):
51 51 """generate a hash from the given text and its parent hashes
52 52
53 53 This hash combines both the current file contents and its history
54 54 in a manner that makes it easy to distinguish nodes with the same
55 55 content in the revision graph.
56 56 """
57 57 # As of now, if one of the parent nodes is null, p2 is null
58 58 if p2 == nullid:
59 59 # deep copy of a hash is faster than creating one
60 60 s = nullhash.copy()
61 61 s.update(p1)
62 62 else:
63 63 # none of the parent nodes are nullid
64 64 l = [p1, p2]
65 65 l.sort()
66 66 s = _sha(l[0])
67 67 s.update(l[1])
68 68 s.update(text)
69 69 return s.digest()
70 70
71 71 def compress(text):
72 72 """ generate a possibly-compressed representation of text """
73 73 if not text:
74 74 return ("", text)
75 75 l = len(text)
76 76 bin = None
77 77 if l < 44:
78 78 pass
79 79 elif l > 1000000:
80 80 # zlib makes an internal copy, thus doubling memory usage for
81 81 # large files, so let's do this in pieces
82 82 z = zlib.compressobj()
83 83 p = []
84 84 pos = 0
85 85 while pos < l:
86 86 pos2 = pos + 2**20
87 87 p.append(z.compress(text[pos:pos2]))
88 88 pos = pos2
89 89 p.append(z.flush())
90 90 if sum(map(len, p)) < l:
91 91 bin = "".join(p)
92 92 else:
93 93 bin = _compress(text)
94 94 if bin is None or len(bin) > l:
95 95 if text[0] == '\0':
96 96 return ("", text)
97 97 return ('u', text)
98 98 return ("", bin)
99 99
100 100 def decompress(bin):
101 101 """ decompress the given input """
102 102 if not bin:
103 103 return bin
104 104 t = bin[0]
105 105 if t == '\0':
106 106 return bin
107 107 if t == 'x':
108 108 return _decompress(bin)
109 109 if t == 'u':
110 110 return bin[1:]
111 111 raise RevlogError(_("unknown compression type %r") % t)
112 112
113 113 class lazyparser(object):
114 114 """
115 115 this class avoids the need to parse the entirety of large indices
116 116 """
117 117
118 118 # lazyparser is not safe to use on windows if win32 extensions are not
119 119 # available. it keeps the file handle open, which makes it impossible
120 120 # to break hardlinks on locally cloned repos.
121 121
122 122 def __init__(self, dataf):
123 123 try:
124 124 size = util.fstat(dataf).st_size
125 125 except AttributeError:
126 126 size = 0
127 127 self.dataf = dataf
128 128 self.s = struct.calcsize(indexformatng)
129 129 self.datasize = size
130 130 self.l = size / self.s
131 131 self.index = [None] * self.l
132 132 self.map = {nullid: nullrev}
133 133 self.allmap = 0
134 134 self.all = 0
135 135 self.mapfind_count = 0
136 136
137 137 def loadmap(self):
138 138 """
139 139 during a commit, we need to make sure the rev being added is
140 140 not a duplicate. This requires loading the entire index,
141 141 which is fairly slow. loadmap can load up just the node map,
142 142 which takes much less time.
143 143 """
144 144 if self.allmap:
145 145 return
146 146 end = self.datasize
147 147 self.allmap = 1
148 148 cur = 0
149 149 count = 0
150 150 blocksize = self.s * 256
151 151 self.dataf.seek(0)
152 152 while cur < end:
153 153 data = self.dataf.read(blocksize)
154 154 off = 0
155 155 for x in xrange(256):
156 156 n = data[off + ngshaoffset:off + ngshaoffset + 20]
157 157 self.map[n] = count
158 158 count += 1
159 159 if count >= self.l:
160 160 break
161 161 off += self.s
162 162 cur += blocksize
163 163
164 164 def loadblock(self, blockstart, blocksize, data=None):
165 165 if self.all:
166 166 return
167 167 if data is None:
168 168 self.dataf.seek(blockstart)
169 169 if blockstart + blocksize > self.datasize:
170 170 # the revlog may have grown since we've started running,
171 171 # but we don't have space in self.index for more entries.
172 172 # limit blocksize so that we don't get too much data.
173 173 blocksize = max(self.datasize - blockstart, 0)
174 174 data = self.dataf.read(blocksize)
175 175 lend = len(data) / self.s
176 176 i = blockstart / self.s
177 177 off = 0
178 178 # lazyindex supports __delitem__
179 179 if lend > len(self.index) - i:
180 180 lend = len(self.index) - i
181 181 for x in xrange(lend):
182 182 if self.index[i + x] is None:
183 183 b = data[off : off + self.s]
184 184 self.index[i + x] = b
185 185 n = b[ngshaoffset:ngshaoffset + 20]
186 186 self.map[n] = i + x
187 187 off += self.s
188 188
189 189 def findnode(self, node):
190 190 """search backwards through the index file for a specific node"""
191 191 if self.allmap:
192 192 return None
193 193
194 194 # hg log will cause many many searches for the manifest
195 195 # nodes. After we get called a few times, just load the whole
196 196 # thing.
197 197 if self.mapfind_count > 8:
198 198 self.loadmap()
199 199 if node in self.map:
200 200 return node
201 201 return None
202 202 self.mapfind_count += 1
203 203 last = self.l - 1
204 204 while self.index[last] != None:
205 205 if last == 0:
206 206 self.all = 1
207 207 self.allmap = 1
208 208 return None
209 209 last -= 1
210 210 end = (last + 1) * self.s
211 211 blocksize = self.s * 256
212 212 while end >= 0:
213 213 start = max(end - blocksize, 0)
214 214 self.dataf.seek(start)
215 215 data = self.dataf.read(end - start)
216 216 findend = end - start
217 217 while True:
218 218 # we're searching backwards, so we have to make sure
219 219 # we don't find a changeset where this node is a parent
220 220 off = data.find(node, 0, findend)
221 221 findend = off
222 222 if off >= 0:
223 223 i = off / self.s
224 224 off = i * self.s
225 225 n = data[off + ngshaoffset:off + ngshaoffset + 20]
226 226 if n == node:
227 227 self.map[n] = i + start / self.s
228 228 return node
229 229 else:
230 230 break
231 231 end -= blocksize
232 232 return None
233 233
234 234 def loadindex(self, i=None, end=None):
235 235 if self.all:
236 236 return
237 237 all = False
238 238 if i is None:
239 239 blockstart = 0
240 240 blocksize = (65536 / self.s) * self.s
241 241 end = self.datasize
242 242 all = True
243 243 else:
244 244 if end:
245 245 blockstart = i * self.s
246 246 end = end * self.s
247 247 blocksize = end - blockstart
248 248 else:
249 249 blockstart = (i & ~1023) * self.s
250 250 blocksize = self.s * 1024
251 251 end = blockstart + blocksize
252 252 while blockstart < end:
253 253 self.loadblock(blockstart, blocksize)
254 254 blockstart += blocksize
255 255 if all:
256 256 self.all = True
257 257
258 258 class lazyindex(object):
259 259 """a lazy version of the index array"""
260 260 def __init__(self, parser):
261 261 self.p = parser
262 262 def __len__(self):
263 263 return len(self.p.index)
264 264 def load(self, pos):
265 265 if pos < 0:
266 266 pos += len(self.p.index)
267 267 self.p.loadindex(pos)
268 268 return self.p.index[pos]
269 269 def __getitem__(self, pos):
270 270 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
271 271 def __setitem__(self, pos, item):
272 272 self.p.index[pos] = _pack(indexformatng, *item)
273 273 def __delitem__(self, pos):
274 274 del self.p.index[pos]
275 275 def insert(self, pos, e):
276 276 self.p.index.insert(pos, _pack(indexformatng, *e))
277 277 def append(self, e):
278 278 self.p.index.append(_pack(indexformatng, *e))
279 279
280 280 class lazymap(object):
281 281 """a lazy version of the node map"""
282 282 def __init__(self, parser):
283 283 self.p = parser
284 284 def load(self, key):
285 285 n = self.p.findnode(key)
286 286 if n is None:
287 287 raise KeyError(key)
288 288 def __contains__(self, key):
289 289 if key in self.p.map:
290 290 return True
291 291 self.p.loadmap()
292 292 return key in self.p.map
293 293 def __iter__(self):
294 294 yield nullid
295 295 for i in xrange(self.p.l):
296 296 ret = self.p.index[i]
297 297 if not ret:
298 298 self.p.loadindex(i)
299 299 ret = self.p.index[i]
300 300 if isinstance(ret, str):
301 301 ret = _unpack(indexformatng, ret)
302 302 yield ret[7]
303 303 def __getitem__(self, key):
304 304 try:
305 305 return self.p.map[key]
306 306 except KeyError:
307 307 try:
308 308 self.load(key)
309 309 return self.p.map[key]
310 310 except KeyError:
311 311 raise KeyError("node " + hex(key))
312 312 def __setitem__(self, key, val):
313 313 self.p.map[key] = val
314 314 def __delitem__(self, key):
315 315 del self.p.map[key]
316 316
317 317 indexformatv0 = ">4l20s20s20s"
318 318 v0shaoffset = 56
319 319
320 320 class revlogoldio(object):
321 321 def __init__(self):
322 322 self.size = struct.calcsize(indexformatv0)
323 323
324 324 def parseindex(self, fp, data, inline):
325 325 s = self.size
326 326 index = []
327 327 nodemap = {nullid: nullrev}
328 328 n = off = 0
329 329 if len(data) == _prereadsize:
330 330 data += fp.read() # read the rest
331 331 l = len(data)
332 332 while off + s <= l:
333 333 cur = data[off:off + s]
334 334 off += s
335 335 e = _unpack(indexformatv0, cur)
336 336 # transform to revlogv1 format
337 337 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
338 338 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
339 339 index.append(e2)
340 340 nodemap[e[6]] = n
341 341 n += 1
342 342
343 343 return index, nodemap, None
344 344
345 345 def packentry(self, entry, node, version, rev):
346 346 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
347 347 node(entry[5]), node(entry[6]), entry[7])
348 348 return _pack(indexformatv0, *e2)
349 349
350 350 # index ng:
351 351 # 6 bytes offset
352 352 # 2 bytes flags
353 353 # 4 bytes compressed length
354 354 # 4 bytes uncompressed length
355 355 # 4 bytes: base rev
356 356 # 4 bytes link rev
357 357 # 4 bytes parent 1 rev
358 358 # 4 bytes parent 2 rev
359 359 # 32 bytes: nodeid
360 360 indexformatng = ">Qiiiiii20s12x"
361 361 ngshaoffset = 32
362 362 versionformat = ">I"
363 363
364 364 class revlogio(object):
365 365 def __init__(self):
366 366 self.size = struct.calcsize(indexformatng)
367 367
368 368 def parseindex(self, fp, data, inline):
369 369 if len(data) == _prereadsize:
370 370 if util.openhardlinks() and not inline:
371 371 # big index, let's parse it on demand
372 372 parser = lazyparser(fp)
373 373 index = lazyindex(parser)
374 374 nodemap = lazymap(parser)
375 375 e = list(index[0])
376 376 type = gettype(e[0])
377 377 e[0] = offset_type(0, type)
378 378 index[0] = e
379 379 return index, nodemap, None
380 380 else:
381 381 data += fp.read()
382 382
383 383 # call the C implementation to parse the index data
384 384 index, nodemap, cache = parsers.parse_index(data, inline)
385 385 return index, nodemap, cache
386 386
387 387 def packentry(self, entry, node, version, rev):
388 388 p = _pack(indexformatng, *entry)
389 389 if rev == 0:
390 390 p = _pack(versionformat, version) + p[4:]
391 391 return p
392 392
393 393 class revlog(object):
394 394 """
395 395 the underlying revision storage object
396 396
397 397 A revlog consists of two parts, an index and the revision data.
398 398
399 399 The index is a file with a fixed record size containing
400 400 information on each revision, including its nodeid (hash), the
401 401 nodeids of its parents, the position and offset of its data within
402 402 the data file, and the revision it's based on. Finally, each entry
403 403 contains a linkrev entry that can serve as a pointer to external
404 404 data.
405 405
406 406 The revision data itself is a linear collection of data chunks.
407 407 Each chunk represents a revision and is usually represented as a
408 408 delta against the previous chunk. To bound lookup time, runs of
409 409 deltas are limited to about 2 times the length of the original
410 410 version data. This makes retrieval of a version proportional to
411 411 its size, or O(1) relative to the number of revisions.
412 412
413 413 Both pieces of the revlog are written to in an append-only
414 414 fashion, which means we never need to rewrite a file to insert or
415 415 remove data, and can use some simple techniques to avoid the need
416 416 for locking while reading.
417 417 """
418 418 def __init__(self, opener, indexfile):
419 419 """
420 420 create a revlog object
421 421
422 422 opener is a function that abstracts the file opening operation
423 423 and can be used to implement COW semantics or the like.
424 424 """
425 425 self.indexfile = indexfile
426 426 self.datafile = indexfile[:-2] + ".d"
427 427 self.opener = opener
428 428 self._cache = None
429 429 self._chunkcache = (0, '')
430 430 self.nodemap = {nullid: nullrev}
431 431 self.index = []
432 432
433 433 v = REVLOG_DEFAULT_VERSION
434 434 if hasattr(opener, 'options') and 'defversion' in opener.options:
435 435 v = opener.options['defversion']
436 436 if v & REVLOGNG:
437 437 v |= REVLOGNGINLINEDATA
438 438
439 439 i = ''
440 440 try:
441 441 f = self.opener(self.indexfile)
442 442 i = f.read(_prereadsize)
443 443 if len(i) > 0:
444 444 v = struct.unpack(versionformat, i[:4])[0]
445 445 except IOError, inst:
446 446 if inst.errno != errno.ENOENT:
447 447 raise
448 448
449 449 self.version = v
450 450 self._inline = v & REVLOGNGINLINEDATA
451 451 flags = v & ~0xFFFF
452 452 fmt = v & 0xFFFF
453 453 if fmt == REVLOGV0 and flags:
454 454 raise RevlogError(_("index %s unknown flags %#04x for format v0")
455 455 % (self.indexfile, flags >> 16))
456 456 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
457 457 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
458 458 % (self.indexfile, flags >> 16))
459 459 elif fmt > REVLOGNG:
460 460 raise RevlogError(_("index %s unknown format %d")
461 461 % (self.indexfile, fmt))
462 462
463 463 self._io = revlogio()
464 464 if self.version == REVLOGV0:
465 465 self._io = revlogoldio()
466 466 if i:
467 467 try:
468 468 d = self._io.parseindex(f, i, self._inline)
469 469 except (ValueError, IndexError):
470 470 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
471 471 self.index, self.nodemap, self._chunkcache = d
472 472 if not self._chunkcache:
473 473 self._chunkclear()
474 474
475 475 # add the magic null revision at -1 (if it hasn't been done already)
476 476 if (self.index == [] or isinstance(self.index, lazyindex) or
477 477 self.index[-1][7] != nullid) :
478 478 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
479 479
480 480 def _loadindex(self, start, end):
481 481 """load a block of indexes all at once from the lazy parser"""
482 482 if isinstance(self.index, lazyindex):
483 483 self.index.p.loadindex(start, end)
484 484
485 485 def _loadindexmap(self):
486 486 """loads both the map and the index from the lazy parser"""
487 487 if isinstance(self.index, lazyindex):
488 488 p = self.index.p
489 489 p.loadindex()
490 490 self.nodemap = p.map
491 491
492 492 def _loadmap(self):
493 493 """loads the map from the lazy parser"""
494 494 if isinstance(self.nodemap, lazymap):
495 495 self.nodemap.p.loadmap()
496 496 self.nodemap = self.nodemap.p.map
497 497
498 498 def tip(self):
499 499 return self.node(len(self.index) - 2)
500 500 def __len__(self):
501 501 return len(self.index) - 1
502 502 def __iter__(self):
503 503 for i in xrange(len(self)):
504 504 yield i
505 505 def rev(self, node):
506 506 try:
507 507 return self.nodemap[node]
508 508 except KeyError:
509 509 raise LookupError(node, self.indexfile, _('no node'))
510 510 def node(self, rev):
511 511 return self.index[rev][7]
512 512 def linkrev(self, rev):
513 513 return self.index[rev][4]
514 514 def parents(self, node):
515 515 i = self.index
516 516 d = i[self.rev(node)]
517 517 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
518 518 def parentrevs(self, rev):
519 519 return self.index[rev][5:7]
520 520 def start(self, rev):
521 521 return int(self.index[rev][0] >> 16)
522 522 def end(self, rev):
523 523 return self.start(rev) + self.length(rev)
524 524 def length(self, rev):
525 525 return self.index[rev][1]
526 526 def base(self, rev):
527 527 return self.index[rev][3]
528 528
529 529 def size(self, rev):
530 530 """return the length of the uncompressed text for a given revision"""
531 531 l = self.index[rev][2]
532 532 if l >= 0:
533 533 return l
534 534
535 535 t = self.revision(self.node(rev))
536 536 return len(t)
537 537
538 538 # Alternate implementation. The advantage to this code is it
539 539 # will be faster for a single revision. However, the results
540 540 # are not cached, so finding the size of every revision will
541 541 # be slower.
542 542 #
543 543 # if self.cache and self.cache[1] == rev:
544 544 # return len(self.cache[2])
545 545 #
546 546 # base = self.base(rev)
547 547 # if self.cache and self.cache[1] >= base and self.cache[1] < rev:
548 548 # base = self.cache[1]
549 549 # text = self.cache[2]
550 550 # else:
551 551 # text = self.revision(self.node(base))
552 552 #
553 553 # l = len(text)
554 554 # for x in xrange(base + 1, rev + 1):
555 555 # l = mdiff.patchedsize(l, self._chunk(x))
556 556 # return l
557 557
558 558 def reachable(self, node, stop=None):
559 559 """return the set of all nodes ancestral to a given node, including
560 560 the node itself, stopping when stop is matched"""
561 561 reachable = set((node,))
562 562 visit = [node]
563 563 if stop:
564 564 stopn = self.rev(stop)
565 565 else:
566 566 stopn = 0
567 567 while visit:
568 568 n = visit.pop(0)
569 569 if n == stop:
570 570 continue
571 571 if n == nullid:
572 572 continue
573 573 for p in self.parents(n):
574 574 if self.rev(p) < stopn:
575 575 continue
576 576 if p not in reachable:
577 577 reachable.add(p)
578 578 visit.append(p)
579 579 return reachable
580 580
581 581 def ancestors(self, *revs):
582 582 """Generate the ancestors of 'revs' in reverse topological order.
583 583
584 584 Yield a sequence of revision numbers starting with the parents
585 585 of each revision in revs, i.e., each revision is *not* considered
586 586 an ancestor of itself. Results are in breadth-first order:
587 587 parents of each rev in revs, then parents of those, etc. Result
588 588 does not include the null revision."""
589 589 visit = list(revs)
590 590 seen = set([nullrev])
591 591 while visit:
592 592 for parent in self.parentrevs(visit.pop(0)):
593 593 if parent not in seen:
594 594 visit.append(parent)
595 595 seen.add(parent)
596 596 yield parent
597 597
598 598 def descendants(self, *revs):
599 599 """Generate the descendants of 'revs' in revision order.
600 600
601 601 Yield a sequence of revision numbers starting with a child of
602 602 some rev in revs, i.e., each revision is *not* considered a
603 603 descendant of itself. Results are ordered by revision number (a
604 604 topological sort)."""
605 605 seen = set(revs)
606 606 for i in xrange(min(revs) + 1, len(self)):
607 607 for x in self.parentrevs(i):
608 608 if x != nullrev and x in seen:
609 609 seen.add(i)
610 610 yield i
611 611 break
612 612
613 613 def findmissing(self, common=None, heads=None):
614 614 """Return the ancestors of heads that are not ancestors of common.
615 615
616 616 More specifically, return a list of nodes N such that every N
617 617 satisfies the following constraints:
618 618
619 619 1. N is an ancestor of some node in 'heads'
620 620 2. N is not an ancestor of any node in 'common'
621 621
622 622 The list is sorted by revision number, meaning it is
623 623 topologically sorted.
624 624
625 625 'heads' and 'common' are both lists of node IDs. If heads is
626 626 not supplied, uses all of the revlog's heads. If common is not
627 627 supplied, uses nullid."""
628 628 if common is None:
629 629 common = [nullid]
630 630 if heads is None:
631 631 heads = self.heads()
632 632
633 633 common = [self.rev(n) for n in common]
634 634 heads = [self.rev(n) for n in heads]
635 635
636 636 # we want the ancestors, but inclusive
637 637 has = set(self.ancestors(*common))
638 638 has.add(nullrev)
639 639 has.update(common)
640 640
641 641 # take all ancestors from heads that aren't in has
642 642 missing = set()
643 643 visit = [r for r in heads if r not in has]
644 644 while visit:
645 645 r = visit.pop(0)
646 646 if r in missing:
647 647 continue
648 648 else:
649 649 missing.add(r)
650 650 for p in self.parentrevs(r):
651 651 if p not in has:
652 652 visit.append(p)
653 653 missing = list(missing)
654 654 missing.sort()
655 655 return [self.node(r) for r in missing]
656 656
657 657 def nodesbetween(self, roots=None, heads=None):
658 658 """Return a topological path from 'roots' to 'heads'.
659 659
660 660 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
661 661 topologically sorted list of all nodes N that satisfy both of
662 662 these constraints:
663 663
664 664 1. N is a descendant of some node in 'roots'
665 665 2. N is an ancestor of some node in 'heads'
666 666
667 667 Every node is considered to be both a descendant and an ancestor
668 668 of itself, so every reachable node in 'roots' and 'heads' will be
669 669 included in 'nodes'.
670 670
671 671 'outroots' is the list of reachable nodes in 'roots', i.e., the
672 672 subset of 'roots' that is returned in 'nodes'. Likewise,
673 673 'outheads' is the subset of 'heads' that is also in 'nodes'.
674 674
675 675 'roots' and 'heads' are both lists of node IDs. If 'roots' is
676 676 unspecified, uses nullid as the only root. If 'heads' is
677 677 unspecified, uses the list of all the revlog's heads."""
678 678 nonodes = ([], [], [])
679 679 if roots is not None:
680 680 roots = list(roots)
681 681 if not roots:
682 682 return nonodes
683 683 lowestrev = min([self.rev(n) for n in roots])
684 684 else:
685 685 roots = [nullid] # Everybody's a descendent of nullid
686 686 lowestrev = nullrev
687 687 if (lowestrev == nullrev) and (heads is None):
688 688 # We want _all_ the nodes!
689 689 return ([self.node(r) for r in self], [nullid], list(self.heads()))
690 690 if heads is None:
691 691 # All nodes are ancestors, so the latest ancestor is the last
692 692 # node.
693 693 highestrev = len(self) - 1
694 694 # Set ancestors to None to signal that every node is an ancestor.
695 695 ancestors = None
696 696 # Set heads to an empty dictionary for later discovery of heads
697 697 heads = {}
698 698 else:
699 699 heads = list(heads)
700 700 if not heads:
701 701 return nonodes
702 702 ancestors = set()
703 703 # Turn heads into a dictionary so we can remove 'fake' heads.
704 704 # Also, later we will be using it to filter out the heads we can't
705 705 # find from roots.
706 706 heads = dict.fromkeys(heads, 0)
707 707 # Start at the top and keep marking parents until we're done.
708 708 nodestotag = set(heads)
709 709 # Remember where the top was so we can use it as a limit later.
710 710 highestrev = max([self.rev(n) for n in nodestotag])
711 711 while nodestotag:
712 712 # grab a node to tag
713 713 n = nodestotag.pop()
714 714 # Never tag nullid
715 715 if n == nullid:
716 716 continue
717 717 # A node's revision number represents its place in a
718 718 # topologically sorted list of nodes.
719 719 r = self.rev(n)
720 720 if r >= lowestrev:
721 721 if n not in ancestors:
722 722 # If we are possibly a descendent of one of the roots
723 723 # and we haven't already been marked as an ancestor
724 724 ancestors.add(n) # Mark as ancestor
725 725 # Add non-nullid parents to list of nodes to tag.
726 726 nodestotag.update([p for p in self.parents(n) if
727 727 p != nullid])
728 728 elif n in heads: # We've seen it before, is it a fake head?
729 729 # So it is, real heads should not be the ancestors of
730 730 # any other heads.
731 731 heads.pop(n)
732 732 if not ancestors:
733 733 return nonodes
734 734 # Now that we have our set of ancestors, we want to remove any
735 735 # roots that are not ancestors.
736 736
737 737 # If one of the roots was nullid, everything is included anyway.
738 738 if lowestrev > nullrev:
739 739 # But, since we weren't, let's recompute the lowest rev to not
740 740 # include roots that aren't ancestors.
741 741
742 742 # Filter out roots that aren't ancestors of heads
743 743 roots = [n for n in roots if n in ancestors]
744 744 # Recompute the lowest revision
745 745 if roots:
746 746 lowestrev = min([self.rev(n) for n in roots])
747 747 else:
748 748 # No more roots? Return empty list
749 749 return nonodes
750 750 else:
751 751 # We are descending from nullid, and don't need to care about
752 752 # any other roots.
753 753 lowestrev = nullrev
754 754 roots = [nullid]
755 755 # Transform our roots list into a set.
756 756 descendents = set(roots)
757 757 # Also, keep the original roots so we can filter out roots that aren't
758 758 # 'real' roots (i.e. are descended from other roots).
759 759 roots = descendents.copy()
760 760 # Our topologically sorted list of output nodes.
761 761 orderedout = []
762 762 # Don't start at nullid since we don't want nullid in our output list,
763 763 # and if nullid shows up in descendents, empty parents will look like
764 764 # they're descendents.
765 765 for r in xrange(max(lowestrev, 0), highestrev + 1):
766 766 n = self.node(r)
767 767 isdescendent = False
768 768 if lowestrev == nullrev: # Everybody is a descendent of nullid
769 769 isdescendent = True
770 770 elif n in descendents:
771 771 # n is already a descendent
772 772 isdescendent = True
773 773 # This check only needs to be done here because all the roots
774 774 # will start being marked as descendents before the loop.
775 775 if n in roots:
776 776 # If n was a root, check if it's a 'real' root.
777 777 p = tuple(self.parents(n))
778 778 # If any of its parents are descendents, it's not a root.
779 779 if (p[0] in descendents) or (p[1] in descendents):
780 780 roots.remove(n)
781 781 else:
782 782 p = tuple(self.parents(n))
783 783 # A node is a descendent if either of its parents are
784 784 # descendents. (We seeded the descendents set with the roots
785 785 # up there, remember?)
786 786 if (p[0] in descendents) or (p[1] in descendents):
787 787 descendents.add(n)
788 788 isdescendent = True
789 789 if isdescendent and ((ancestors is None) or (n in ancestors)):
790 790 # Only include nodes that are both descendents and ancestors.
791 791 orderedout.append(n)
792 792 if (ancestors is not None) and (n in heads):
793 793 # We're trying to figure out which heads are reachable
794 794 # from roots.
795 795 # Mark this head as having been reached
796 796 heads[n] = 1
797 797 elif ancestors is None:
798 798 # Otherwise, we're trying to discover the heads.
799 799 # Assume this is a head because if it isn't, the next step
800 800 # will eventually remove it.
801 801 heads[n] = 1
802 802 # But, obviously its parents aren't.
803 803 for p in self.parents(n):
804 804 heads.pop(p, None)
805 805 heads = [n for n in heads.iterkeys() if heads[n] != 0]
806 806 roots = list(roots)
807 807 assert orderedout
808 808 assert roots
809 809 assert heads
810 810 return (orderedout, roots, heads)
811 811
812 812 def heads(self, start=None, stop=None):
813 813 """return the list of all nodes that have no children
814 814
815 815 if start is specified, only heads that are descendants of
816 816 start will be returned
817 817 if stop is specified, it will consider all the revs from stop
818 818 as if they had no children
819 819 """
820 820 if start is None and stop is None:
821 821 count = len(self)
822 822 if not count:
823 823 return [nullid]
824 824 ishead = [1] * (count + 1)
825 825 index = self.index
826 826 for r in xrange(count):
827 827 e = index[r]
828 828 ishead[e[5]] = ishead[e[6]] = 0
829 829 return [self.node(r) for r in xrange(count) if ishead[r]]
830 830
831 831 if start is None:
832 832 start = nullid
833 833 if stop is None:
834 834 stop = []
835 835 stoprevs = set([self.rev(n) for n in stop])
836 836 startrev = self.rev(start)
837 837 reachable = set((startrev,))
838 838 heads = set((startrev,))
839 839
840 840 parentrevs = self.parentrevs
841 841 for r in xrange(startrev + 1, len(self)):
842 842 for p in parentrevs(r):
843 843 if p in reachable:
844 844 if r not in stoprevs:
845 845 reachable.add(r)
846 846 heads.add(r)
847 847 if p in heads and p not in stoprevs:
848 848 heads.remove(p)
849 849
850 850 return [self.node(r) for r in heads]
851 851
852 852 def children(self, node):
853 853 """find the children of a given node"""
854 854 c = []
855 855 p = self.rev(node)
856 856 for r in range(p + 1, len(self)):
857 857 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
858 858 if prevs:
859 859 for pr in prevs:
860 860 if pr == p:
861 861 c.append(self.node(r))
862 862 elif p == nullrev:
863 863 c.append(self.node(r))
864 864 return c
865 865
866 866 def _match(self, id):
867 867 if isinstance(id, (long, int)):
868 868 # rev
869 869 return self.node(id)
870 870 if len(id) == 20:
871 871 # possibly a binary node
872 872 # odds of a binary node being all hex in ASCII are 1 in 10**25
873 873 try:
874 874 node = id
875 875 self.rev(node) # quick search the index
876 876 return node
877 877 except LookupError:
878 878 pass # may be partial hex id
879 879 try:
880 880 # str(rev)
881 881 rev = int(id)
882 882 if str(rev) != id:
883 883 raise ValueError
884 884 if rev < 0:
885 885 rev = len(self) + rev
886 886 if rev < 0 or rev >= len(self):
887 887 raise ValueError
888 888 return self.node(rev)
889 889 except (ValueError, OverflowError):
890 890 pass
891 891 if len(id) == 40:
892 892 try:
893 893 # a full hex nodeid?
894 894 node = bin(id)
895 895 self.rev(node)
896 896 return node
897 897 except (TypeError, LookupError):
898 898 pass
899 899
900 900 def _partialmatch(self, id):
901 901 if len(id) < 40:
902 902 try:
903 903 # hex(node)[:...]
904 904 l = len(id) // 2 # grab an even number of digits
905 905 bin_id = bin(id[:l * 2])
906 906 nl = [n for n in self.nodemap if n[:l] == bin_id]
907 907 nl = [n for n in nl if hex(n).startswith(id)]
908 908 if len(nl) > 0:
909 909 if len(nl) == 1:
910 910 return nl[0]
911 911 raise LookupError(id, self.indexfile,
912 912 _('ambiguous identifier'))
913 913 return None
914 914 except TypeError:
915 915 pass
916 916
917 917 def lookup(self, id):
918 918 """locate a node based on:
919 919 - revision number or str(revision number)
920 920 - nodeid or subset of hex nodeid
921 921 """
922 922 n = self._match(id)
923 923 if n is not None:
924 924 return n
925 925 n = self._partialmatch(id)
926 926 if n:
927 927 return n
928 928
929 929 raise LookupError(id, self.indexfile, _('no match found'))
930 930
931 931 def cmp(self, node, text):
932 932 """compare text with a given file revision"""
933 933 p1, p2 = self.parents(node)
934 934 return hash(text, p1, p2) != node
935 935
936 936 def _addchunk(self, offset, data):
937 937 o, d = self._chunkcache
938 938 # try to add to existing cache
939 939 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
940 940 self._chunkcache = o, d + data
941 941 else:
942 942 self._chunkcache = offset, data
943 943
944 944 def _loadchunk(self, offset, length):
945 945 if self._inline:
946 946 df = self.opener(self.indexfile)
947 947 else:
948 948 df = self.opener(self.datafile)
949 949
950 950 readahead = max(65536, length)
951 951 df.seek(offset)
952 952 d = df.read(readahead)
953 953 self._addchunk(offset, d)
954 954 if readahead > length:
955 955 return d[:length]
956 956 return d
957 957
958 958 def _getchunk(self, offset, length):
959 959 o, d = self._chunkcache
960 960 l = len(d)
961 961
962 962 # is it in the cache?
963 963 cachestart = offset - o
964 964 cacheend = cachestart + length
965 965 if cachestart >= 0 and cacheend <= l:
966 966 if cachestart == 0 and cacheend == l:
967 967 return d # avoid a copy
968 968 return d[cachestart:cacheend]
969 969
970 970 return self._loadchunk(offset, length)
971 971
972 972 def _chunkraw(self, startrev, endrev):
973 973 start = self.start(startrev)
974 974 length = self.end(endrev) - start
975 975 if self._inline:
976 976 start += (startrev + 1) * self._io.size
977 977 return self._getchunk(start, length)
978 978
979 979 def _chunk(self, rev):
980 980 return decompress(self._chunkraw(rev, rev))
981 981
982 982 def _chunkclear(self):
983 983 self._chunkcache = (0, '')
984 984
985 985 def revdiff(self, rev1, rev2):
986 986 """return or calculate a delta between two revisions"""
987 987 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
988 988 return self._chunk(rev2)
989 989
990 990 return mdiff.textdiff(self.revision(self.node(rev1)),
991 991 self.revision(self.node(rev2)))
992 992
993 993 def revision(self, node):
994 994 """return an uncompressed revision of a given node"""
995 995 if node == nullid:
996 996 return ""
997 997 if self._cache and self._cache[0] == node:
998 998 return self._cache[2]
999 999
1000 1000 # look up what we need to read
1001 1001 text = None
1002 1002 rev = self.rev(node)
1003 1003 base = self.base(rev)
1004 1004
1005 1005 # check rev flags
1006 1006 if self.index[rev][0] & 0xFFFF:
1007 1007 raise RevlogError(_('incompatible revision flag %x') %
1008 1008 (self.index[rev][0] & 0xFFFF))
1009 1009
1010 1010 # do we have useful data cached?
1011 1011 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
1012 1012 base = self._cache[1]
1013 1013 text = self._cache[2]
1014 1014
1015 1015 self._loadindex(base, rev + 1)
1016 1016 self._chunkraw(base, rev)
1017 1017 if text is None:
1018 1018 text = self._chunk(base)
1019 1019
1020 1020 bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
1021 1021 text = mdiff.patches(text, bins)
1022 1022 p1, p2 = self.parents(node)
1023 1023 if node != hash(text, p1, p2):
1024 1024 raise RevlogError(_("integrity check failed on %s:%d")
1025 1025 % (self.indexfile, rev))
1026 1026
1027 1027 self._cache = (node, rev, text)
1028 1028 return text
1029 1029
1030 1030 def checkinlinesize(self, tr, fp=None):
1031 1031 if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
1032 1032 return
1033 1033
1034 1034 trinfo = tr.find(self.indexfile)
1035 1035 if trinfo is None:
1036 1036 raise RevlogError(_("%s not found in the transaction")
1037 1037 % self.indexfile)
1038 1038
1039 1039 trindex = trinfo[2]
1040 1040 dataoff = self.start(trindex)
1041 1041
1042 1042 tr.add(self.datafile, dataoff)
1043 1043
1044 1044 if fp:
1045 1045 fp.flush()
1046 1046 fp.close()
1047 1047
1048 1048 df = self.opener(self.datafile, 'w')
1049 1049 try:
1050 1050 for r in self:
1051 1051 df.write(self._chunkraw(r, r))
1052 1052 finally:
1053 1053 df.close()
1054 1054
1055 1055 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1056 1056 self.version &= ~(REVLOGNGINLINEDATA)
1057 1057 self._inline = False
1058 1058 for i in self:
1059 1059 e = self._io.packentry(self.index[i], self.node, self.version, i)
1060 1060 fp.write(e)
1061 1061
1062 1062 # if we don't call rename, the temp file will never replace the
1063 1063 # real index
1064 1064 fp.rename()
1065 1065
1066 1066 tr.replace(self.indexfile, trindex * self._io.size)
1067 1067 self._chunkclear()
1068 1068
1069 1069 def addrevision(self, text, transaction, link, p1, p2, d=None):
1070 1070 """add a revision to the log
1071 1071
1072 1072 text - the revision data to add
1073 1073 transaction - the transaction object used for rollback
1074 1074 link - the linkrev data to add
1075 1075 p1, p2 - the parent nodeids of the revision
1076 1076 d - an optional precomputed delta
1077 1077 """
1078 1078 dfh = None
1079 1079 if not self._inline:
1080 1080 dfh = self.opener(self.datafile, "a")
1081 1081 ifh = self.opener(self.indexfile, "a+")
1082 1082 try:
1083 1083 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1084 1084 finally:
1085 1085 if dfh:
1086 1086 dfh.close()
1087 1087 ifh.close()
1088 1088
1089 1089 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1090 1090 node = hash(text, p1, p2)
1091 1091 if node in self.nodemap:
1092 1092 return node
1093 1093
1094 1094 curr = len(self)
1095 1095 prev = curr - 1
1096 1096 base = self.base(prev)
1097 1097 offset = self.end(prev)
1098 1098
1099 1099 if curr:
1100 1100 if not d:
1101 1101 ptext = self.revision(self.node(prev))
1102 1102 d = mdiff.textdiff(ptext, text)
1103 1103 data = compress(d)
1104 1104 l = len(data[1]) + len(data[0])
1105 1105 dist = l + offset - self.start(base)
1106 1106
1107 1107 # full versions are inserted when the needed deltas
1108 1108 # become comparable to the uncompressed text
1109 1109 if not curr or dist > len(text) * 2:
1110 1110 data = compress(text)
1111 1111 l = len(data[1]) + len(data[0])
1112 1112 base = curr
1113 1113
1114 1114 e = (offset_type(offset, 0), l, len(text),
1115 1115 base, link, self.rev(p1), self.rev(p2), node)
1116 1116 self.index.insert(-1, e)
1117 1117 self.nodemap[node] = curr
1118 1118
1119 1119 entry = self._io.packentry(e, self.node, self.version, curr)
1120 1120 if not self._inline:
1121 1121 transaction.add(self.datafile, offset)
1122 1122 transaction.add(self.indexfile, curr * len(entry))
1123 1123 if data[0]:
1124 1124 dfh.write(data[0])
1125 1125 dfh.write(data[1])
1126 1126 dfh.flush()
1127 1127 ifh.write(entry)
1128 1128 else:
1129 1129 offset += curr * self._io.size
1130 1130 transaction.add(self.indexfile, offset, curr)
1131 1131 ifh.write(entry)
1132 1132 ifh.write(data[0])
1133 1133 ifh.write(data[1])
1134 1134 self.checkinlinesize(transaction, ifh)
1135 1135
1136 1136 if type(text) == str: # only accept immutable objects
1137 1137 self._cache = (node, curr, text)
1138 1138 return node
1139 1139
1140 def descendant(self, a, b):
1141 if a > b:
1142 return False
1143 for i in self.descendants(a):
1144 if i == b:
1140 def descendant(self, start, end):
1141 for i in self.descendants(start):
1142 if i == end:
1145 1143 return True
1146 elif i > b:
1144 elif i > end:
1147 1145 break
1148 1146 return False
1149 1147
1150 1148 def ancestor(self, a, b):
1151 1149 """calculate the least common ancestor of nodes a and b"""
1152 1150
1151 # fast path, check if it is a descendant
1152 a, b = self.rev(a), self.rev(b)
1153 1153 start, end = sorted((a, b))
1154 if self.descendant(a, b):
1154 if self.descendant(start, end):
1155 1155 return self.node(start)
1156 1156
1157 1157 def parents(rev):
1158 1158 return [p for p in self.parentrevs(rev) if p != nullrev]
1159 1159
1160 1160 c = ancestor.ancestor(a, b, parents)
1161 1161 if c is None:
1162 1162 return nullid
1163 1163
1164 1164 return self.node(c)
1165 1165
1166 1166 def group(self, nodelist, lookup, infocollect=None):
1167 1167 """Calculate a delta group, yielding a sequence of changegroup chunks
1168 1168 (strings).
1169 1169
1170 1170 Given a list of changeset revs, return a set of deltas and
1171 1171 metadata corresponding to nodes. the first delta is
1172 1172 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1173 1173 have this parent as it has all history before these
1174 1174 changesets. parent is parent[0]
1175 1175 """
1176 1176
1177 1177 revs = [self.rev(n) for n in nodelist]
1178 1178
1179 1179 # if we don't have any revisions touched by these changesets, bail
1180 1180 if not revs:
1181 1181 yield changegroup.closechunk()
1182 1182 return
1183 1183
1184 1184 # add the parent of the first rev
1185 1185 p = self.parentrevs(revs[0])[0]
1186 1186 revs.insert(0, p)
1187 1187
1188 1188 # build deltas
1189 1189 for d in xrange(len(revs) - 1):
1190 1190 a, b = revs[d], revs[d + 1]
1191 1191 nb = self.node(b)
1192 1192
1193 1193 if infocollect is not None:
1194 1194 infocollect(nb)
1195 1195
1196 1196 p = self.parents(nb)
1197 1197 meta = nb + p[0] + p[1] + lookup(nb)
1198 1198 if a == -1:
1199 1199 d = self.revision(nb)
1200 1200 meta += mdiff.trivialdiffheader(len(d))
1201 1201 else:
1202 1202 d = self.revdiff(a, b)
1203 1203 yield changegroup.chunkheader(len(meta) + len(d))
1204 1204 yield meta
1205 1205 if len(d) > 2**20:
1206 1206 pos = 0
1207 1207 while pos < len(d):
1208 1208 pos2 = pos + 2 ** 18
1209 1209 yield d[pos:pos2]
1210 1210 pos = pos2
1211 1211 else:
1212 1212 yield d
1213 1213
1214 1214 yield changegroup.closechunk()
1215 1215
1216 1216 def addgroup(self, revs, linkmapper, transaction):
1217 1217 """
1218 1218 add a delta group
1219 1219
1220 1220 given a set of deltas, add them to the revision log. the
1221 1221 first delta is against its parent, which should be in our
1222 1222 log, the rest are against the previous delta.
1223 1223 """
1224 1224
1225 1225 # track the base of the current delta log
1226 1226 r = len(self)
1227 1227 t = r - 1
1228 1228 node = None
1229 1229
1230 1230 base = prev = nullrev
1231 1231 start = end = textlen = 0
1232 1232 if r:
1233 1233 end = self.end(t)
1234 1234
1235 1235 ifh = self.opener(self.indexfile, "a+")
1236 1236 isize = r * self._io.size
1237 1237 if self._inline:
1238 1238 transaction.add(self.indexfile, end + isize, r)
1239 1239 dfh = None
1240 1240 else:
1241 1241 transaction.add(self.indexfile, isize, r)
1242 1242 transaction.add(self.datafile, end)
1243 1243 dfh = self.opener(self.datafile, "a")
1244 1244
1245 1245 try:
1246 1246 # loop through our set of deltas
1247 1247 chain = None
1248 1248 for chunk in revs:
1249 1249 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1250 1250 link = linkmapper(cs)
1251 1251 if node in self.nodemap:
1252 1252 # this can happen if two branches make the same change
1253 1253 chain = node
1254 1254 continue
1255 1255 delta = buffer(chunk, 80)
1256 1256 del chunk
1257 1257
1258 1258 for p in (p1, p2):
1259 1259 if not p in self.nodemap:
1260 1260 raise LookupError(p, self.indexfile, _('unknown parent'))
1261 1261
1262 1262 if not chain:
1263 1263 # retrieve the parent revision of the delta chain
1264 1264 chain = p1
1265 1265 if not chain in self.nodemap:
1266 1266 raise LookupError(chain, self.indexfile, _('unknown base'))
1267 1267
1268 1268 # full versions are inserted when the needed deltas become
1269 1269 # comparable to the uncompressed text or when the previous
1270 1270 # version is not the one we have a delta against. We use
1271 1271 # the size of the previous full rev as a proxy for the
1272 1272 # current size.
1273 1273
1274 1274 if chain == prev:
1275 1275 cdelta = compress(delta)
1276 1276 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1277 1277 textlen = mdiff.patchedsize(textlen, delta)
1278 1278
1279 1279 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1280 1280 # flush our writes here so we can read it in revision
1281 1281 if dfh:
1282 1282 dfh.flush()
1283 1283 ifh.flush()
1284 1284 text = self.revision(chain)
1285 1285 if len(text) == 0:
1286 1286 # skip over trivial delta header
1287 1287 text = buffer(delta, 12)
1288 1288 else:
1289 1289 text = mdiff.patches(text, [delta])
1290 1290 del delta
1291 1291 chk = self._addrevision(text, transaction, link, p1, p2, None,
1292 1292 ifh, dfh)
1293 1293 if not dfh and not self._inline:
1294 1294 # addrevision switched from inline to conventional
1295 1295 # reopen the index
1296 1296 dfh = self.opener(self.datafile, "a")
1297 1297 ifh = self.opener(self.indexfile, "a")
1298 1298 if chk != node:
1299 1299 raise RevlogError(_("consistency error adding group"))
1300 1300 textlen = len(text)
1301 1301 else:
1302 1302 e = (offset_type(end, 0), cdeltalen, textlen, base,
1303 1303 link, self.rev(p1), self.rev(p2), node)
1304 1304 self.index.insert(-1, e)
1305 1305 self.nodemap[node] = r
1306 1306 entry = self._io.packentry(e, self.node, self.version, r)
1307 1307 if self._inline:
1308 1308 ifh.write(entry)
1309 1309 ifh.write(cdelta[0])
1310 1310 ifh.write(cdelta[1])
1311 1311 self.checkinlinesize(transaction, ifh)
1312 1312 if not self._inline:
1313 1313 dfh = self.opener(self.datafile, "a")
1314 1314 ifh = self.opener(self.indexfile, "a")
1315 1315 else:
1316 1316 dfh.write(cdelta[0])
1317 1317 dfh.write(cdelta[1])
1318 1318 ifh.write(entry)
1319 1319
1320 1320 t, r, chain, prev = r, r + 1, node, node
1321 1321 base = self.base(t)
1322 1322 start = self.start(base)
1323 1323 end = self.end(t)
1324 1324 finally:
1325 1325 if dfh:
1326 1326 dfh.close()
1327 1327 ifh.close()
1328 1328
1329 1329 return node
1330 1330
1331 1331 def strip(self, minlink, transaction):
1332 1332 """truncate the revlog on the first revision with a linkrev >= minlink
1333 1333
1334 1334 This function is called when we're stripping revision minlink and
1335 1335 its descendants from the repository.
1336 1336
1337 1337 We have to remove all revisions with linkrev >= minlink, because
1338 1338 the equivalent changelog revisions will be renumbered after the
1339 1339 strip.
1340 1340
1341 1341 So we truncate the revlog on the first of these revisions, and
1342 1342 trust that the caller has saved the revisions that shouldn't be
1343 1343 removed and that it'll readd them after this truncation.
1344 1344 """
1345 1345 if len(self) == 0:
1346 1346 return
1347 1347
1348 1348 if isinstance(self.index, lazyindex):
1349 1349 self._loadindexmap()
1350 1350
1351 1351 for rev in self:
1352 1352 if self.index[rev][4] >= minlink:
1353 1353 break
1354 1354 else:
1355 1355 return
1356 1356
1357 1357 # first truncate the files on disk
1358 1358 end = self.start(rev)
1359 1359 if not self._inline:
1360 1360 transaction.add(self.datafile, end)
1361 1361 end = rev * self._io.size
1362 1362 else:
1363 1363 end += rev * self._io.size
1364 1364
1365 1365 transaction.add(self.indexfile, end)
1366 1366
1367 1367 # then reset internal state in memory to forget those revisions
1368 1368 self._cache = None
1369 1369 self._chunkclear()
1370 1370 for x in xrange(rev, len(self)):
1371 1371 del self.nodemap[self.node(x)]
1372 1372
1373 1373 del self.index[rev:-1]
1374 1374
1375 1375 def checksize(self):
1376 1376 expected = 0
1377 1377 if len(self):
1378 1378 expected = max(0, self.end(len(self) - 1))
1379 1379
1380 1380 try:
1381 1381 f = self.opener(self.datafile)
1382 1382 f.seek(0, 2)
1383 1383 actual = f.tell()
1384 1384 dd = actual - expected
1385 1385 except IOError, inst:
1386 1386 if inst.errno != errno.ENOENT:
1387 1387 raise
1388 1388 dd = 0
1389 1389
1390 1390 try:
1391 1391 f = self.opener(self.indexfile)
1392 1392 f.seek(0, 2)
1393 1393 actual = f.tell()
1394 1394 s = self._io.size
1395 1395 i = max(0, actual // s)
1396 1396 di = actual - (i * s)
1397 1397 if self._inline:
1398 1398 databytes = 0
1399 1399 for r in self:
1400 1400 databytes += max(0, self.length(r))
1401 1401 dd = 0
1402 1402 di = actual - len(self) * s - databytes
1403 1403 except IOError, inst:
1404 1404 if inst.errno != errno.ENOENT:
1405 1405 raise
1406 1406 di = 0
1407 1407
1408 1408 return (dd, di)
1409 1409
1410 1410 def files(self):
1411 1411 res = [self.indexfile]
1412 1412 if not self._inline:
1413 1413 res.append(self.datafile)
1414 1414 return res