provide nicer feedback when an unknown node id is passed to a command...
Sune Foldager - r7062:efc579fd default
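Review note: the functional change is a single line in revlog._match (old line 855 below). A full 40-character hex id that parses to a valid binary node but is absent from the index makes self.rev() raise LookupError; previously that error escaped with the low-level message 'no node', whereas now _match swallows it, lookup() falls through to its uniform 'no match found' error, and the command layer reports "unknown revision '...'" (see the new test and expected output at the bottom). A minimal behaviour sketch, assuming 'opener' is a repository file opener as described in revlog.__init__:

    rl = revlog(opener, '00changelog.i')
    try:
        rl.lookup('1' + '0' * 39)   # well-formed hex, but no such node
    except LookupError, inst:
        print inst                  # now a uniform 'no match found' error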
@@ -1,1361 +1,1361
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import bin, hex, nullid, nullrev, short
14 14 from i18n import _
15 15 import changegroup, errno, ancestor, mdiff
16 16 import struct, util, zlib
17 17
18 18 _pack = struct.pack
19 19 _unpack = struct.unpack
20 20 _compress = zlib.compress
21 21 _decompress = zlib.decompress
22 22 _sha = util.sha1
23 23
24 24 # revlog flags
25 25 REVLOGV0 = 0
26 26 REVLOGNG = 1
27 27 REVLOGNGINLINEDATA = (1 << 16)
28 28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 29 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31 31
32 32 class RevlogError(Exception):
33 33 pass
34 34
35 35 class LookupError(RevlogError, KeyError):
36 36 def __init__(self, name, index, message):
37 37 self.name = name
38 38 if isinstance(name, str) and len(name) == 20:
39 39 name = short(name)
40 40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
41 41
42 42 def __str__(self):
43 43 return RevlogError.__str__(self)
44 44
45 45 def getoffset(q):
46 46 return int(q >> 16)
47 47
48 48 def gettype(q):
49 49 return int(q & 0xFFFF)
50 50
51 51 def offset_type(offset, type):
52 52 return long(long(offset) << 16 | type)
53 53
54 54 def hash(text, p1, p2):
55 55 """generate a hash from the given text and its parent hashes
56 56
57 57 This hash combines both the current file contents and its history
58 58 in a manner that makes it easy to distinguish nodes with the same
59 59 content in the revision graph.
60 60 """
61 61 l = [p1, p2]
62 62 l.sort()
63 63 s = _sha(l[0])
64 64 s.update(l[1])
65 65 s.update(text)
66 66 return s.digest()
67 67
68 68 def compress(text):
69 69 """ generate a possibly-compressed representation of text """
70 70 if not text:
71 71 return ("", text)
72 72 l = len(text)
73 73 bin = None
74 74 if l < 44:
75 75 pass
76 76 elif l > 1000000:
77 77 # zlib makes an internal copy, thus doubling memory usage for
78 78 # large files, so lets do this in pieces
79 79 z = zlib.compressobj()
80 80 p = []
81 81 pos = 0
82 82 while pos < l:
83 83 pos2 = pos + 2**20
84 84 p.append(z.compress(text[pos:pos2]))
85 85 pos = pos2
86 86 p.append(z.flush())
87 87 if sum(map(len, p)) < l:
88 88 bin = "".join(p)
89 89 else:
90 90 bin = _compress(text)
91 91 if bin is None or len(bin) > l:
92 92 if text[0] == '\0':
93 93 return ("", text)
94 94 return ('u', text)
95 95 return ("", bin)
96 96
97 97 def decompress(bin):
98 98 """ decompress the given input """
99 99 if not bin:
100 100 return bin
101 101 t = bin[0]
102 102 if t == '\0':
103 103 return bin
104 104 if t == 'x':
105 105 return _decompress(bin)
106 106 if t == 'u':
107 107 return bin[1:]
108 108 raise RevlogError(_("unknown compression type %r") % t)
109 109
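compress and decompress together define a one-byte tag: 'u' marks text stored uncompressed, 'x' is the first byte of a zlib stream, and a leading '\0' (which a zlib stream cannot start with) means the text is stored as-is. A small round-trip sketch using the two functions above:

    for text in ('', '\0binary', 'short', 'long text ' * 100):
        prefix, data = compress(text)
        assert decompress(prefix + data) == text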
110 110 class lazyparser(object):
111 111 """
112 112 this class avoids the need to parse the entirety of large indices
113 113 """
114 114
115 115 # lazyparser is not safe to use on windows if the win32 extensions
116 116 # are not available. it keeps a file handle open, which makes it
117 117 # impossible to break hardlinks on local cloned repos.
118 118
119 119 def __init__(self, dataf, size):
120 120 self.dataf = dataf
121 121 self.s = struct.calcsize(indexformatng)
122 122 self.datasize = size
123 123 self.l = size/self.s
124 124 self.index = [None] * self.l
125 125 self.map = {nullid: nullrev}
126 126 self.allmap = 0
127 127 self.all = 0
128 128 self.mapfind_count = 0
129 129
130 130 def loadmap(self):
131 131 """
132 132 during a commit, we need to make sure the rev being added is
133 133 not a duplicate. This requires loading the entire index,
134 134 which is fairly slow. loadmap can load up just the node map,
135 135 which takes much less time.
136 136 """
137 137 if self.allmap:
138 138 return
139 139 end = self.datasize
140 140 self.allmap = 1
141 141 cur = 0
142 142 count = 0
143 143 blocksize = self.s * 256
144 144 self.dataf.seek(0)
145 145 while cur < end:
146 146 data = self.dataf.read(blocksize)
147 147 off = 0
148 148 for x in xrange(256):
149 149 n = data[off + ngshaoffset:off + ngshaoffset + 20]
150 150 self.map[n] = count
151 151 count += 1
152 152 if count >= self.l:
153 153 break
154 154 off += self.s
155 155 cur += blocksize
156 156
157 157 def loadblock(self, blockstart, blocksize, data=None):
158 158 if self.all:
159 159 return
160 160 if data is None:
161 161 self.dataf.seek(blockstart)
162 162 if blockstart + blocksize > self.datasize:
163 163 # the revlog may have grown since we've started running,
164 164 # but we don't have space in self.index for more entries.
165 165 # limit blocksize so that we don't get too much data.
166 166 blocksize = max(self.datasize - blockstart, 0)
167 167 data = self.dataf.read(blocksize)
168 168 lend = len(data) / self.s
169 169 i = blockstart / self.s
170 170 off = 0
171 171 # lazyindex supports __delitem__
172 172 if lend > len(self.index) - i:
173 173 lend = len(self.index) - i
174 174 for x in xrange(lend):
175 175 if self.index[i + x] == None:
176 176 b = data[off : off + self.s]
177 177 self.index[i + x] = b
178 178 n = b[ngshaoffset:ngshaoffset + 20]
179 179 self.map[n] = i + x
180 180 off += self.s
181 181
182 182 def findnode(self, node):
183 183 """search backwards through the index file for a specific node"""
184 184 if self.allmap:
185 185 return None
186 186
187 187 # hg log will cause many many searches for the manifest
188 188 # nodes. After we get called a few times, just load the whole
189 189 # thing.
190 190 if self.mapfind_count > 8:
191 191 self.loadmap()
192 192 if node in self.map:
193 193 return node
194 194 return None
195 195 self.mapfind_count += 1
196 196 last = self.l - 1
197 197 while self.index[last] != None:
198 198 if last == 0:
199 199 self.all = 1
200 200 self.allmap = 1
201 201 return None
202 202 last -= 1
203 203 end = (last + 1) * self.s
204 204 blocksize = self.s * 256
205 205 while end >= 0:
206 206 start = max(end - blocksize, 0)
207 207 self.dataf.seek(start)
208 208 data = self.dataf.read(end - start)
209 209 findend = end - start
210 210 while True:
211 211 # we're searching backwards, so we have to make sure
212 212 # we don't find a changeset where this node is a parent
213 213 off = data.find(node, 0, findend)
214 214 findend = off
215 215 if off >= 0:
216 216 i = off / self.s
217 217 off = i * self.s
218 218 n = data[off + ngshaoffset:off + ngshaoffset + 20]
219 219 if n == node:
220 220 self.map[n] = i + start / self.s
221 221 return node
222 222 else:
223 223 break
224 224 end -= blocksize
225 225 return None
226 226
227 227 def loadindex(self, i=None, end=None):
228 228 if self.all:
229 229 return
230 230 all = False
231 231 if i == None:
232 232 blockstart = 0
233 233 blocksize = (65536 / self.s) * self.s
234 234 end = self.datasize
235 235 all = True
236 236 else:
237 237 if end:
238 238 blockstart = i * self.s
239 239 end = end * self.s
240 240 blocksize = end - blockstart
241 241 else:
242 242 blockstart = (i & ~1023) * self.s
243 243 blocksize = self.s * 1024
244 244 end = blockstart + blocksize
245 245 while blockstart < end:
246 246 self.loadblock(blockstart, blocksize)
247 247 blockstart += blocksize
248 248 if all:
249 249 self.all = True
250 250
251 251 class lazyindex(object):
252 252 """a lazy version of the index array"""
253 253 def __init__(self, parser):
254 254 self.p = parser
255 255 def __len__(self):
256 256 return len(self.p.index)
257 257 def load(self, pos):
258 258 if pos < 0:
259 259 pos += len(self.p.index)
260 260 self.p.loadindex(pos)
261 261 return self.p.index[pos]
262 262 def __getitem__(self, pos):
263 263 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
264 264 def __setitem__(self, pos, item):
265 265 self.p.index[pos] = _pack(indexformatng, *item)
266 266 def __delitem__(self, pos):
267 267 del self.p.index[pos]
268 268 def insert(self, pos, e):
269 269 self.p.index.insert(pos, _pack(indexformatng, *e))
270 270 def append(self, e):
271 271 self.p.index.append(_pack(indexformatng, *e))
272 272
273 273 class lazymap(object):
274 274 """a lazy version of the node map"""
275 275 def __init__(self, parser):
276 276 self.p = parser
277 277 def load(self, key):
278 278 n = self.p.findnode(key)
279 279 if n == None:
280 280 raise KeyError(key)
281 281 def __contains__(self, key):
282 282 if key in self.p.map:
283 283 return True
284 284 self.p.loadmap()
285 285 return key in self.p.map
286 286 def __iter__(self):
287 287 yield nullid
288 288 for i in xrange(self.p.l):
289 289 ret = self.p.index[i]
290 290 if not ret:
291 291 self.p.loadindex(i)
292 292 ret = self.p.index[i]
293 293 if isinstance(ret, str):
294 294 ret = _unpack(indexformatng, ret)
295 295 yield ret[7]
296 296 def __getitem__(self, key):
297 297 try:
298 298 return self.p.map[key]
299 299 except KeyError:
300 300 try:
301 301 self.load(key)
302 302 return self.p.map[key]
303 303 except KeyError:
304 304 raise KeyError("node " + hex(key))
305 305 def __setitem__(self, key, val):
306 306 self.p.map[key] = val
307 307 def __delitem__(self, key):
308 308 del self.p.map[key]
309 309
310 310 indexformatv0 = ">4l20s20s20s"
311 311 v0shaoffset = 56
312 312
313 313 class revlogoldio(object):
314 314 def __init__(self):
315 315 self.size = struct.calcsize(indexformatv0)
316 316
317 317 def parseindex(self, fp, inline):
318 318 s = self.size
319 319 index = []
320 320 nodemap = {nullid: nullrev}
321 321 n = off = 0
322 322 data = fp.read()
323 323 l = len(data)
324 324 while off + s <= l:
325 325 cur = data[off:off + s]
326 326 off += s
327 327 e = _unpack(indexformatv0, cur)
328 328 # transform to revlogv1 format
329 329 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
330 330 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
331 331 index.append(e2)
332 332 nodemap[e[6]] = n
333 333 n += 1
334 334
335 335 return index, nodemap, None
336 336
337 337 def packentry(self, entry, node, version, rev):
338 338 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
339 339 node(entry[5]), node(entry[6]), entry[7])
340 340 return _pack(indexformatv0, *e2)
341 341
342 342 # index ng:
343 343 # 6 bytes offset
344 344 # 2 bytes flags
345 345 # 4 bytes compressed length
346 346 # 4 bytes uncompressed length
347 347 # 4 bytes base rev
348 348 # 4 bytes link rev
349 349 # 4 bytes parent 1 rev
350 350 # 4 bytes parent 2 rev
351 351 # 32 bytes nodeid
352 352 indexformatng = ">Qiiiiii20s12x"
353 353 ngshaoffset = 32
354 354 versionformat = ">I"
355 355
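For reference, struct.calcsize of this format is 64 bytes per entry: the leading Q packs the 6-byte offset and 2-byte flags manipulated by offset_type/getoffset/gettype, and the 20-byte sha is padded to 32 bytes by the trailing 12x. A hypothetical first entry packed by hand:

    e = (offset_type(0, 0),    # offset 0, flags 0
         11, 11,               # compressed / uncompressed lengths
         0, 0,                 # base rev, link rev
         -1, -1,               # no parents
         '\x00' * 20)          # binary node id
    assert struct.calcsize(indexformatng) == 64
    entry = _pack(indexformatng, *e)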
356 356 class revlogio(object):
357 357 def __init__(self):
358 358 self.size = struct.calcsize(indexformatng)
359 359
360 360 def parseindex(self, fp, inline):
361 361 try:
362 362 size = util.fstat(fp).st_size
363 363 except AttributeError:
364 364 size = 0
365 365
366 366 if util.openhardlinks() and not inline and size > 1000000:
367 367 # big index, let's parse it on demand
368 368 parser = lazyparser(fp, size)
369 369 index = lazyindex(parser)
370 370 nodemap = lazymap(parser)
371 371 e = list(index[0])
372 372 type = gettype(e[0])
373 373 e[0] = offset_type(0, type)
374 374 index[0] = e
375 375 return index, nodemap, None
376 376
377 377 s = self.size
378 378 cache = None
379 379 index = []
380 380 nodemap = {nullid: nullrev}
381 381 n = off = 0
382 382 # if we're not using lazymap, always read the whole index
383 383 data = fp.read()
384 384 l = len(data) - s
385 385 append = index.append
386 386 if inline:
387 387 cache = (0, data)
388 388 while off <= l:
389 389 e = _unpack(indexformatng, data[off:off + s])
390 390 nodemap[e[7]] = n
391 391 append(e)
392 392 n += 1
393 393 if e[1] < 0:
394 394 break
395 395 off += e[1] + s
396 396 else:
397 397 while off <= l:
398 398 e = _unpack(indexformatng, data[off:off + s])
399 399 nodemap[e[7]] = n
400 400 append(e)
401 401 n += 1
402 402 off += s
403 403
404 404 e = list(index[0])
405 405 type = gettype(e[0])
406 406 e[0] = offset_type(0, type)
407 407 index[0] = e
408 408
409 409 return index, nodemap, cache
410 410
411 411 def packentry(self, entry, node, version, rev):
412 412 p = _pack(indexformatng, *entry)
413 413 if rev == 0:
414 414 p = _pack(versionformat, version) + p[4:]
415 415 return p
416 416
417 417 class revlog(object):
418 418 """
419 419 the underlying revision storage object
420 420
421 421 A revlog consists of two parts, an index and the revision data.
422 422
423 423 The index is a file with a fixed record size containing
424 424 information on each revision, including its nodeid (hash), the
425 425 nodeids of its parents, the position and offset of its data within
426 426 the data file, and the revision it's based on. Finally, each entry
427 427 contains a linkrev entry that can serve as a pointer to external
428 428 data.
429 429
430 430 The revision data itself is a linear collection of data chunks.
431 431 Each chunk represents a revision and is usually represented as a
432 432 delta against the previous chunk. To bound lookup time, runs of
433 433 deltas are limited to about 2 times the length of the original
434 434 version data. This makes retrieval of a version proportional to
435 435 its size, or O(1) relative to the number of revisions.
436 436
437 437 Both pieces of the revlog are written to in an append-only
438 438 fashion, which means we never need to rewrite a file to insert or
439 439 remove data, and can use some simple techniques to avoid the need
440 440 for locking while reading.
441 441 """
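The two-times bound mentioned above is enforced in _addrevision below ('dist > len(text) * 2'); reading any revision is then a matter of taking the full text at the base of its delta chain and patching forward, as revision() does. A condensed sketch of that read path (self is a revlog):

    base = self.base(rev)               # start of the delta chain
    text = self.chunk(base)             # full text stored at the base
    bins = [self.chunk(r) for r in xrange(base + 1, rev + 1)]
    text = mdiff.patches(text, bins)    # apply the deltas in order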
442 442 def __init__(self, opener, indexfile):
443 443 """
444 444 create a revlog object
445 445
446 446 opener is a function that abstracts the file opening operation
447 447 and can be used to implement COW semantics or the like.
448 448 """
449 449 self.indexfile = indexfile
450 450 self.datafile = indexfile[:-2] + ".d"
451 451 self.opener = opener
452 452 self._cache = None
453 453 self._chunkcache = None
454 454 self.nodemap = {nullid: nullrev}
455 455 self.index = []
456 456
457 457 v = REVLOG_DEFAULT_VERSION
458 458 if hasattr(opener, "defversion"):
459 459 v = opener.defversion
460 460 if v & REVLOGNG:
461 461 v |= REVLOGNGINLINEDATA
462 462
463 463 i = ""
464 464 try:
465 465 f = self.opener(self.indexfile)
466 466 i = f.read(4)
467 467 f.seek(0)
468 468 if len(i) > 0:
469 469 v = struct.unpack(versionformat, i)[0]
470 470 except IOError, inst:
471 471 if inst.errno != errno.ENOENT:
472 472 raise
473 473
474 474 self.version = v
475 475 self._inline = v & REVLOGNGINLINEDATA
476 476 flags = v & ~0xFFFF
477 477 fmt = v & 0xFFFF
478 478 if fmt == REVLOGV0 and flags:
479 479 raise RevlogError(_("index %s unknown flags %#04x for format v0")
480 480 % (self.indexfile, flags >> 16))
481 481 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
482 482 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
483 483 % (self.indexfile, flags >> 16))
484 484 elif fmt > REVLOGNG:
485 485 raise RevlogError(_("index %s unknown format %d")
486 486 % (self.indexfile, fmt))
487 487
488 488 self._io = revlogio()
489 489 if self.version == REVLOGV0:
490 490 self._io = revlogoldio()
491 491 if i:
492 492 d = self._io.parseindex(f, self._inline)
493 493 self.index, self.nodemap, self._chunkcache = d
494 494
495 495 # add the magic null revision at -1
496 496 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
497 497
498 498 def _loadindex(self, start, end):
499 499 """load a block of indexes all at once from the lazy parser"""
500 500 if isinstance(self.index, lazyindex):
501 501 self.index.p.loadindex(start, end)
502 502
503 503 def _loadindexmap(self):
504 504 """loads both the map and the index from the lazy parser"""
505 505 if isinstance(self.index, lazyindex):
506 506 p = self.index.p
507 507 p.loadindex()
508 508 self.nodemap = p.map
509 509
510 510 def _loadmap(self):
511 511 """loads the map from the lazy parser"""
512 512 if isinstance(self.nodemap, lazymap):
513 513 self.nodemap.p.loadmap()
514 514 self.nodemap = self.nodemap.p.map
515 515
516 516 def tip(self):
517 517 return self.node(len(self.index) - 2)
518 518 def __len__(self):
519 519 return len(self.index) - 1
520 520 def __iter__(self):
521 521 for i in xrange(len(self)):
522 522 yield i
523 523 def rev(self, node):
524 524 try:
525 525 return self.nodemap[node]
526 526 except KeyError:
527 527 raise LookupError(node, self.indexfile, _('no node'))
528 528 def node(self, rev):
529 529 return self.index[rev][7]
530 530 def linkrev(self, node):
531 531 return self.index[self.rev(node)][4]
532 532 def parents(self, node):
533 533 d = self.index[self.rev(node)][5:7]
534 534 return (self.node(d[0]), self.node(d[1]))
535 535 def parentrevs(self, rev):
536 536 return self.index[rev][5:7]
537 537 def start(self, rev):
538 538 return int(self.index[rev][0] >> 16)
539 539 def end(self, rev):
540 540 return self.start(rev) + self.length(rev)
541 541 def length(self, rev):
542 542 return self.index[rev][1]
543 543 def base(self, rev):
544 544 return self.index[rev][3]
545 545
546 546 def size(self, rev):
547 547 """return the length of the uncompressed text for a given revision"""
548 548 l = self.index[rev][2]
549 549 if l >= 0:
550 550 return l
551 551
552 552 t = self.revision(self.node(rev))
553 553 return len(t)
554 554
555 555 # alternate implementation. The advantage of this code is that it
556 556 # will be faster for a single revision. But the results are not
557 557 # cached, so finding the size of every revision will be slower.
558 558 """
559 559 if self.cache and self.cache[1] == rev:
560 560 return len(self.cache[2])
561 561
562 562 base = self.base(rev)
563 563 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
564 564 base = self.cache[1]
565 565 text = self.cache[2]
566 566 else:
567 567 text = self.revision(self.node(base))
568 568
569 569 l = len(text)
570 570 for x in xrange(base + 1, rev + 1):
571 571 l = mdiff.patchedsize(l, self.chunk(x))
572 572 return l
573 573 """
574 574
575 575 def reachable(self, node, stop=None):
576 576 """return a hash of all nodes ancestral to a given node, including
577 577 the node itself, stopping when stop is matched"""
578 578 reachable = {}
579 579 visit = [node]
580 580 reachable[node] = 1
581 581 if stop:
582 582 stopn = self.rev(stop)
583 583 else:
584 584 stopn = 0
585 585 while visit:
586 586 n = visit.pop(0)
587 587 if n == stop:
588 588 continue
589 589 if n == nullid:
590 590 continue
591 591 for p in self.parents(n):
592 592 if self.rev(p) < stopn:
593 593 continue
594 594 if p not in reachable:
595 595 reachable[p] = 1
596 596 visit.append(p)
597 597 return reachable
598 598
599 599 def ancestors(self, *revs):
600 600 'Generate the ancestors of revs using a breadth-first visit'
601 601 visit = list(revs)
602 602 seen = util.set([nullrev])
603 603 while visit:
604 604 for parent in self.parentrevs(visit.pop(0)):
605 605 if parent not in seen:
606 606 visit.append(parent)
607 607 seen.add(parent)
608 608 yield parent
609 609
610 610 def descendants(self, *revs):
611 611 'Generate the descendants of revs in topological order'
612 612 seen = util.set(revs)
613 613 for i in xrange(min(revs) + 1, len(self)):
614 614 for x in self.parentrevs(i):
615 615 if x != nullrev and x in seen:
616 616 seen.add(i)
617 617 yield i
618 618 break
619 619
620 620 def nodesbetween(self, roots=None, heads=None):
621 621 """Return a tuple containing three elements. Elements 1 and 2 contain
622 622 a final list of bases and heads after all the unreachable ones have been
623 623 pruned. Element 0 contains a topologically sorted list of all
624 624
625 625 nodes that satisfy these constraints:
626 626 1. All nodes must be descended from a node in roots (the nodes on
627 627 roots are considered descended from themselves).
628 628 2. All nodes must also be ancestors of a node in heads (the nodes in
629 629 heads are considered to be their own ancestors).
630 630
631 631 If roots is unspecified, nullid is assumed as the only root.
632 632 If heads is unspecified, it is taken to be the output of the
633 633 heads method (i.e. a list of all nodes in the repository that
634 634 have no children)."""
635 635 nonodes = ([], [], [])
636 636 if roots is not None:
637 637 roots = list(roots)
638 638 if not roots:
639 639 return nonodes
640 640 lowestrev = min([self.rev(n) for n in roots])
641 641 else:
642 642 roots = [nullid] # Everybody's a descendent of nullid
643 643 lowestrev = nullrev
644 644 if (lowestrev == nullrev) and (heads is None):
645 645 # We want _all_ the nodes!
646 646 return ([self.node(r) for r in self], [nullid], list(self.heads()))
647 647 if heads is None:
648 648 # All nodes are ancestors, so the latest ancestor is the last
649 649 # node.
650 650 highestrev = len(self) - 1
651 651 # Set ancestors to None to signal that every node is an ancestor.
652 652 ancestors = None
653 653 # Set heads to an empty dictionary for later discovery of heads
654 654 heads = {}
655 655 else:
656 656 heads = list(heads)
657 657 if not heads:
658 658 return nonodes
659 659 ancestors = {}
660 660 # Turn heads into a dictionary so we can remove 'fake' heads.
661 661 # Also, later we will be using it to filter out the heads we can't
662 662 # find from roots.
663 663 heads = dict.fromkeys(heads, 0)
664 664 # Start at the top and keep marking parents until we're done.
665 665 nodestotag = heads.keys()
666 666 # Remember where the top was so we can use it as a limit later.
667 667 highestrev = max([self.rev(n) for n in nodestotag])
668 668 while nodestotag:
669 669 # grab a node to tag
670 670 n = nodestotag.pop()
671 671 # Never tag nullid
672 672 if n == nullid:
673 673 continue
674 674 # A node's revision number represents its place in a
675 675 # topologically sorted list of nodes.
676 676 r = self.rev(n)
677 677 if r >= lowestrev:
678 678 if n not in ancestors:
679 679 # If we are possibly a descendent of one of the roots
680 680 # and we haven't already been marked as an ancestor
681 681 ancestors[n] = 1 # Mark as ancestor
682 682 # Add non-nullid parents to list of nodes to tag.
683 683 nodestotag.extend([p for p in self.parents(n) if
684 684 p != nullid])
685 685 elif n in heads: # We've seen it before, is it a fake head?
686 686 # So it is, real heads should not be the ancestors of
687 687 # any other heads.
688 688 heads.pop(n)
689 689 if not ancestors:
690 690 return nonodes
691 691 # Now that we have our set of ancestors, we want to remove any
692 692 # roots that are not ancestors.
693 693
694 694 # If one of the roots was nullid, everything is included anyway.
695 695 if lowestrev > nullrev:
696 696 # But, since we weren't, let's recompute the lowest rev to not
697 697 # include roots that aren't ancestors.
698 698
699 699 # Filter out roots that aren't ancestors of heads
700 700 roots = [n for n in roots if n in ancestors]
701 701 # Recompute the lowest revision
702 702 if roots:
703 703 lowestrev = min([self.rev(n) for n in roots])
704 704 else:
705 705 # No more roots? Return empty list
706 706 return nonodes
707 707 else:
708 708 # We are descending from nullid, and don't need to care about
709 709 # any other roots.
710 710 lowestrev = nullrev
711 711 roots = [nullid]
712 712 # Transform our roots list into a 'set' (i.e. a dictionary where the
713 713 # values don't matter).
714 714 descendents = dict.fromkeys(roots, 1)
715 715 # Also, keep the original roots so we can filter out roots that aren't
716 716 # 'real' roots (i.e. are descended from other roots).
717 717 roots = descendents.copy()
718 718 # Our topologically sorted list of output nodes.
719 719 orderedout = []
720 720 # Don't start at nullid since we don't want nullid in our output list,
721 721 # and if nullid shows up in descendents, empty parents will look like
722 722 # they're descendents.
723 723 for r in xrange(max(lowestrev, 0), highestrev + 1):
724 724 n = self.node(r)
725 725 isdescendent = False
726 726 if lowestrev == nullrev: # Everybody is a descendent of nullid
727 727 isdescendent = True
728 728 elif n in descendents:
729 729 # n is already a descendent
730 730 isdescendent = True
731 731 # This check only needs to be done here because all the roots
732 732 # will start being marked as descendents before the loop.
733 733 if n in roots:
734 734 # If n was a root, check if it's a 'real' root.
735 735 p = tuple(self.parents(n))
736 736 # If any of its parents are descendents, it's not a root.
737 737 if (p[0] in descendents) or (p[1] in descendents):
738 738 roots.pop(n)
739 739 else:
740 740 p = tuple(self.parents(n))
741 741 # A node is a descendent if either of its parents are
742 742 # descendents. (We seeded the descendents list with the roots
743 743 # up there, remember?)
744 744 if (p[0] in descendents) or (p[1] in descendents):
745 745 descendents[n] = 1
746 746 isdescendent = True
747 747 if isdescendent and ((ancestors is None) or (n in ancestors)):
748 748 # Only include nodes that are both descendents and ancestors.
749 749 orderedout.append(n)
750 750 if (ancestors is not None) and (n in heads):
751 751 # We're trying to figure out which heads are reachable
752 752 # from roots.
753 753 # Mark this head as having been reached
754 754 heads[n] = 1
755 755 elif ancestors is None:
756 756 # Otherwise, we're trying to discover the heads.
757 757 # Assume this is a head because if it isn't, the next step
758 758 # will eventually remove it.
759 759 heads[n] = 1
760 760 # But, obviously its parents aren't.
761 761 for p in self.parents(n):
762 762 heads.pop(p, None)
763 763 heads = [n for n in heads.iterkeys() if heads[n] != 0]
764 764 roots = roots.keys()
765 765 assert orderedout
766 766 assert roots
767 767 assert heads
768 768 return (orderedout, roots, heads)
769 769
770 770 def heads(self, start=None, stop=None):
771 771 """return the list of all nodes that have no children
772 772
773 773 if start is specified, only heads that are descendants of
774 774 start will be returned
775 775 if stop is specified, it will consider all the revs from stop
776 776 as if they had no children
777 777 """
778 778 if start is None and stop is None:
779 779 count = len(self)
780 780 if not count:
781 781 return [nullid]
782 782 ishead = [1] * (count + 1)
783 783 index = self.index
784 784 for r in self:
785 785 e = index[r]
786 786 ishead[e[5]] = ishead[e[6]] = 0
787 787 return [self.node(r) for r in self if ishead[r]]
788 788
789 789 if start is None:
790 790 start = nullid
791 791 if stop is None:
792 792 stop = []
793 793 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
794 794 startrev = self.rev(start)
795 795 reachable = {startrev: 1}
796 796 heads = {startrev: 1}
797 797
798 798 parentrevs = self.parentrevs
799 799 for r in xrange(startrev + 1, len(self)):
800 800 for p in parentrevs(r):
801 801 if p in reachable:
802 802 if r not in stoprevs:
803 803 reachable[r] = 1
804 804 heads[r] = 1
805 805 if p in heads and p not in stoprevs:
806 806 del heads[p]
807 807
808 808 return [self.node(r) for r in heads]
809 809
810 810 def children(self, node):
811 811 """find the children of a given node"""
812 812 c = []
813 813 p = self.rev(node)
814 814 for r in range(p + 1, len(self)):
815 815 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
816 816 if prevs:
817 817 for pr in prevs:
818 818 if pr == p:
819 819 c.append(self.node(r))
820 820 elif p == nullrev:
821 821 c.append(self.node(r))
822 822 return c
823 823
824 824 def _match(self, id):
825 825 if isinstance(id, (long, int)):
826 826 # rev
827 827 return self.node(id)
828 828 if len(id) == 20:
829 829 # possibly a binary node
830 830 # odds of a binary node being all hex in ASCII are 1 in 10**25
831 831 try:
832 832 node = id
833 833 r = self.rev(node) # quick search the index
834 834 return node
835 835 except LookupError:
836 836 pass # may be partial hex id
837 837 try:
838 838 # str(rev)
839 839 rev = int(id)
840 840 if str(rev) != id:
841 841 raise ValueError
842 842 if rev < 0:
843 843 rev = len(self) + rev
844 844 if rev < 0 or rev >= len(self):
845 845 raise ValueError
846 846 return self.node(rev)
847 847 except (ValueError, OverflowError):
848 848 pass
849 849 if len(id) == 40:
850 850 try:
851 851 # a full hex nodeid?
852 852 node = bin(id)
853 853 r = self.rev(node)
854 854 return node
855 except TypeError:
855 except (TypeError, LookupError):
856 856 pass
857 857
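Review note: this is the fix. Both calls in the branch above can fail for a syntactically valid 40-character id:

    node = bin(id)       # raises TypeError if id is not valid hex
    r = self.rev(node)   # raises LookupError if node is not in the index

Catching LookupError as well keeps the low-level 'no node' error from escaping here; lookup() below then raises its uniform 'no match found' instead.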
858 858 def _partialmatch(self, id):
859 859 if len(id) < 40:
860 860 try:
861 861 # hex(node)[:...]
862 862 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
863 863 node = None
864 864 for n in self.nodemap:
865 865 if n.startswith(bin_id) and hex(n).startswith(id):
866 866 if node is not None:
867 867 raise LookupError(id, self.indexfile,
868 868 _('ambiguous identifier'))
869 869 node = n
870 870 if node is not None:
871 871 return node
872 872 except TypeError:
873 873 pass
874 874
875 875 def lookup(self, id):
876 876 """locate a node based on:
877 877 - revision number or str(revision number)
878 878 - nodeid or subset of hex nodeid
879 879 """
880 880 n = self._match(id)
881 881 if n is not None:
882 882 return n
883 883 n = self._partialmatch(id)
884 884 if n:
885 885 return n
886 886
887 887 raise LookupError(id, self.indexfile, _('no match found'))
888 888
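Taken together, _match and _partialmatch give lookup() a fixed resolution order; illustrative inputs for each form (rl is a revlog, node a known 20-byte id):

    rl.lookup(3)               # revision number
    rl.lookup('3')             # str(rev); negative values count from the end
    rl.lookup(node)            # binary node id
    rl.lookup(hex(node))       # full 40-character hex node id
    rl.lookup(hex(node)[:8])   # unambiguous hex prefix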
889 889 def cmp(self, node, text):
890 890 """compare text with a given file revision"""
891 891 p1, p2 = self.parents(node)
892 892 return hash(text, p1, p2) != node
893 893
894 894 def chunk(self, rev, df=None):
895 895 def loadcache(df):
896 896 if not df:
897 897 if self._inline:
898 898 df = self.opener(self.indexfile)
899 899 else:
900 900 df = self.opener(self.datafile)
901 901 df.seek(start)
902 902 self._chunkcache = (start, df.read(cache_length))
903 903
904 904 start, length = self.start(rev), self.length(rev)
905 905 if self._inline:
906 906 start += (rev + 1) * self._io.size
907 907 end = start + length
908 908
909 909 offset = 0
910 910 if not self._chunkcache:
911 911 cache_length = max(65536, length)
912 912 loadcache(df)
913 913 else:
914 914 cache_start = self._chunkcache[0]
915 915 cache_length = len(self._chunkcache[1])
916 916 cache_end = cache_start + cache_length
917 917 if start >= cache_start and end <= cache_end:
918 918 # it is cached
919 919 offset = start - cache_start
920 920 else:
921 921 cache_length = max(65536, length)
922 922 loadcache(df)
923 923
924 924 # avoid copying large chunks
925 925 c = self._chunkcache[1]
926 926 if cache_length != length:
927 927 c = c[offset:offset + length]
928 928
929 929 return decompress(c)
930 930
931 931 def delta(self, node):
932 932 """return or calculate a delta between a node and its predecessor"""
933 933 r = self.rev(node)
934 934 return self.revdiff(r - 1, r)
935 935
936 936 def revdiff(self, rev1, rev2):
937 937 """return or calculate a delta between two revisions"""
938 938 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
939 939 return self.chunk(rev2)
940 940
941 941 return mdiff.textdiff(self.revision(self.node(rev1)),
942 942 self.revision(self.node(rev2)))
943 943
944 944 def revision(self, node):
945 945 """return an uncompressed revision of a given node"""
946 946 if node == nullid:
947 947 return ""
948 948 if self._cache and self._cache[0] == node:
949 949 return str(self._cache[2])
950 950
951 951 # look up what we need to read
952 952 text = None
953 953 rev = self.rev(node)
954 954 base = self.base(rev)
955 955
956 956 # check rev flags
957 957 if self.index[rev][0] & 0xFFFF:
958 958 raise RevlogError(_('incompatible revision flag %x') %
959 959 (self.index[rev][0] & 0xFFFF))
960 960
961 961 df = None
962 962
963 963 # do we have useful data cached?
964 964 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
965 965 base = self._cache[1]
966 966 text = str(self._cache[2])
967 967 self._loadindex(base, rev + 1)
968 968 if not self._inline and rev > base + 1:
969 969 df = self.opener(self.datafile)
970 970 else:
971 971 self._loadindex(base, rev + 1)
972 972 if not self._inline and rev > base:
973 973 df = self.opener(self.datafile)
974 974 text = self.chunk(base, df=df)
975 975
976 976 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
977 977 text = mdiff.patches(text, bins)
978 978 p1, p2 = self.parents(node)
979 979 if node != hash(text, p1, p2):
980 980 raise RevlogError(_("integrity check failed on %s:%d")
981 981 % (self.datafile, rev))
982 982
983 983 self._cache = (node, rev, text)
984 984 return text
985 985
986 986 def checkinlinesize(self, tr, fp=None):
987 987 if not self._inline:
988 988 return
989 989 if not fp:
990 990 fp = self.opener(self.indexfile, 'r')
991 991 fp.seek(0, 2)
992 992 size = fp.tell()
993 993 if size < 131072:
994 994 return
995 995 trinfo = tr.find(self.indexfile)
996 996 if trinfo == None:
997 997 raise RevlogError(_("%s not found in the transaction")
998 998 % self.indexfile)
999 999
1000 1000 trindex = trinfo[2]
1001 1001 dataoff = self.start(trindex)
1002 1002
1003 1003 tr.add(self.datafile, dataoff)
1004 1004 df = self.opener(self.datafile, 'w')
1005 1005 try:
1006 1006 calc = self._io.size
1007 1007 for r in self:
1008 1008 start = self.start(r) + (r + 1) * calc
1009 1009 length = self.length(r)
1010 1010 fp.seek(start)
1011 1011 d = fp.read(length)
1012 1012 df.write(d)
1013 1013 finally:
1014 1014 df.close()
1015 1015
1016 1016 fp.close()
1017 1017 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1018 1018 self.version &= ~(REVLOGNGINLINEDATA)
1019 1019 self._inline = False
1020 1020 for i in self:
1021 1021 e = self._io.packentry(self.index[i], self.node, self.version, i)
1022 1022 fp.write(e)
1023 1023
1024 1024 # if we don't call rename, the temp file will never replace the
1025 1025 # real index
1026 1026 fp.rename()
1027 1027
1028 1028 tr.replace(self.indexfile, trindex * calc)
1029 1029 self._chunkcache = None
1030 1030
1031 1031 def addrevision(self, text, transaction, link, p1, p2, d=None):
1032 1032 """add a revision to the log
1033 1033
1034 1034 text - the revision data to add
1035 1035 transaction - the transaction object used for rollback
1036 1036 link - the linkrev data to add
1037 1037 p1, p2 - the parent nodeids of the revision
1038 1038 d - an optional precomputed delta
1039 1039 """
1040 1040 dfh = None
1041 1041 if not self._inline:
1042 1042 dfh = self.opener(self.datafile, "a")
1043 1043 ifh = self.opener(self.indexfile, "a+")
1044 1044 try:
1045 1045 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1046 1046 finally:
1047 1047 if dfh:
1048 1048 dfh.close()
1049 1049 ifh.close()
1050 1050
1051 1051 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1052 1052 node = hash(text, p1, p2)
1053 1053 if node in self.nodemap:
1054 1054 return node
1055 1055
1056 1056 curr = len(self)
1057 1057 prev = curr - 1
1058 1058 base = self.base(prev)
1059 1059 offset = self.end(prev)
1060 1060
1061 1061 if curr:
1062 1062 if not d:
1063 1063 ptext = self.revision(self.node(prev))
1064 1064 d = mdiff.textdiff(ptext, text)
1065 1065 data = compress(d)
1066 1066 l = len(data[1]) + len(data[0])
1067 1067 dist = l + offset - self.start(base)
1068 1068
1069 1069 # full versions are inserted when the needed deltas
1070 1070 # become comparable to the uncompressed text
1071 1071 if not curr or dist > len(text) * 2:
1072 1072 data = compress(text)
1073 1073 l = len(data[1]) + len(data[0])
1074 1074 base = curr
1075 1075
1076 1076 e = (offset_type(offset, 0), l, len(text),
1077 1077 base, link, self.rev(p1), self.rev(p2), node)
1078 1078 self.index.insert(-1, e)
1079 1079 self.nodemap[node] = curr
1080 1080
1081 1081 entry = self._io.packentry(e, self.node, self.version, curr)
1082 1082 if not self._inline:
1083 1083 transaction.add(self.datafile, offset)
1084 1084 transaction.add(self.indexfile, curr * len(entry))
1085 1085 if data[0]:
1086 1086 dfh.write(data[0])
1087 1087 dfh.write(data[1])
1088 1088 dfh.flush()
1089 1089 ifh.write(entry)
1090 1090 else:
1091 1091 offset += curr * self._io.size
1092 1092 transaction.add(self.indexfile, offset, curr)
1093 1093 ifh.write(entry)
1094 1094 ifh.write(data[0])
1095 1095 ifh.write(data[1])
1096 1096 self.checkinlinesize(transaction, ifh)
1097 1097
1098 1098 self._cache = (node, curr, text)
1099 1099 return node
1100 1100
1101 1101 def ancestor(self, a, b):
1102 1102 """calculate the least common ancestor of nodes a and b"""
1103 1103
1104 1104 def parents(rev):
1105 1105 return [p for p in self.parentrevs(rev) if p != nullrev]
1106 1106
1107 1107 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1108 1108 if c is None:
1109 1109 return nullid
1110 1110
1111 1111 return self.node(c)
1112 1112
1113 1113 def group(self, nodelist, lookup, infocollect=None):
1114 1114 """calculate a delta group
1115 1115
1116 1116 Given a list of changeset revs, return a set of deltas and
1117 1117 metadata corresponding to nodes. The first delta is
1118 1118 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1119 1119 have this parent as it has all history before these
1120 1120 changesets. parent is parent[0].
1121 1121 """
1122 1122 revs = [self.rev(n) for n in nodelist]
1123 1123
1124 1124 # if we don't have any revisions touched by these changesets, bail
1125 1125 if not revs:
1126 1126 yield changegroup.closechunk()
1127 1127 return
1128 1128
1129 1129 # add the parent of the first rev
1130 1130 p = self.parents(self.node(revs[0]))[0]
1131 1131 revs.insert(0, self.rev(p))
1132 1132
1133 1133 # build deltas
1134 1134 for d in xrange(0, len(revs) - 1):
1135 1135 a, b = revs[d], revs[d + 1]
1136 1136 nb = self.node(b)
1137 1137
1138 1138 if infocollect is not None:
1139 1139 infocollect(nb)
1140 1140
1141 1141 p = self.parents(nb)
1142 1142 meta = nb + p[0] + p[1] + lookup(nb)
1143 1143 if a == -1:
1144 1144 d = self.revision(nb)
1145 1145 meta += mdiff.trivialdiffheader(len(d))
1146 1146 else:
1147 1147 d = self.revdiff(a, b)
1148 1148 yield changegroup.chunkheader(len(meta) + len(d))
1149 1149 yield meta
1150 1150 if len(d) > 2**20:
1151 1151 pos = 0
1152 1152 while pos < len(d):
1153 1153 pos2 = pos + 2 ** 18
1154 1154 yield d[pos:pos2]
1155 1155 pos = pos2
1156 1156 else:
1157 1157 yield d
1158 1158
1159 1159 yield changegroup.closechunk()
1160 1160
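Each yielded unit is a length-prefixed chunk; the payload layout built above is 80 bytes of metadata followed by the delta (addgroup below unpacks exactly this). A sketch of the framing, with chunkheader's 4-byte big-endian length prefix stated as an assumption:

    chunkheader(l)   length prefix for the chunk that follows
    meta (80 bytes)  node(20) + p1(20) + p2(20) + linknode(20)
    payload          mdiff delta, or trivialdiffheader + full text when a == -1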
1161 1161 def addgroup(self, revs, linkmapper, transaction):
1162 1162 """
1163 1163 add a delta group
1164 1164
1165 1165 given a set of deltas, add them to the revision log. the
1166 1166 first delta is against its parent, which should be in our
1167 1167 log; the rest are against the previous delta.
1168 1168 """
1169 1169
1170 1170 # track the base of the current delta log
1171 1171 r = len(self)
1172 1172 t = r - 1
1173 1173 node = None
1174 1174
1175 1175 base = prev = nullrev
1176 1176 start = end = textlen = 0
1177 1177 if r:
1178 1178 end = self.end(t)
1179 1179
1180 1180 ifh = self.opener(self.indexfile, "a+")
1181 1181 isize = r * self._io.size
1182 1182 if self._inline:
1183 1183 transaction.add(self.indexfile, end + isize, r)
1184 1184 dfh = None
1185 1185 else:
1186 1186 transaction.add(self.indexfile, isize, r)
1187 1187 transaction.add(self.datafile, end)
1188 1188 dfh = self.opener(self.datafile, "a")
1189 1189
1190 1190 try:
1191 1191 # loop through our set of deltas
1192 1192 chain = None
1193 1193 for chunk in revs:
1194 1194 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1195 1195 link = linkmapper(cs)
1196 1196 if node in self.nodemap:
1197 1197 # this can happen if two branches make the same change
1198 1198 chain = node
1199 1199 continue
1200 1200 delta = buffer(chunk, 80)
1201 1201 del chunk
1202 1202
1203 1203 for p in (p1, p2):
1204 1204 if not p in self.nodemap:
1205 1205 raise LookupError(p, self.indexfile, _('unknown parent'))
1206 1206
1207 1207 if not chain:
1208 1208 # retrieve the parent revision of the delta chain
1209 1209 chain = p1
1210 1210 if not chain in self.nodemap:
1211 1211 raise LookupError(chain, self.indexfile, _('unknown base'))
1212 1212
1213 1213 # full versions are inserted when the needed deltas become
1214 1214 # comparable to the uncompressed text or when the previous
1215 1215 # version is not the one we have a delta against. We use
1216 1216 # the size of the previous full rev as a proxy for the
1217 1217 # current size.
1218 1218
1219 1219 if chain == prev:
1220 1220 cdelta = compress(delta)
1221 1221 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1222 1222 textlen = mdiff.patchedsize(textlen, delta)
1223 1223
1224 1224 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1225 1225 # flush our writes here so we can read it in revision
1226 1226 if dfh:
1227 1227 dfh.flush()
1228 1228 ifh.flush()
1229 1229 text = self.revision(chain)
1230 1230 if len(text) == 0:
1231 1231 # skip over trivial delta header
1232 1232 text = buffer(delta, 12)
1233 1233 else:
1234 1234 text = mdiff.patches(text, [delta])
1235 1235 del delta
1236 1236 chk = self._addrevision(text, transaction, link, p1, p2, None,
1237 1237 ifh, dfh)
1238 1238 if not dfh and not self._inline:
1239 1239 # addrevision switched from inline to conventional
1240 1240 # reopen the index
1241 1241 dfh = self.opener(self.datafile, "a")
1242 1242 ifh = self.opener(self.indexfile, "a")
1243 1243 if chk != node:
1244 1244 raise RevlogError(_("consistency error adding group"))
1245 1245 textlen = len(text)
1246 1246 else:
1247 1247 e = (offset_type(end, 0), cdeltalen, textlen, base,
1248 1248 link, self.rev(p1), self.rev(p2), node)
1249 1249 self.index.insert(-1, e)
1250 1250 self.nodemap[node] = r
1251 1251 entry = self._io.packentry(e, self.node, self.version, r)
1252 1252 if self._inline:
1253 1253 ifh.write(entry)
1254 1254 ifh.write(cdelta[0])
1255 1255 ifh.write(cdelta[1])
1256 1256 self.checkinlinesize(transaction, ifh)
1257 1257 if not self._inline:
1258 1258 dfh = self.opener(self.datafile, "a")
1259 1259 ifh = self.opener(self.indexfile, "a")
1260 1260 else:
1261 1261 dfh.write(cdelta[0])
1262 1262 dfh.write(cdelta[1])
1263 1263 ifh.write(entry)
1264 1264
1265 1265 t, r, chain, prev = r, r + 1, node, node
1266 1266 base = self.base(t)
1267 1267 start = self.start(base)
1268 1268 end = self.end(t)
1269 1269 finally:
1270 1270 if dfh:
1271 1271 dfh.close()
1272 1272 ifh.close()
1273 1273
1274 1274 return node
1275 1275
1276 1276 def strip(self, minlink):
1277 1277 """truncate the revlog on the first revision with a linkrev >= minlink
1278 1278
1279 1279 This function is called when we're stripping revision minlink and
1280 1280 its descendants from the repository.
1281 1281
1282 1282 We have to remove all revisions with linkrev >= minlink, because
1283 1283 the equivalent changelog revisions will be renumbered after the
1284 1284 strip.
1285 1285
1286 1286 So we truncate the revlog on the first of these revisions, and
1287 1287 trust that the caller has saved the revisions that shouldn't be
1288 1288 removed and that it'll re-add them after this truncation.
1289 1289 """
1290 1290 if len(self) == 0:
1291 1291 return
1292 1292
1293 1293 if isinstance(self.index, lazyindex):
1294 1294 self._loadindexmap()
1295 1295
1296 1296 for rev in self:
1297 1297 if self.index[rev][4] >= minlink:
1298 1298 break
1299 1299 else:
1300 1300 return
1301 1301
1302 1302 # first truncate the files on disk
1303 1303 end = self.start(rev)
1304 1304 if not self._inline:
1305 1305 df = self.opener(self.datafile, "a")
1306 1306 df.truncate(end)
1307 1307 end = rev * self._io.size
1308 1308 else:
1309 1309 end += rev * self._io.size
1310 1310
1311 1311 indexf = self.opener(self.indexfile, "a")
1312 1312 indexf.truncate(end)
1313 1313
1314 1314 # then reset internal state in memory to forget those revisions
1315 1315 self._cache = None
1316 1316 self._chunkcache = None
1317 1317 for x in xrange(rev, len(self)):
1318 1318 del self.nodemap[self.node(x)]
1319 1319
1320 1320 del self.index[rev:-1]
1321 1321
1322 1322 def checksize(self):
1323 1323 expected = 0
1324 1324 if len(self):
1325 1325 expected = max(0, self.end(len(self) - 1))
1326 1326
1327 1327 try:
1328 1328 f = self.opener(self.datafile)
1329 1329 f.seek(0, 2)
1330 1330 actual = f.tell()
1331 1331 dd = actual - expected
1332 1332 except IOError, inst:
1333 1333 if inst.errno != errno.ENOENT:
1334 1334 raise
1335 1335 dd = 0
1336 1336
1337 1337 try:
1338 1338 f = self.opener(self.indexfile)
1339 1339 f.seek(0, 2)
1340 1340 actual = f.tell()
1341 1341 s = self._io.size
1342 1342 i = max(0, actual / s)
1343 1343 di = actual - (i * s)
1344 1344 if self._inline:
1345 1345 databytes = 0
1346 1346 for r in self:
1347 1347 databytes += max(0, self.length(r))
1348 1348 dd = 0
1349 1349 di = actual - len(self) * s - databytes
1350 1350 except IOError, inst:
1351 1351 if inst.errno != errno.ENOENT:
1352 1352 raise
1353 1353 di = 0
1354 1354
1355 1355 return (dd, di)
1356 1356
1357 1357 def files(self):
1358 1358 res = [ self.indexfile ]
1359 1359 if not self._inline:
1360 1360 res.append(self.datafile)
1361 1361 return res
@@ -1,101 +1,104
1 1 #!/bin/sh
2 2
3 3 hg init a
4 4
5 5 cd a
6 6 echo a > a
7 7 hg ci -Ama -d '1 0'
8 8
9 9 hg cp a b
10 10 hg ci -mb -d '2 0'
11 11
12 12 mkdir dir
13 13 hg mv b dir
14 14 hg ci -mc -d '3 0'
15 15
16 16 hg mv a b
17 17 echo a > d
18 18 hg add d
19 19 hg ci -md -d '4 0'
20 20
21 21 hg mv dir/b e
22 22 hg ci -me -d '5 0'
23 23
24 24 hg log a
25 25 echo % -f, directory
26 26 hg log -f dir
27 27 echo % -f, but no args
28 28 hg log -f
29 29 echo % one rename
30 30 hg log -vf a
31 31 echo % many renames
32 32 hg log -vf e
33 33
34 34 echo % log copies
35 35 hg log -vC --template '{rev} {file_copies%filecopy}\n'
36 36
37 37 echo % log copies, non-linear manifest
38 38 hg up -C 3
39 39 hg mv dir/b e
40 40 echo foo > foo
41 41 hg ci -Ame2 -d '6 0'
42 42 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r 5
43 43
44 44 echo % log copies, execute bit set
45 45 chmod +x e
46 46 hg ci -me3 -d '7 0'
47 47 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r 6
48 48
49 49 echo '% log -p d'
50 50 hg log -pv d
51 51
52 52 # log --follow tests
53 53 hg init ../follow
54 54 cd ../follow
55 55
56 56 echo base > base
57 57 hg ci -Ambase -d '1 0'
58 58
59 59 echo r1 >> base
60 60 hg ci -Amr1 -d '1 0'
61 61 echo r2 >> base
62 62 hg ci -Amr2 -d '1 0'
63 63
64 64 hg up -C 1
65 65 echo b1 > b1
66 66 hg ci -Amb1 -d '1 0'
67 67
68 68 echo % log -f
69 69 hg log -f
70 70
71 71 hg up -C 0
72 72 echo b2 > b2
73 73 hg ci -Amb2 -d '1 0'
74 74
75 75 echo % log -f -r 1:tip
76 76 hg log -f -r 1:tip
77 77
78 78 hg up -C 3
79 79 hg merge tip
80 80
81 81 echo % log -r . with two parents
82 82 hg log -r .
83 83
84 84 hg ci -mm12 -d '1 0'
85 85
86 86 echo % log -r . with one parent
87 87 hg log -r .
88 88
89 89 echo postm >> b1
90 90 hg ci -Amb1.1 -d'1 0'
91 91
92 92 echo % log --follow-first
93 93 hg log --follow-first
94 94
95 95 echo % log -P 2
96 96 hg log -P 2
97 97
98 98 echo '% log -r ""'
99 99 hg log -r ''
100 100
101 echo '% log -r <some unknown node id>'
102 hg log -r 1000000000000000000000000000000000000000
103
101 104 exit 0
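The two added lines exercise the fix: a syntactically valid but unknown full node id must now abort cleanly instead of surfacing a raw 'no node' LookupError. The expected session, matching the new lines at the end of the output file below:

    $ hg log -r 1000000000000000000000000000000000000000
    abort: unknown revision '1000000000000000000000000000000000000000'!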
@@ -1,225 +1,227
1 1 adding a
2 2 changeset: 0:8580ff50825a
3 3 user: test
4 4 date: Thu Jan 01 00:00:01 1970 +0000
5 5 summary: a
6 6
7 7 % -f, directory
8 8 abort: can only follow copies/renames for explicit file names
9 9 % -f, but no args
10 10 changeset: 4:b30c444c7c84
11 11 tag: tip
12 12 user: test
13 13 date: Thu Jan 01 00:00:05 1970 +0000
14 14 summary: e
15 15
16 16 changeset: 3:16b60bf3f99a
17 17 user: test
18 18 date: Thu Jan 01 00:00:04 1970 +0000
19 19 summary: d
20 20
21 21 changeset: 2:21fba396af4c
22 22 user: test
23 23 date: Thu Jan 01 00:00:03 1970 +0000
24 24 summary: c
25 25
26 26 changeset: 1:c0296dabce9b
27 27 user: test
28 28 date: Thu Jan 01 00:00:02 1970 +0000
29 29 summary: b
30 30
31 31 changeset: 0:8580ff50825a
32 32 user: test
33 33 date: Thu Jan 01 00:00:01 1970 +0000
34 34 summary: a
35 35
36 36 % one rename
37 37 changeset: 0:8580ff50825a
38 38 user: test
39 39 date: Thu Jan 01 00:00:01 1970 +0000
40 40 files: a
41 41 description:
42 42 a
43 43
44 44
45 45 % many renames
46 46 changeset: 4:b30c444c7c84
47 47 tag: tip
48 48 user: test
49 49 date: Thu Jan 01 00:00:05 1970 +0000
50 50 files: dir/b e
51 51 description:
52 52 e
53 53
54 54
55 55 changeset: 2:21fba396af4c
56 56 user: test
57 57 date: Thu Jan 01 00:00:03 1970 +0000
58 58 files: b dir/b
59 59 description:
60 60 c
61 61
62 62
63 63 changeset: 1:c0296dabce9b
64 64 user: test
65 65 date: Thu Jan 01 00:00:02 1970 +0000
66 66 files: b
67 67 description:
68 68 b
69 69
70 70
71 71 changeset: 0:8580ff50825a
72 72 user: test
73 73 date: Thu Jan 01 00:00:01 1970 +0000
74 74 files: a
75 75 description:
76 76 a
77 77
78 78
79 79 % log copies
80 80 4 e (dir/b)
81 81 3 b (a)
82 82 2 dir/b (b)
83 83 1 b (a)
84 84 0
85 85 % log copies, non-linear manifest
86 86 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
87 87 adding foo
88 88 created new head
89 89 5 e (dir/b)
90 90 % log copies, execute bit set
91 91 6
92 92 % log -p d
93 93 changeset: 3:16b60bf3f99a
94 94 user: test
95 95 date: Thu Jan 01 00:00:04 1970 +0000
96 96 files: a b d
97 97 description:
98 98 d
99 99
100 100
101 101 diff -r 21fba396af4c -r 16b60bf3f99a d
102 102 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
103 103 +++ b/d Thu Jan 01 00:00:04 1970 +0000
104 104 @@ -0,0 +1,1 @@
105 105 +a
106 106
107 107 adding base
108 108 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 109 adding b1
110 110 created new head
111 111 % log -f
112 112 changeset: 3:e62f78d544b4
113 113 tag: tip
114 114 parent: 1:3d5bf5654eda
115 115 user: test
116 116 date: Thu Jan 01 00:00:01 1970 +0000
117 117 summary: b1
118 118
119 119 changeset: 1:3d5bf5654eda
120 120 user: test
121 121 date: Thu Jan 01 00:00:01 1970 +0000
122 122 summary: r1
123 123
124 124 changeset: 0:67e992f2c4f3
125 125 user: test
126 126 date: Thu Jan 01 00:00:01 1970 +0000
127 127 summary: base
128 128
129 129 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
130 130 adding b2
131 131 created new head
132 132 % log -f -r 1:tip
133 133 changeset: 1:3d5bf5654eda
134 134 user: test
135 135 date: Thu Jan 01 00:00:01 1970 +0000
136 136 summary: r1
137 137
138 138 changeset: 2:60c670bf5b30
139 139 user: test
140 140 date: Thu Jan 01 00:00:01 1970 +0000
141 141 summary: r2
142 142
143 143 changeset: 3:e62f78d544b4
144 144 parent: 1:3d5bf5654eda
145 145 user: test
146 146 date: Thu Jan 01 00:00:01 1970 +0000
147 147 summary: b1
148 148
149 149 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
150 150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 151 (branch merge, don't forget to commit)
152 152 % log -r . with two parents
153 153 changeset: 3:e62f78d544b4
154 154 parent: 1:3d5bf5654eda
155 155 user: test
156 156 date: Thu Jan 01 00:00:01 1970 +0000
157 157 summary: b1
158 158
159 159 % log -r . with one parent
160 160 changeset: 5:302e9dd6890d
161 161 tag: tip
162 162 parent: 3:e62f78d544b4
163 163 parent: 4:ddb82e70d1a1
164 164 user: test
165 165 date: Thu Jan 01 00:00:01 1970 +0000
166 166 summary: m12
167 167
168 168 % log --follow-first
169 169 changeset: 6:2404bbcab562
170 170 tag: tip
171 171 user: test
172 172 date: Thu Jan 01 00:00:01 1970 +0000
173 173 summary: b1.1
174 174
175 175 changeset: 5:302e9dd6890d
176 176 parent: 3:e62f78d544b4
177 177 parent: 4:ddb82e70d1a1
178 178 user: test
179 179 date: Thu Jan 01 00:00:01 1970 +0000
180 180 summary: m12
181 181
182 182 changeset: 3:e62f78d544b4
183 183 parent: 1:3d5bf5654eda
184 184 user: test
185 185 date: Thu Jan 01 00:00:01 1970 +0000
186 186 summary: b1
187 187
188 188 changeset: 1:3d5bf5654eda
189 189 user: test
190 190 date: Thu Jan 01 00:00:01 1970 +0000
191 191 summary: r1
192 192
193 193 changeset: 0:67e992f2c4f3
194 194 user: test
195 195 date: Thu Jan 01 00:00:01 1970 +0000
196 196 summary: base
197 197
198 198 % log -P 2
199 199 changeset: 6:2404bbcab562
200 200 tag: tip
201 201 user: test
202 202 date: Thu Jan 01 00:00:01 1970 +0000
203 203 summary: b1.1
204 204
205 205 changeset: 5:302e9dd6890d
206 206 parent: 3:e62f78d544b4
207 207 parent: 4:ddb82e70d1a1
208 208 user: test
209 209 date: Thu Jan 01 00:00:01 1970 +0000
210 210 summary: m12
211 211
212 212 changeset: 4:ddb82e70d1a1
213 213 parent: 0:67e992f2c4f3
214 214 user: test
215 215 date: Thu Jan 01 00:00:01 1970 +0000
216 216 summary: b2
217 217
218 218 changeset: 3:e62f78d544b4
219 219 parent: 1:3d5bf5654eda
220 220 user: test
221 221 date: Thu Jan 01 00:00:01 1970 +0000
222 222 summary: b1
223 223
224 224 % log -r ""
225 225 abort: 00changelog.i@: ambiguous identifier!
226 % log -r <some unknown node id>
227 abort: unknown revision '1000000000000000000000000000000000000000'!