revlog: document v0 format
Benoit Boissinot - r18585:b280f3bf default
@@ -1,1337 +1,1345 @@
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from node import bin, hex, nullid, nullrev
16 16 from i18n import _
17 17 import ancestor, mdiff, parsers, error, util, dagutil
18 18 import struct, zlib, errno
19 19
20 20 _pack = struct.pack
21 21 _unpack = struct.unpack
22 22 _compress = zlib.compress
23 23 _decompress = zlib.decompress
24 24 _sha = util.sha1
25 25
26 26 # revlog header flags
27 27 REVLOGV0 = 0
28 28 REVLOGNG = 1
29 29 REVLOGNGINLINEDATA = (1 << 16)
30 30 REVLOGGENERALDELTA = (1 << 17)
31 31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 32 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
35 35
36 36 # revlog index flags
37 37 REVIDX_KNOWN_FLAGS = 0
38 38
39 39 # max size of revlog with inline data
40 40 _maxinline = 131072
41 41 _chunksize = 1048576
42 42
43 43 RevlogError = error.RevlogError
44 44 LookupError = error.LookupError
45 45
46 46 def getoffset(q):
47 47 return int(q >> 16)
48 48
49 49 def gettype(q):
50 50 return int(q & 0xFFFF)
51 51
52 52 def offset_type(offset, type):
53 53 return long(long(offset) << 16 | type)
54 54
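# Hedged illustration (not in the original source) of how the three
# helpers above round-trip: the byte offset lives in the high bits of an
# index entry's first field, the 16-bit flag type in the low bits.
#
#   q = offset_type(0x1234, 1)   # -> 0x12340001
#   getoffset(q)                 # -> 0x1234
#   gettype(q)                   # -> 1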
55 55 nullhash = _sha(nullid)
56 56
57 57 def hash(text, p1, p2):
58 58 """generate a hash from the given text and its parent hashes
59 59
60 60 This hash combines both the current file contents and its history
61 61 in a manner that makes it easy to distinguish nodes with the same
62 62 content in the revision graph.
63 63 """
64 64 # As of now, if one of the parent nodes is null, p2 is null
65 65 if p2 == nullid:
66 66 # deep copy of a hash is faster than creating one
67 67 s = nullhash.copy()
68 68 s.update(p1)
69 69 else:
70 70 # none of the parent nodes are nullid
71 71 l = [p1, p2]
72 72 l.sort()
73 73 s = _sha(l[0])
74 74 s.update(l[1])
75 75 s.update(text)
76 76 return s.digest()
77 77
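# Illustrative consequence of the scheme above (assumption: a root
# revision, where both parents are null; nullid is 20 zero bytes, so it
# always sorts first):
#
#   hash(text, nullid, nullid) == _sha(nullid + nullid + text).digest()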
78 78 def decompress(bin):
79 79 """ decompress the given input """
80 80 if not bin:
81 81 return bin
82 82 t = bin[0]
83 83 if t == '\0':
84 84 return bin
85 85 if t == 'x':
86 86 try:
87 87 return _decompress(bin)
88 88 except zlib.error, e:
89 89 raise RevlogError(_("revlog decompress error: %s") % str(e))
90 90 if t == 'u':
91 91 return bin[1:]
92 92 raise RevlogError(_("unknown compression type %r") % t)
93 93
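# The tag byte convention (produced by revlog.compress() further down,
# spelled out here for illustration): 'u' marks text stored uncompressed,
# 'x' is simply the first byte zlib emits, and '\0' or an empty chunk
# passes through unchanged.
#
#   decompress('u' + 'hello')        # -> 'hello'
#   decompress(_compress('hello'))   # -> 'hello'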
94 # index v0:
95 # 4 bytes: offset
96 # 4 bytes: compressed length
97 # 4 bytes: base rev
98 # 4 bytes: link rev
99 # 20 bytes: parent 1 nodeid
100 # 20 bytes: parent 2 nodeid
101 # 20 bytes: nodeid
94 102 indexformatv0 = ">4l20s20s20s"
95 103 v0shaoffset = 56
96 104
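# Sanity arithmetic for the v0 layout documented above (illustration
# only, not part of the module):
#
#   struct.calcsize(indexformatv0)   # -> 76 bytes per index entry
#   4 * 4 + 20 + 20                  # -> 56 == v0shaoffset (nodeid start)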
97 105 class revlogoldio(object):
98 106 def __init__(self):
99 107 self.size = struct.calcsize(indexformatv0)
100 108
101 109 def parseindex(self, data, inline):
102 110 s = self.size
103 111 index = []
104 112 nodemap = {nullid: nullrev}
105 113 n = off = 0
106 114 l = len(data)
107 115 while off + s <= l:
108 116 cur = data[off:off + s]
109 117 off += s
110 118 e = _unpack(indexformatv0, cur)
111 119 # transform to revlogv1 format
112 120 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
113 121 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
114 122 index.append(e2)
115 123 nodemap[e[6]] = n
116 124 n += 1
117 125
118 126 # add the magic null revision at -1
119 127 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
120 128
121 129 return index, nodemap, None
122 130
123 131 def packentry(self, entry, node, version, rev):
124 132 if gettype(entry[0]):
125 133 raise RevlogError(_("index entry flags need RevlogNG"))
126 134 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
127 135 node(entry[5]), node(entry[6]), entry[7])
128 136 return _pack(indexformatv0, *e2)
129 137
130 138 # index ng:
131 139 # 6 bytes: offset
132 140 # 2 bytes: flags
133 141 # 4 bytes: compressed length
134 142 # 4 bytes: uncompressed length
135 143 # 4 bytes: base rev
136 144 # 4 bytes: link rev
137 145 # 4 bytes: parent 1 rev
138 146 # 4 bytes: parent 2 rev
139 147 # 32 bytes: nodeid
140 148 indexformatng = ">Qiiiiii20s12x"
141 149 ngshaoffset = 32
142 150 versionformat = ">I"
143 151
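# Likewise for the RevlogNG layout (illustration only): each entry is 64
# bytes, and the 20-byte nodeid is padded to 32 bytes starting at byte 32.
# For rev 0, packentry() below overwrites the first 4 bytes of the entry
# with the version header, which is what versionformat (">I") unpacks.
#
#   struct.calcsize(indexformatng)   # -> 64 bytes per index entry
#   8 + 6 * 4                        # -> 32 == ngshaoffset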
144 152 class revlogio(object):
145 153 def __init__(self):
146 154 self.size = struct.calcsize(indexformatng)
147 155
148 156 def parseindex(self, data, inline):
149 157 # call the C implementation to parse the index data
150 158 index, cache = parsers.parse_index2(data, inline)
151 159 return index, getattr(index, 'nodemap', None), cache
152 160
153 161 def packentry(self, entry, node, version, rev):
154 162 p = _pack(indexformatng, *entry)
155 163 if rev == 0:
156 164 p = _pack(versionformat, version) + p[4:]
157 165 return p
158 166
159 167 class revlog(object):
160 168 """
161 169 the underlying revision storage object
162 170
163 171 A revlog consists of two parts, an index and the revision data.
164 172
165 173 The index is a file with a fixed record size containing
166 174 information on each revision, including its nodeid (hash), the
167 175 nodeids of its parents, the position and offset of its data within
168 176 the data file, and the revision it's based on. Finally, each entry
169 177 contains a linkrev entry that can serve as a pointer to external
170 178 data.
171 179
172 180 The revision data itself is a linear collection of data chunks.
173 181 Each chunk represents a revision and is usually represented as a
174 182 delta against the previous chunk. To bound lookup time, runs of
175 183 deltas are limited to about 2 times the length of the original
176 184 version data. This makes retrieval of a version proportional to
177 185 its size, or O(1) relative to the number of revisions.
178 186
179 187 Both pieces of the revlog are written to in an append-only
180 188 fashion, which means we never need to rewrite a file to insert or
181 189 remove data, and can use some simple techniques to avoid the need
182 190 for locking while reading.
183 191 """
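    # A minimal usage sketch (hypothetical opener and file name, not part
    # of the original source):
    #
    #   rl = revlog(opener, "data/foo.txt.i")
    #   text = rl.revision(rl.tip())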
184 192 def __init__(self, opener, indexfile):
185 193 """
186 194 create a revlog object
187 195
188 196 opener is a function that abstracts the file opening operation
189 197 and can be used to implement COW semantics or the like.
190 198 """
191 199 self.indexfile = indexfile
192 200 self.datafile = indexfile[:-2] + ".d"
193 201 self.opener = opener
194 202 self._cache = None
195 203 self._basecache = (0, 0)
196 204 self._chunkcache = (0, '')
197 205 self.index = []
198 206 self._pcache = {}
199 207 self._nodecache = {nullid: nullrev}
200 208 self._nodepos = None
201 209
202 210 v = REVLOG_DEFAULT_VERSION
203 211 opts = getattr(opener, 'options', None)
204 212 if opts is not None:
205 213 if 'revlogv1' in opts:
206 214 if 'generaldelta' in opts:
207 215 v |= REVLOGGENERALDELTA
208 216 else:
209 217 v = 0
210 218
211 219 i = ''
212 220 self._initempty = True
213 221 try:
214 222 f = self.opener(self.indexfile)
215 223 i = f.read()
216 224 f.close()
217 225 if len(i) > 0:
218 226 v = struct.unpack(versionformat, i[:4])[0]
219 227 self._initempty = False
220 228 except IOError, inst:
221 229 if inst.errno != errno.ENOENT:
222 230 raise
223 231
224 232 self.version = v
225 233 self._inline = v & REVLOGNGINLINEDATA
226 234 self._generaldelta = v & REVLOGGENERALDELTA
227 235 flags = v & ~0xFFFF
228 236 fmt = v & 0xFFFF
229 237 if fmt == REVLOGV0 and flags:
230 238 raise RevlogError(_("index %s unknown flags %#04x for format v0")
231 239 % (self.indexfile, flags >> 16))
232 240 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
233 241 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
234 242 % (self.indexfile, flags >> 16))
235 243 elif fmt > REVLOGNG:
236 244 raise RevlogError(_("index %s unknown format %d")
237 245 % (self.indexfile, fmt))
238 246
239 247 self._io = revlogio()
240 248 if self.version == REVLOGV0:
241 249 self._io = revlogoldio()
242 250 try:
243 251 d = self._io.parseindex(i, self._inline)
244 252 except (ValueError, IndexError):
245 253 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
246 254 self.index, nodemap, self._chunkcache = d
247 255 if nodemap is not None:
248 256 self.nodemap = self._nodecache = nodemap
249 257 if not self._chunkcache:
250 258 self._chunkclear()
251 259
252 260 def tip(self):
253 261 return self.node(len(self.index) - 2)
254 262 def __len__(self):
255 263 return len(self.index) - 1
256 264 def __iter__(self):
257 265 return iter(xrange(len(self)))
258 266 def revs(self, start=0, stop=None):
259 267 """iterate over all revs in this revlog (from start to stop)"""
260 268 step = 1
261 269 if stop is not None:
262 270 if start > stop:
263 271 step = -1
264 272 stop += step
265 273 else:
266 274 stop = len(self)
267 275 return xrange(start, stop, step)
268 276
269 277 @util.propertycache
270 278 def nodemap(self):
271 279 self.rev(self.node(0))
272 280 return self._nodecache
273 281
274 282 def hasnode(self, node):
275 283 try:
276 284 self.rev(node)
277 285 return True
278 286 except KeyError:
279 287 return False
280 288
281 289 def clearcaches(self):
282 290 try:
283 291 self._nodecache.clearcaches()
284 292 except AttributeError:
285 293 self._nodecache = {nullid: nullrev}
286 294 self._nodepos = None
287 295
288 296 def rev(self, node):
289 297 try:
290 298 return self._nodecache[node]
291 299 except RevlogError:
292 300 # parsers.c radix tree lookup failed
293 301 raise LookupError(node, self.indexfile, _('no node'))
294 302 except KeyError:
295 303 # pure python cache lookup failed
296 304 n = self._nodecache
297 305 i = self.index
298 306 p = self._nodepos
299 307 if p is None:
300 308 p = len(i) - 2
301 309 for r in xrange(p, -1, -1):
302 310 v = i[r][7]
303 311 n[v] = r
304 312 if v == node:
305 313 self._nodepos = r - 1
306 314 return r
307 315 raise LookupError(node, self.indexfile, _('no node'))
308 316
309 317 def node(self, rev):
310 318 return self.index[rev][7]
311 319 def linkrev(self, rev):
312 320 return self.index[rev][4]
313 321 def parents(self, node):
314 322 i = self.index
315 323 d = i[self.rev(node)]
316 324 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
317 325 def parentrevs(self, rev):
318 326 return self.index[rev][5:7]
319 327 def start(self, rev):
320 328 return int(self.index[rev][0] >> 16)
321 329 def end(self, rev):
322 330 return self.start(rev) + self.length(rev)
323 331 def length(self, rev):
324 332 return self.index[rev][1]
325 333 def chainbase(self, rev):
326 334 index = self.index
327 335 base = index[rev][3]
328 336 while base != rev:
329 337 rev = base
330 338 base = index[rev][3]
331 339 return base
332 340 def flags(self, rev):
333 341 return self.index[rev][0] & 0xFFFF
334 342 def rawsize(self, rev):
335 343 """return the length of the uncompressed text for a given revision"""
336 344 l = self.index[rev][2]
337 345 if l >= 0:
338 346 return l
339 347
340 348 t = self.revision(self.node(rev))
341 349 return len(t)
342 350 size = rawsize
343 351
344 352 def ancestors(self, revs, stoprev=0, inclusive=False):
345 353 """Generate the ancestors of 'revs' in reverse topological order.
346 354 Does not generate revs lower than stoprev.
347 355
348 356 See the documentation for ancestor.lazyancestors for more details."""
349 357
350 358 return ancestor.lazyancestors(self, revs, stoprev=stoprev,
351 359 inclusive=inclusive)
352 360
353 361 def descendants(self, revs):
354 362 """Generate the descendants of 'revs' in revision order.
355 363
356 364 Yield a sequence of revision numbers starting with a child of
357 365 some rev in revs, i.e., each revision is *not* considered a
358 366 descendant of itself. Results are ordered by revision number (a
359 367 topological sort)."""
360 368 first = min(revs)
361 369 if first == nullrev:
362 370 for i in self:
363 371 yield i
364 372 return
365 373
366 374 seen = set(revs)
367 375 for i in self.revs(start=first + 1):
368 376 for x in self.parentrevs(i):
369 377 if x != nullrev and x in seen:
370 378 seen.add(i)
371 379 yield i
372 380 break
373 381
374 382 def findcommonmissing(self, common=None, heads=None):
375 383 """Return a tuple of the ancestors of common and the ancestors of heads
376 384 that are not ancestors of common. In revset terminology, we return the
377 385 tuple:
378 386
379 387 ::common, (::heads) - (::common)
380 388
381 389 The list is sorted by revision number, meaning it is
382 390 topologically sorted.
383 391
384 392 'heads' and 'common' are both lists of node IDs. If heads is
385 393 not supplied, uses all of the revlog's heads. If common is not
386 394 supplied, uses nullid."""
387 395 if common is None:
388 396 common = [nullid]
389 397 if heads is None:
390 398 heads = self.heads()
391 399
392 400 common = [self.rev(n) for n in common]
393 401 heads = [self.rev(n) for n in heads]
394 402
395 403 # we want the ancestors, but inclusive
396 404 has = set(self.ancestors(common))
397 405 has.add(nullrev)
398 406 has.update(common)
399 407
400 408 # take all ancestors from heads that aren't in has
401 409 missing = set()
402 410 visit = util.deque(r for r in heads if r not in has)
403 411 while visit:
404 412 r = visit.popleft()
405 413 if r in missing:
406 414 continue
407 415 else:
408 416 missing.add(r)
409 417 for p in self.parentrevs(r):
410 418 if p not in has:
411 419 visit.append(p)
412 420 missing = list(missing)
413 421 missing.sort()
414 422 return has, [self.node(r) for r in missing]
415 423
416 424 def findmissingrevs(self, common=None, heads=None):
417 425 """Return the revision numbers of the ancestors of heads that
418 426 are not ancestors of common.
419 427
420 428 More specifically, return a list of revision numbers corresponding to
421 429 nodes N such that every N satisfies the following constraints:
422 430
423 431 1. N is an ancestor of some node in 'heads'
424 432 2. N is not an ancestor of any node in 'common'
425 433
426 434 The list is sorted by revision number, meaning it is
427 435 topologically sorted.
428 436
429 437 'heads' and 'common' are both lists of revision numbers. If heads is
430 438 not supplied, uses all of the revlog's heads. If common is not
431 439 supplied, uses nullid."""
432 440 if common is None:
433 441 common = [nullrev]
434 442 if heads is None:
435 443 heads = self.headrevs()
436 444
437 445 return ancestor.missingancestors(heads, common, self.parentrevs)
438 446
439 447 def findmissing(self, common=None, heads=None):
440 448 """Return the ancestors of heads that are not ancestors of common.
441 449
442 450 More specifically, return a list of nodes N such that every N
443 451 satisfies the following constraints:
444 452
445 453 1. N is an ancestor of some node in 'heads'
446 454 2. N is not an ancestor of any node in 'common'
447 455
448 456 The list is sorted by revision number, meaning it is
449 457 topologically sorted.
450 458
451 459 'heads' and 'common' are both lists of node IDs. If heads is
452 460 not supplied, uses all of the revlog's heads. If common is not
453 461 supplied, uses nullid."""
454 462 if common is None:
455 463 common = [nullid]
456 464 if heads is None:
457 465 heads = self.heads()
458 466
459 467 common = [self.rev(n) for n in common]
460 468 heads = [self.rev(n) for n in heads]
461 469
462 470 return [self.node(r) for r in
463 471 ancestor.missingancestors(heads, common, self.parentrevs)]
464 472
465 473 def nodesbetween(self, roots=None, heads=None):
466 474 """Return a topological path from 'roots' to 'heads'.
467 475
468 476 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
469 477 topologically sorted list of all nodes N that satisfy both of
470 478 these constraints:
471 479
472 480 1. N is a descendant of some node in 'roots'
473 481 2. N is an ancestor of some node in 'heads'
474 482
475 483 Every node is considered to be both a descendant and an ancestor
476 484 of itself, so every reachable node in 'roots' and 'heads' will be
477 485 included in 'nodes'.
478 486
479 487 'outroots' is the list of reachable nodes in 'roots', i.e., the
480 488 subset of 'roots' that is returned in 'nodes'. Likewise,
481 489 'outheads' is the subset of 'heads' that is also in 'nodes'.
482 490
483 491 'roots' and 'heads' are both lists of node IDs. If 'roots' is
484 492 unspecified, uses nullid as the only root. If 'heads' is
485 493 unspecified, uses list of all of the revlog's heads."""
486 494 nonodes = ([], [], [])
487 495 if roots is not None:
488 496 roots = list(roots)
489 497 if not roots:
490 498 return nonodes
491 499 lowestrev = min([self.rev(n) for n in roots])
492 500 else:
493 501 roots = [nullid] # Everybody's a descendant of nullid
494 502 lowestrev = nullrev
495 503 if (lowestrev == nullrev) and (heads is None):
496 504 # We want _all_ the nodes!
497 505 return ([self.node(r) for r in self], [nullid], list(self.heads()))
498 506 if heads is None:
499 507 # All nodes are ancestors, so the latest ancestor is the last
500 508 # node.
501 509 highestrev = len(self) - 1
502 510 # Set ancestors to None to signal that every node is an ancestor.
503 511 ancestors = None
504 512 # Set heads to an empty dictionary for later discovery of heads
505 513 heads = {}
506 514 else:
507 515 heads = list(heads)
508 516 if not heads:
509 517 return nonodes
510 518 ancestors = set()
511 519 # Turn heads into a dictionary so we can remove 'fake' heads.
512 520 # Also, later we will be using it to filter out the heads we can't
513 521 # find from roots.
514 522 heads = dict.fromkeys(heads, False)
515 523 # Start at the top and keep marking parents until we're done.
516 524 nodestotag = set(heads)
517 525 # Remember where the top was so we can use it as a limit later.
518 526 highestrev = max([self.rev(n) for n in nodestotag])
519 527 while nodestotag:
520 528 # grab a node to tag
521 529 n = nodestotag.pop()
522 530 # Never tag nullid
523 531 if n == nullid:
524 532 continue
525 533 # A node's revision number represents its place in a
526 534 # topologically sorted list of nodes.
527 535 r = self.rev(n)
528 536 if r >= lowestrev:
529 537 if n not in ancestors:
530 538 # If we are possibly a descendant of one of the roots
531 539 # and we haven't already been marked as an ancestor
532 540 ancestors.add(n) # Mark as ancestor
533 541 # Add non-nullid parents to list of nodes to tag.
534 542 nodestotag.update([p for p in self.parents(n) if
535 543 p != nullid])
536 544 elif n in heads: # We've seen it before, is it a fake head?
537 545 # So it is, real heads should not be the ancestors of
538 546 # any other heads.
539 547 heads.pop(n)
540 548 if not ancestors:
541 549 return nonodes
542 550 # Now that we have our set of ancestors, we want to remove any
543 551 # roots that are not ancestors.
544 552
545 553 # If one of the roots was nullid, everything is included anyway.
546 554 if lowestrev > nullrev:
547 555 # But, since we weren't, let's recompute the lowest rev to not
548 556 # include roots that aren't ancestors.
549 557
550 558 # Filter out roots that aren't ancestors of heads
551 559 roots = [n for n in roots if n in ancestors]
552 560 # Recompute the lowest revision
553 561 if roots:
554 562 lowestrev = min([self.rev(n) for n in roots])
555 563 else:
556 564 # No more roots? Return empty list
557 565 return nonodes
558 566 else:
559 567 # We are descending from nullid, and don't need to care about
560 568 # any other roots.
561 569 lowestrev = nullrev
562 570 roots = [nullid]
563 571 # Transform our roots list into a set.
564 572 descendants = set(roots)
565 573 # Also, keep the original roots so we can filter out roots that aren't
566 574 # 'real' roots (i.e. are descended from other roots).
567 575 roots = descendants.copy()
568 576 # Our topologically sorted list of output nodes.
569 577 orderedout = []
570 578 # Don't start at nullid since we don't want nullid in our output list,
571 579 # and if nullid shows up in descendants, empty parents will look like
572 580 # they're descendants.
573 581 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
574 582 n = self.node(r)
575 583 isdescendant = False
576 584 if lowestrev == nullrev: # Everybody is a descendant of nullid
577 585 isdescendant = True
578 586 elif n in descendants:
579 587 # n is already a descendant
580 588 isdescendant = True
581 589 # This check only needs to be done here because all the roots
582 590 # will start being marked as descendants before the loop.
583 591 if n in roots:
584 592 # If n was a root, check if it's a 'real' root.
585 593 p = tuple(self.parents(n))
586 594 # If any of its parents are descendants, it's not a root.
587 595 if (p[0] in descendants) or (p[1] in descendants):
588 596 roots.remove(n)
589 597 else:
590 598 p = tuple(self.parents(n))
591 599 # A node is a descendant if either of its parents is a
592 600 # descendant. (We seeded the descendants set with the roots
593 601 # up there, remember?)
594 602 if (p[0] in descendants) or (p[1] in descendants):
595 603 descendants.add(n)
596 604 isdescendant = True
597 605 if isdescendant and ((ancestors is None) or (n in ancestors)):
598 606 # Only include nodes that are both descendants and ancestors.
599 607 orderedout.append(n)
600 608 if (ancestors is not None) and (n in heads):
601 609 # We're trying to figure out which heads are reachable
602 610 # from roots.
603 611 # Mark this head as having been reached
604 612 heads[n] = True
605 613 elif ancestors is None:
606 614 # Otherwise, we're trying to discover the heads.
607 615 # Assume this is a head because if it isn't, the next step
608 616 # will eventually remove it.
609 617 heads[n] = True
610 618 # But, obviously its parents aren't.
611 619 for p in self.parents(n):
612 620 heads.pop(p, None)
613 621 heads = [n for n, flag in heads.iteritems() if flag]
614 622 roots = list(roots)
615 623 assert orderedout
616 624 assert roots
617 625 assert heads
618 626 return (orderedout, roots, heads)
619 627
620 628 def headrevs(self):
621 629 try:
622 630 return self.index.headrevs()
623 631 except AttributeError:
624 632 return self._headrevs()
625 633
626 634 def _headrevs(self):
627 635 count = len(self)
628 636 if not count:
629 637 return [nullrev]
630 638 # we won't iterate over filtered revs, so nobody is a head at the start
631 639 ishead = [0] * (count + 1)
632 640 index = self.index
633 641 for r in self:
634 642 ishead[r] = 1 # I may be a head
635 643 e = index[r]
636 644 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
637 645 return [r for r, val in enumerate(ishead) if val]
638 646
639 647 def heads(self, start=None, stop=None):
640 648 """return the list of all nodes that have no children
641 649
642 650 if start is specified, only heads that are descendants of
643 651 start will be returned
644 652 if stop is specified, it will consider all the revs from stop
645 653 as if they had no children
646 654 """
647 655 if start is None and stop is None:
648 656 if not len(self):
649 657 return [nullid]
650 658 return [self.node(r) for r in self.headrevs()]
651 659
652 660 if start is None:
653 661 start = nullid
654 662 if stop is None:
655 663 stop = []
656 664 stoprevs = set([self.rev(n) for n in stop])
657 665 startrev = self.rev(start)
658 666 reachable = set((startrev,))
659 667 heads = set((startrev,))
660 668
661 669 parentrevs = self.parentrevs
662 670 for r in self.revs(start=startrev + 1):
663 671 for p in parentrevs(r):
664 672 if p in reachable:
665 673 if r not in stoprevs:
666 674 reachable.add(r)
667 675 heads.add(r)
668 676 if p in heads and p not in stoprevs:
669 677 heads.remove(p)
670 678
671 679 return [self.node(r) for r in heads]
672 680
673 681 def children(self, node):
674 682 """find the children of a given node"""
675 683 c = []
676 684 p = self.rev(node)
677 685 for r in self.revs(start=p + 1):
678 686 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
679 687 if prevs:
680 688 for pr in prevs:
681 689 if pr == p:
682 690 c.append(self.node(r))
683 691 elif p == nullrev:
684 692 c.append(self.node(r))
685 693 return c
686 694
687 695 def descendant(self, start, end):
688 696 if start == nullrev:
689 697 return True
690 698 for i in self.descendants([start]):
691 699 if i == end:
692 700 return True
693 701 elif i > end:
694 702 break
695 703 return False
696 704
697 705 def ancestor(self, a, b):
698 706 """calculate the least common ancestor of nodes a and b"""
699 707
700 708 # fast path, check if it is a descendant
701 709 a, b = self.rev(a), self.rev(b)
702 710 start, end = sorted((a, b))
703 711 if self.descendant(start, end):
704 712 return self.node(start)
705 713
706 714 def parents(rev):
707 715 return [p for p in self.parentrevs(rev) if p != nullrev]
708 716
709 717 c = ancestor.ancestor(a, b, parents)
710 718 if c is None:
711 719 return nullid
712 720
713 721 return self.node(c)
714 722
715 723 def _match(self, id):
716 724 if isinstance(id, int):
717 725 # rev
718 726 return self.node(id)
719 727 if len(id) == 20:
720 728 # possibly a binary node
721 729 # odds of a binary node being all hex in ASCII are 1 in 10**25
722 730 try:
723 731 node = id
724 732 self.rev(node) # quick search the index
725 733 return node
726 734 except LookupError:
727 735 pass # may be partial hex id
728 736 try:
729 737 # str(rev)
730 738 rev = int(id)
731 739 if str(rev) != id:
732 740 raise ValueError
733 741 if rev < 0:
734 742 rev = len(self) + rev
735 743 if rev < 0 or rev >= len(self):
736 744 raise ValueError
737 745 return self.node(rev)
738 746 except (ValueError, OverflowError):
739 747 pass
740 748 if len(id) == 40:
741 749 try:
742 750 # a full hex nodeid?
743 751 node = bin(id)
744 752 self.rev(node)
745 753 return node
746 754 except (TypeError, LookupError):
747 755 pass
748 756
749 757 def _partialmatch(self, id):
750 758 try:
751 759 return self.index.partialmatch(id)
752 760 except RevlogError:
753 761 # parsers.c radix tree lookup gave multiple matches
754 762 raise LookupError(id, self.indexfile, _("ambiguous identifier"))
755 763 except (AttributeError, ValueError):
756 764 # we are pure python, or key was too short to search radix tree
757 765 pass
758 766
759 767 if id in self._pcache:
760 768 return self._pcache[id]
761 769
762 770 if len(id) < 40:
763 771 try:
764 772 # hex(node)[:...]
765 773 l = len(id) // 2 # grab an even number of digits
766 774 prefix = bin(id[:l * 2])
767 775 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
768 776 nl = [n for n in nl if hex(n).startswith(id)]
769 777 if len(nl) > 0:
770 778 if len(nl) == 1:
771 779 self._pcache[id] = nl[0]
772 780 return nl[0]
773 781 raise LookupError(id, self.indexfile,
774 782 _('ambiguous identifier'))
775 783 return None
776 784 except TypeError:
777 785 pass
778 786
779 787 def lookup(self, id):
780 788 """locate a node based on:
781 789 - revision number or str(revision number)
782 790 - nodeid or subset of hex nodeid
783 791 """
784 792 n = self._match(id)
785 793 if n is not None:
786 794 return n
787 795 n = self._partialmatch(id)
788 796 if n:
789 797 return n
790 798
791 799 raise LookupError(id, self.indexfile, _('no match found'))
792 800
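    # All of these resolve to the same full binary nodeid (illustrative,
    # assuming rev 0's hex id starts with '1234abcd'):
    #
    #   rl.lookup(0) == rl.lookup('0') == rl.lookup('1234abcd')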
793 801 def cmp(self, node, text):
794 802 """compare text with a given file revision
795 803
796 804 returns True if text is different from what is stored.
797 805 """
798 806 p1, p2 = self.parents(node)
799 807 return hash(text, p1, p2) != node
800 808
801 809 def _addchunk(self, offset, data):
802 810 o, d = self._chunkcache
803 811 # try to add to existing cache
804 812 if o + len(d) == offset and len(d) + len(data) < _chunksize:
805 813 self._chunkcache = o, d + data
806 814 else:
807 815 self._chunkcache = offset, data
808 816
809 817 def _loadchunk(self, offset, length):
810 818 if self._inline:
811 819 df = self.opener(self.indexfile)
812 820 else:
813 821 df = self.opener(self.datafile)
814 822
815 823 readahead = max(65536, length)
816 824 df.seek(offset)
817 825 d = df.read(readahead)
818 826 df.close()
819 827 self._addchunk(offset, d)
820 828 if readahead > length:
821 829 return util.buffer(d, 0, length)
822 830 return d
823 831
824 832 def _getchunk(self, offset, length):
825 833 o, d = self._chunkcache
826 834 l = len(d)
827 835
828 836 # is it in the cache?
829 837 cachestart = offset - o
830 838 cacheend = cachestart + length
831 839 if cachestart >= 0 and cacheend <= l:
832 840 if cachestart == 0 and cacheend == l:
833 841 return d # avoid a copy
834 842 return util.buffer(d, cachestart, cacheend - cachestart)
835 843
836 844 return self._loadchunk(offset, length)
837 845
838 846 def _chunkraw(self, startrev, endrev):
839 847 start = self.start(startrev)
840 848 length = self.end(endrev) - start
841 849 if self._inline:
842 850 start += (startrev + 1) * self._io.size
843 851 return self._getchunk(start, length)
844 852
845 853 def _chunk(self, rev):
846 854 return decompress(self._chunkraw(rev, rev))
847 855
848 856 def _chunkbase(self, rev):
849 857 return self._chunk(rev)
850 858
851 859 def _chunkclear(self):
852 860 self._chunkcache = (0, '')
853 861
854 862 def deltaparent(self, rev):
855 863 """return deltaparent of the given revision"""
856 864 base = self.index[rev][3]
857 865 if base == rev:
858 866 return nullrev
859 867 elif self._generaldelta:
860 868 return base
861 869 else:
862 870 return rev - 1
863 871
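    # Example behavior (illustrative): in a classic revlog, a delta'd
    # rev 5 always reports deltaparent 4 regardless of the chain base;
    # with generaldelta it reports whatever base rev the index recorded;
    # a full snapshot (base == rev) reports nullrev.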
864 872 def revdiff(self, rev1, rev2):
865 873 """return or calculate a delta between two revisions"""
866 874 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
867 875 return str(self._chunk(rev2))
868 876
869 877 return mdiff.textdiff(self.revision(rev1),
870 878 self.revision(rev2))
871 879
872 880 def revision(self, nodeorrev):
873 881 """return an uncompressed revision of a given node or revision
874 882 number.
875 883 """
876 884 if isinstance(nodeorrev, int):
877 885 rev = nodeorrev
878 886 node = self.node(rev)
879 887 else:
880 888 node = nodeorrev
881 889 rev = None
882 890
883 891 cachedrev = None
884 892 if node == nullid:
885 893 return ""
886 894 if self._cache:
887 895 if self._cache[0] == node:
888 896 return self._cache[2]
889 897 cachedrev = self._cache[1]
890 898
891 899 # look up what we need to read
892 900 text = None
893 901 if rev is None:
894 902 rev = self.rev(node)
895 903
896 904 # check rev flags
897 905 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
898 906 raise RevlogError(_('incompatible revision flag %x') %
899 907 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
900 908
901 909 # build delta chain
902 910 chain = []
903 911 index = self.index # for performance
904 912 generaldelta = self._generaldelta
905 913 iterrev = rev
906 914 e = index[iterrev]
907 915 while iterrev != e[3] and iterrev != cachedrev:
908 916 chain.append(iterrev)
909 917 if generaldelta:
910 918 iterrev = e[3]
911 919 else:
912 920 iterrev -= 1
913 921 e = index[iterrev]
914 922 chain.reverse()
915 923 base = iterrev
916 924
917 925 if iterrev == cachedrev:
918 926 # cache hit
919 927 text = self._cache[2]
920 928
921 929 # drop cache to save memory
922 930 self._cache = None
923 931
924 932 self._chunkraw(base, rev)
925 933 if text is None:
926 934 text = str(self._chunkbase(base))
927 935
928 936 bins = [self._chunk(r) for r in chain]
929 937 text = mdiff.patches(text, bins)
930 938
931 939 text = self._checkhash(text, node, rev)
932 940
933 941 self._cache = (node, rev, text)
934 942 return text
935 943
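    # Sketch of the chain walk in revision() above (hypothetical revlog
    # 'rl' whose revs 1 and 2 are stored as deltas against rev 0):
    #
    #   text = mdiff.patches(str(rl._chunkbase(0)),
    #                        [rl._chunk(1), rl._chunk(2)])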
936 944 def _checkhash(self, text, node, rev):
937 945 p1, p2 = self.parents(node)
938 946 if node != hash(text, p1, p2):
939 947 raise RevlogError(_("integrity check failed on %s:%d")
940 948 % (self.indexfile, rev))
941 949 return text
942 950
943 951 def checkinlinesize(self, tr, fp=None):
944 952 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
945 953 return
946 954
947 955 trinfo = tr.find(self.indexfile)
948 956 if trinfo is None:
949 957 raise RevlogError(_("%s not found in the transaction")
950 958 % self.indexfile)
951 959
952 960 trindex = trinfo[2]
953 961 dataoff = self.start(trindex)
954 962
955 963 tr.add(self.datafile, dataoff)
956 964
957 965 if fp:
958 966 fp.flush()
959 967 fp.close()
960 968
961 969 df = self.opener(self.datafile, 'w')
962 970 try:
963 971 for r in self:
964 972 df.write(self._chunkraw(r, r))
965 973 finally:
966 974 df.close()
967 975
968 976 fp = self.opener(self.indexfile, 'w', atomictemp=True)
969 977 self.version &= ~(REVLOGNGINLINEDATA)
970 978 self._inline = False
971 979 for i in self:
972 980 e = self._io.packentry(self.index[i], self.node, self.version, i)
973 981 fp.write(e)
974 982
975 983 # if we don't call close, the temp file will never replace the
976 984 # real index
977 985 fp.close()
978 986
979 987 tr.replace(self.indexfile, trindex * self._io.size)
980 988 self._chunkclear()
981 989
982 990 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
983 991 """add a revision to the log
984 992
985 993 text - the revision data to add
986 994 transaction - the transaction object used for rollback
987 995 link - the linkrev data to add
988 996 p1, p2 - the parent nodeids of the revision
989 997 cachedelta - an optional precomputed delta
990 998 """
991 999 node = hash(text, p1, p2)
992 1000 if node in self.nodemap:
993 1001 return node
994 1002
995 1003 dfh = None
996 1004 if not self._inline:
997 1005 dfh = self.opener(self.datafile, "a")
998 1006 ifh = self.opener(self.indexfile, "a+")
999 1007 try:
1000 1008 return self._addrevision(node, text, transaction, link, p1, p2,
1001 1009 cachedelta, ifh, dfh)
1002 1010 finally:
1003 1011 if dfh:
1004 1012 dfh.close()
1005 1013 ifh.close()
1006 1014
1007 1015 def compress(self, text):
1008 1016 """ generate a possibly-compressed representation of text """
1009 1017 if not text:
1010 1018 return ("", text)
1011 1019 l = len(text)
1012 1020 bin = None
1013 1021 if l < 44:
1014 1022 pass
1015 1023 elif l > 1000000:
1016 1024 # zlib makes an internal copy, thus doubling memory usage for
1017 1025 # large files, so let's do this in pieces
1018 1026 z = zlib.compressobj()
1019 1027 p = []
1020 1028 pos = 0
1021 1029 while pos < l:
1022 1030 pos2 = pos + 2**20
1023 1031 p.append(z.compress(text[pos:pos2]))
1024 1032 pos = pos2
1025 1033 p.append(z.flush())
1026 1034 if sum(map(len, p)) < l:
1027 1035 bin = "".join(p)
1028 1036 else:
1029 1037 bin = _compress(text)
1030 1038 if bin is None or len(bin) > l:
1031 1039 if text[0] == '\0':
1032 1040 return ("", text)
1033 1041 return ('u', text)
1034 1042 return ("", bin)
1035 1043
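    # The contract with the module-level decompress() (illustration):
    # the returned header and data are simply concatenated on disk.
    #
    #   h, d = rl.compress('x' * 1000)   # compressible -> ('', zlib bytes)
    #   decompress(h + d)                # -> 'x' * 1000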
1036 1044 def _addrevision(self, node, text, transaction, link, p1, p2,
1037 1045 cachedelta, ifh, dfh):
1038 1046 """internal function to add revisions to the log
1039 1047
1040 1048 see addrevision for argument descriptions.
1041 1049 invariants:
1042 1050 - text is optional (can be None); if not set, cachedelta must be set.
1043 1051 if both are set, they must correspond to each other.
1044 1052 """
1045 1053 btext = [text]
1046 1054 def buildtext():
1047 1055 if btext[0] is not None:
1048 1056 return btext[0]
1049 1057 # flush any pending writes here so we can read it in revision
1050 1058 if dfh:
1051 1059 dfh.flush()
1052 1060 ifh.flush()
1053 1061 basetext = self.revision(self.node(cachedelta[0]))
1054 1062 btext[0] = mdiff.patch(basetext, cachedelta[1])
1055 1063 chk = hash(btext[0], p1, p2)
1056 1064 if chk != node:
1057 1065 raise RevlogError(_("consistency error in delta"))
1058 1066 return btext[0]
1059 1067
1060 1068 def builddelta(rev):
1061 1069 # can we use the cached delta?
1062 1070 if cachedelta and cachedelta[0] == rev:
1063 1071 delta = cachedelta[1]
1064 1072 else:
1065 1073 t = buildtext()
1066 1074 ptext = self.revision(self.node(rev))
1067 1075 delta = mdiff.textdiff(ptext, t)
1068 1076 data = self.compress(delta)
1069 1077 l = len(data[1]) + len(data[0])
1070 1078 if basecache[0] == rev:
1071 1079 chainbase = basecache[1]
1072 1080 else:
1073 1081 chainbase = self.chainbase(rev)
1074 1082 dist = l + offset - self.start(chainbase)
1075 1083 if self._generaldelta:
1076 1084 base = rev
1077 1085 else:
1078 1086 base = chainbase
1079 1087 return dist, l, data, base, chainbase
1080 1088
1081 1089 curr = len(self)
1082 1090 prev = curr - 1
1083 1091 base = chainbase = curr
1084 1092 offset = self.end(prev)
1085 1093 flags = 0
1086 1094 d = None
1087 1095 basecache = self._basecache
1088 1096 p1r, p2r = self.rev(p1), self.rev(p2)
1089 1097
1090 1098 # should we try to build a delta?
1091 1099 if prev != nullrev:
1092 1100 if self._generaldelta:
1093 1101 if p1r >= basecache[1]:
1094 1102 d = builddelta(p1r)
1095 1103 elif p2r >= basecache[1]:
1096 1104 d = builddelta(p2r)
1097 1105 else:
1098 1106 d = builddelta(prev)
1099 1107 else:
1100 1108 d = builddelta(prev)
1101 1109 dist, l, data, base, chainbase = d
1102 1110
1103 1111 # full versions are inserted when the needed deltas
1104 1112 # become comparable to the uncompressed text
1105 1113 if text is None:
1106 1114 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1107 1115 cachedelta[1])
1108 1116 else:
1109 1117 textlen = len(text)
1110 1118 if d is None or dist > textlen * 2:
1111 1119 text = buildtext()
1112 1120 data = self.compress(text)
1113 1121 l = len(data[1]) + len(data[0])
1114 1122 base = chainbase = curr
1115 1123
1116 1124 e = (offset_type(offset, flags), l, textlen,
1117 1125 base, link, p1r, p2r, node)
1118 1126 self.index.insert(-1, e)
1119 1127 self.nodemap[node] = curr
1120 1128
1121 1129 entry = self._io.packentry(e, self.node, self.version, curr)
1122 1130 if not self._inline:
1123 1131 transaction.add(self.datafile, offset)
1124 1132 transaction.add(self.indexfile, curr * len(entry))
1125 1133 if data[0]:
1126 1134 dfh.write(data[0])
1127 1135 dfh.write(data[1])
1128 1136 dfh.flush()
1129 1137 ifh.write(entry)
1130 1138 else:
1131 1139 offset += curr * self._io.size
1132 1140 transaction.add(self.indexfile, offset, curr)
1133 1141 ifh.write(entry)
1134 1142 ifh.write(data[0])
1135 1143 ifh.write(data[1])
1136 1144 self.checkinlinesize(transaction, ifh)
1137 1145
1138 1146 if type(text) == str: # only accept immutable objects
1139 1147 self._cache = (node, curr, text)
1140 1148 self._basecache = (curr, chainbase)
1141 1149 return node
1142 1150
1143 1151 def group(self, nodelist, bundler, reorder=None):
1144 1152 """Calculate a delta group, yielding a sequence of changegroup chunks
1145 1153 (strings).
1146 1154
1147 1155 Given a list of changeset revs, return a set of deltas and
1148 1156 metadata corresponding to nodes. The first delta is
1149 1157 first parent(nodelist[0]) -> nodelist[0], the receiver is
1150 1158 guaranteed to have this parent as it has all history before
1151 1159 these changesets. In the case firstparent is nullrev the
1152 1160 changegroup starts with a full revision.
1153 1161 """
1154 1162
1155 1163 # if we don't have any revisions touched by these changesets, bail
1156 1164 if len(nodelist) == 0:
1157 1165 yield bundler.close()
1158 1166 return
1159 1167
1160 1168 # for generaldelta revlogs, we linearize the revs; this will both be
1161 1169 # much quicker and generate a much smaller bundle
1162 1170 if (self._generaldelta and reorder is not False) or reorder:
1163 1171 dag = dagutil.revlogdag(self)
1164 1172 revs = set(self.rev(n) for n in nodelist)
1165 1173 revs = dag.linearize(revs)
1166 1174 else:
1167 1175 revs = sorted([self.rev(n) for n in nodelist])
1168 1176
1169 1177 # add the parent of the first rev
1170 1178 p = self.parentrevs(revs[0])[0]
1171 1179 revs.insert(0, p)
1172 1180
1173 1181 # build deltas
1174 1182 for r in xrange(len(revs) - 1):
1175 1183 prev, curr = revs[r], revs[r + 1]
1176 1184 for c in bundler.revchunk(self, curr, prev):
1177 1185 yield c
1178 1186
1179 1187 yield bundler.close()
1180 1188
1181 1189 def addgroup(self, bundle, linkmapper, transaction):
1182 1190 """
1183 1191 add a delta group
1184 1192
1185 1193 Given a set of deltas, add them to the revision log. The
1186 1194 first delta is against its parent, which should be in our
1187 1195 log; the rest are against the previous delta.
1188 1196 """
1189 1197
1190 1198 # track the base of the current delta log
1191 1199 content = []
1192 1200 node = None
1193 1201
1194 1202 r = len(self)
1195 1203 end = 0
1196 1204 if r:
1197 1205 end = self.end(r - 1)
1198 1206 ifh = self.opener(self.indexfile, "a+")
1199 1207 isize = r * self._io.size
1200 1208 if self._inline:
1201 1209 transaction.add(self.indexfile, end + isize, r)
1202 1210 dfh = None
1203 1211 else:
1204 1212 transaction.add(self.indexfile, isize, r)
1205 1213 transaction.add(self.datafile, end)
1206 1214 dfh = self.opener(self.datafile, "a")
1207 1215
1208 1216 try:
1209 1217 # loop through our set of deltas
1210 1218 chain = None
1211 1219 while True:
1212 1220 chunkdata = bundle.deltachunk(chain)
1213 1221 if not chunkdata:
1214 1222 break
1215 1223 node = chunkdata['node']
1216 1224 p1 = chunkdata['p1']
1217 1225 p2 = chunkdata['p2']
1218 1226 cs = chunkdata['cs']
1219 1227 deltabase = chunkdata['deltabase']
1220 1228 delta = chunkdata['delta']
1221 1229
1222 1230 content.append(node)
1223 1231
1224 1232 link = linkmapper(cs)
1225 1233 if node in self.nodemap:
1226 1234 # this can happen if two branches make the same change
1227 1235 chain = node
1228 1236 continue
1229 1237
1230 1238 for p in (p1, p2):
1231 1239 if p not in self.nodemap:
1232 1240 raise LookupError(p, self.indexfile,
1233 1241 _('unknown parent'))
1234 1242
1235 1243 if deltabase not in self.nodemap:
1236 1244 raise LookupError(deltabase, self.indexfile,
1237 1245 _('unknown delta base'))
1238 1246
1239 1247 baserev = self.rev(deltabase)
1240 1248 chain = self._addrevision(node, None, transaction, link,
1241 1249 p1, p2, (baserev, delta), ifh, dfh)
1242 1250 if not dfh and not self._inline:
1243 1251 # addrevision switched from inline to conventional
1244 1252 # reopen the index
1245 1253 ifh.close()
1246 1254 dfh = self.opener(self.datafile, "a")
1247 1255 ifh = self.opener(self.indexfile, "a")
1248 1256 finally:
1249 1257 if dfh:
1250 1258 dfh.close()
1251 1259 ifh.close()
1252 1260
1253 1261 return content
1254 1262
1255 1263 def strip(self, minlink, transaction):
1256 1264 """truncate the revlog on the first revision with a linkrev >= minlink
1257 1265
1258 1266 This function is called when we're stripping revision minlink and
1259 1267 its descendants from the repository.
1260 1268
1261 1269 We have to remove all revisions with linkrev >= minlink, because
1262 1270 the equivalent changelog revisions will be renumbered after the
1263 1271 strip.
1264 1272
1265 1273 So we truncate the revlog on the first of these revisions, and
1266 1274 trust that the caller has saved the revisions that shouldn't be
1267 1275 removed and that it'll re-add them after this truncation.
1268 1276 """
1269 1277 if len(self) == 0:
1270 1278 return
1271 1279
1272 1280 for rev in self:
1273 1281 if self.index[rev][4] >= minlink:
1274 1282 break
1275 1283 else:
1276 1284 return
1277 1285
1278 1286 # first truncate the files on disk
1279 1287 end = self.start(rev)
1280 1288 if not self._inline:
1281 1289 transaction.add(self.datafile, end)
1282 1290 end = rev * self._io.size
1283 1291 else:
1284 1292 end += rev * self._io.size
1285 1293
1286 1294 transaction.add(self.indexfile, end)
1287 1295
1288 1296 # then reset internal state in memory to forget those revisions
1289 1297 self._cache = None
1290 1298 self._chunkclear()
1291 1299 for x in xrange(rev, len(self)):
1292 1300 del self.nodemap[self.node(x)]
1293 1301
1294 1302 del self.index[rev:-1]
1295 1303
1296 1304 def checksize(self):
1297 1305 expected = 0
1298 1306 if len(self):
1299 1307 expected = max(0, self.end(len(self) - 1))
1300 1308
1301 1309 try:
1302 1310 f = self.opener(self.datafile)
1303 1311 f.seek(0, 2)
1304 1312 actual = f.tell()
1305 1313 f.close()
1306 1314 dd = actual - expected
1307 1315 except IOError, inst:
1308 1316 if inst.errno != errno.ENOENT:
1309 1317 raise
1310 1318 dd = 0
1311 1319
1312 1320 try:
1313 1321 f = self.opener(self.indexfile)
1314 1322 f.seek(0, 2)
1315 1323 actual = f.tell()
1316 1324 f.close()
1317 1325 s = self._io.size
1318 1326 i = max(0, actual // s)
1319 1327 di = actual - (i * s)
1320 1328 if self._inline:
1321 1329 databytes = 0
1322 1330 for r in self:
1323 1331 databytes += max(0, self.length(r))
1324 1332 dd = 0
1325 1333 di = actual - len(self) * s - databytes
1326 1334 except IOError, inst:
1327 1335 if inst.errno != errno.ENOENT:
1328 1336 raise
1329 1337 di = 0
1330 1338
1331 1339 return (dd, di)
1332 1340
1333 1341 def files(self):
1334 1342 res = [self.indexfile]
1335 1343 if not self._inline:
1336 1344 res.append(self.datafile)
1337 1345 return res