revlog: simplify the v0 parser
Matt Mackall
r4973:a386a6e4 default
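The change below replaces the chunked read loop in revlogoldio.parseindex with a single whole-file read. A minimal sketch of the resulting parse, assuming the indexformatv0 record layout and the nullid/nullrev sentinels defined in revlog.py (the standalone function name here is illustrative, not part of the change):

    import struct

    indexformatv0 = ">4l20s20s20s"  # offset, length, base, linkrev, then p1, p2, nodeid

    def parse_v0_index(data, nullid='\0' * 20, nullrev=-1):
        # mirror of the simplified loop: no 64k chunking, no leftover
        # buffering -- just fixed-size records unpacked in sequence
        s = struct.calcsize(indexformatv0)
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        while off + s <= len(data):
            e = struct.unpack(indexformatv0, data[off:off + s])
            index.append(e)
            nodemap[e[-1]] = n  # e[-1] is the 20-byte nodeid
            n += 1
            off += s
        return index, nodemap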
@@ -1,1335 +1,1318 @@ b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import _
15 15 import binascii, changegroup, errno, ancestor, mdiff, os
16 16 import sha, struct, util, zlib
17 17
18 18 # revlog version strings
19 19 REVLOGV0 = 0
20 20 REVLOGNG = 1
21 21
22 22 # revlog flags
23 23 REVLOGNGINLINEDATA = (1 << 16)
24 24 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
25 25
26 26 REVLOG_DEFAULT_FORMAT = REVLOGNG
27 27 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
28 28
29 29 def hash(text, p1, p2):
30 30 """generate a hash from the given text and its parent hashes
31 31
32 32 This hash combines both the current file contents and its history
33 33 in a manner that makes it easy to distinguish nodes with the same
34 34 content in the revision graph.
35 35 """
36 36 l = [p1, p2]
37 37 l.sort()
38 38 s = sha.new(l[0])
39 39 s.update(l[1])
40 40 s.update(text)
41 41 return s.digest()
42 42
43 43 def compress(text):
44 44 """ generate a possibly-compressed representation of text """
45 45 if not text: return ("", text)
46 46 if len(text) < 44:
47 47 if text[0] == '\0': return ("", text)
48 48 return ('u', text)
49 49 bin = zlib.compress(text)
50 50 if len(bin) > len(text):
51 51 if text[0] == '\0': return ("", text)
52 52 return ('u', text)
53 53 return ("", bin)
54 54
55 55 def decompress(bin):
56 56 """ decompress the given input """
57 57 if not bin: return bin
58 58 t = bin[0]
59 59 if t == '\0': return bin
60 60 if t == 'x': return zlib.decompress(bin)
61 61 if t == 'u': return bin[1:]
62 62 raise RevlogError(_("unknown compression type %r") % t)
63 63
64 64 indexformatv0 = ">4l20s20s20s"
65 65 v0shaoffset = 56
66 66 # index ng:
67 67 # 6 bytes offset
68 68 # 2 bytes flags
69 69 # 4 bytes compressed length
70 70 # 4 bytes uncompressed length
71 71 # 4 bytes: base rev
72 72 # 4 bytes link rev
73 73 # 4 bytes parent 1 rev
74 74 # 4 bytes parent 2 rev
75 75 # 32 bytes: nodeid
76 76 indexformatng = ">Qiiiiii20s12x"
77 77 ngshaoffset = 32
78 78 versionformat = ">I"
79 79
80 80 class lazyparser(object):
81 81 """
82 82 this class avoids the need to parse the entirety of large indices
83 83 """
84 84
85 85 # lazyparser is not safe to use on windows if win32 extensions not
86 86 # available. it keeps the file handle open, which makes it impossible
87 87 # to break hardlinks on local cloned repos.
88 88 safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
89 89 hasattr(util, 'win32api'))
90 90
91 91 def __init__(self, dataf, size, indexformat, shaoffset):
92 92 self.dataf = dataf
93 93 self.format = indexformat
94 94 self.s = struct.calcsize(indexformat)
95 95 self.indexformat = indexformat
96 96 self.datasize = size
97 97 self.l = size/self.s
98 98 self.index = [None] * self.l
99 99 self.map = {nullid: nullrev}
100 100 self.allmap = 0
101 101 self.all = 0
102 102 self.mapfind_count = 0
103 103 self.shaoffset = shaoffset
104 104
105 105 def loadmap(self):
106 106 """
107 107 during a commit, we need to make sure the rev being added is
108 108 not a duplicate. This requires loading the entire index,
109 109 which is fairly slow. loadmap can load up just the node map,
110 110 which takes much less time.
111 111 """
112 112 if self.allmap: return
113 113 end = self.datasize
114 114 self.allmap = 1
115 115 cur = 0
116 116 count = 0
117 117 blocksize = self.s * 256
118 118 self.dataf.seek(0)
119 119 while cur < end:
120 120 data = self.dataf.read(blocksize)
121 121 off = 0
122 122 for x in xrange(256):
123 123 n = data[off + self.shaoffset:off + self.shaoffset + 20]
124 124 self.map[n] = count
125 125 count += 1
126 126 if count >= self.l:
127 127 break
128 128 off += self.s
129 129 cur += blocksize
130 130
131 131 def loadblock(self, blockstart, blocksize, data=None):
132 132 if self.all: return
133 133 if data is None:
134 134 self.dataf.seek(blockstart)
135 135 if blockstart + blocksize > self.datasize:
136 136 # the revlog may have grown since we've started running,
137 137 # but we don't have space in self.index for more entries.
138 138 # limit blocksize so that we don't get too much data.
139 139 blocksize = max(self.datasize - blockstart, 0)
140 140 data = self.dataf.read(blocksize)
141 141 lend = len(data) / self.s
142 142 i = blockstart / self.s
143 143 off = 0
144 144 # lazyindex supports __delitem__
145 145 if lend > len(self.index) - i:
146 146 lend = len(self.index) - i
147 147 for x in xrange(lend):
148 148 if self.index[i + x] == None:
149 149 b = data[off : off + self.s]
150 150 self.index[i + x] = b
151 151 n = b[self.shaoffset:self.shaoffset + 20]
152 152 self.map[n] = i + x
153 153 off += self.s
154 154
155 155 def findnode(self, node):
156 156 """search backwards through the index file for a specific node"""
157 157 if self.allmap: return None
158 158
159 159 # hg log will cause many many searches for the manifest
160 160 # nodes. After we get called a few times, just load the whole
161 161 # thing.
162 162 if self.mapfind_count > 8:
163 163 self.loadmap()
164 164 if node in self.map:
165 165 return node
166 166 return None
167 167 self.mapfind_count += 1
168 168 last = self.l - 1
169 169 while self.index[last] != None:
170 170 if last == 0:
171 171 self.all = 1
172 172 self.allmap = 1
173 173 return None
174 174 last -= 1
175 175 end = (last + 1) * self.s
176 176 blocksize = self.s * 256
177 177 while end >= 0:
178 178 start = max(end - blocksize, 0)
179 179 self.dataf.seek(start)
180 180 data = self.dataf.read(end - start)
181 181 findend = end - start
182 182 while True:
183 183 # we're searching backwards, so we have to make sure
184 184 # we don't find a changeset where this node is a parent
185 185 off = data.rfind(node, 0, findend)
186 186 findend = off
187 187 if off >= 0:
188 188 i = off / self.s
189 189 off = i * self.s
190 190 n = data[off + self.shaoffset:off + self.shaoffset + 20]
191 191 if n == node:
192 192 self.map[n] = i + start / self.s
193 193 return node
194 194 else:
195 195 break
196 196 end -= blocksize
197 197 return None
198 198
199 199 def loadindex(self, i=None, end=None):
200 200 if self.all: return
201 201 all = False
202 202 if i == None:
203 203 blockstart = 0
204 204 blocksize = (512 / self.s) * self.s
205 205 end = self.datasize
206 206 all = True
207 207 else:
208 208 if end:
209 209 blockstart = i * self.s
210 210 end = end * self.s
211 211 blocksize = end - blockstart
212 212 else:
213 213 blockstart = (i & ~63) * self.s
214 214 blocksize = self.s * 64
215 215 end = blockstart + blocksize
216 216 while blockstart < end:
217 217 self.loadblock(blockstart, blocksize)
218 218 blockstart += blocksize
219 219 if all: self.all = True
220 220
221 221 class lazyindex(object):
222 222 """a lazy version of the index array"""
223 223 def __init__(self, parser):
224 224 self.p = parser
225 225 def __len__(self):
226 226 return len(self.p.index)
227 227 def load(self, pos):
228 228 if pos < 0:
229 229 pos += len(self.p.index)
230 230 self.p.loadindex(pos)
231 231 return self.p.index[pos]
232 232 def __getitem__(self, pos):
233 233 ret = self.p.index[pos] or self.load(pos)
234 234 if isinstance(ret, str):
235 235 ret = struct.unpack(self.p.indexformat, ret)
236 236 return ret
237 237 def __setitem__(self, pos, item):
238 238 self.p.index[pos] = item
239 239 def __delitem__(self, pos):
240 240 del self.p.index[pos]
241 241 def append(self, e):
242 242 self.p.index.append(e)
243 243
244 244 class lazymap(object):
245 245 """a lazy version of the node map"""
246 246 def __init__(self, parser):
247 247 self.p = parser
248 248 def load(self, key):
249 249 n = self.p.findnode(key)
250 250 if n == None:
251 251 raise KeyError(key)
252 252 def __contains__(self, key):
253 253 if key in self.p.map:
254 254 return True
255 255 self.p.loadmap()
256 256 return key in self.p.map
257 257 def __iter__(self):
258 258 yield nullid
259 259 for i in xrange(self.p.l):
260 260 ret = self.p.index[i]
261 261 if not ret:
262 262 self.p.loadindex(i)
263 263 ret = self.p.index[i]
264 264 if isinstance(ret, str):
265 265 ret = struct.unpack(self.p.indexformat, ret)
266 266 yield ret[-1]
267 267 def __getitem__(self, key):
268 268 try:
269 269 return self.p.map[key]
270 270 except KeyError:
271 271 try:
272 272 self.load(key)
273 273 return self.p.map[key]
274 274 except KeyError:
275 275 raise KeyError("node " + hex(key))
276 276 def __setitem__(self, key, val):
277 277 self.p.map[key] = val
278 278 def __delitem__(self, key):
279 279 del self.p.map[key]
280 280
281 281 class RevlogError(Exception): pass
282 282 class LookupError(RevlogError): pass
283 283
284 284 def getoffset(q):
285 285 if q & 0xFFFF:
286 286 raise RevlogError(_('incompatible revision flag %x') % q)
287 287 return int(q >> 16)
288 288
289 289 def gettype(q):
290 290 return int(q & 0xFFFF)
291 291
292 292 def offset_type(offset, type):
293 293 return long(long(offset) << 16 | type)
294 294
295 295 class revlogoldio(object):
296 296 def __init__(self):
297 297 self.chunkcache = None
298 298
299 299 def parseindex(self, fp, st, inline):
300 300 s = struct.calcsize(indexformatv0)
301 301 index = []
302 302 nodemap = {nullid: nullrev}
303 n = 0
304 leftover = None
305 while True:
306 if st:
307 data = fp.read(65536)
308 else:
309 # hack for httprangereader, it doesn't do partial reads well
310 data = fp.read()
311 if not data:
312 break
313 if leftover:
314 data = leftover + data
315 leftover = None
316 off = 0
317 l = len(data)
318 while off < l:
319 if l - off < s:
320 leftover = data[off:]
321 break
322 cur = data[off:off + s]
323 off += s
324 e = struct.unpack(indexformatv0, cur)
325 index.append(e)
326 nodemap[e[-1]] = n
327 n += 1
328 if not st:
329 break
303 n = off = 0
304 data = fp.read()
305 l = len(data)
306 while off + s <= l:
307 cur = data[off:off + s]
308 off += s
309 e = struct.unpack(indexformatv0, cur)
310 index.append(e)
311 nodemap[e[-1]] = n
312 n += 1
330 313
331 314 return index, nodemap
332 315
333 316 class revlogio(object):
334 317 def __init__(self):
335 318 self.chunkcache = None
336 319
337 320 def parseindex(self, fp, st, inline):
338 321 if (lazyparser.safe_to_use and not inline and
339 322 st and st.st_size > 10000):
340 323 # big index, let's parse it on demand
341 324 parser = lazyparser(fp, st.st_size, indexformatng, ngshaoffset)
342 325 index = lazyindex(parser)
343 326 nodemap = lazymap(parser)
344 327 e = list(index[0])
345 328 type = gettype(e[0])
346 329 e[0] = offset_type(0, type)
347 330 index[0] = e
348 331 return index, nodemap
349 332
350 333 s = struct.calcsize(indexformatng)
351 334 index = []
352 335 nodemap = {nullid: nullrev}
353 336 n = 0
354 337 leftover = None
355 338 while True:
356 339 if st:
357 340 data = fp.read(65536)
358 341 else:
359 342 # hack for httprangereader, it doesn't do partial reads well
360 343 data = fp.read()
361 344 if not data:
362 345 break
363 346 if n == 0 and inline:
364 347 # cache the first chunk
365 348 self.chunkcache = (0, data)
366 349 if leftover:
367 350 data = leftover + data
368 351 leftover = None
369 352 off = 0
370 353 l = len(data)
371 354 while off < l:
372 355 if l - off < s:
373 356 leftover = data[off:]
374 357 break
375 358 cur = data[off:off + s]
376 359 off += s
377 360 e = struct.unpack(indexformatng, cur)
378 361 index.append(e)
379 362 nodemap[e[-1]] = n
380 363 n += 1
381 364 if inline:
382 365 if e[1] < 0:
383 366 break
384 367 off += e[1]
385 368 if off > l:
386 369 # some things don't seek well, just read it
387 370 fp.read(off - l)
388 371 break
389 372 if not st:
390 373 break
391 374
392 375 e = list(index[0])
393 376 type = gettype(e[0])
394 377 e[0] = offset_type(0, type)
395 378 index[0] = e
396 379
397 380 return index, nodemap
398 381
399 382 class revlog(object):
400 383 """
401 384 the underlying revision storage object
402 385
403 386 A revlog consists of two parts, an index and the revision data.
404 387
405 388 The index is a file with a fixed record size containing
406 389 information on each revision, including its nodeid (hash), the
407 390 nodeids of its parents, the position and offset of its data within
408 391 the data file, and the revision it's based on. Finally, each entry
409 392 contains a linkrev entry that can serve as a pointer to external
410 393 data.
411 394
412 395 The revision data itself is a linear collection of data chunks.
413 396 Each chunk represents a revision and is usually represented as a
414 397 delta against the previous chunk. To bound lookup time, runs of
415 398 deltas are limited to about 2 times the length of the original
416 399 version data. This makes retrieval of a version proportional to
417 400 its size, or O(1) relative to the number of revisions.
418 401
419 402 Both pieces of the revlog are written to in an append-only
420 403 fashion, which means we never need to rewrite a file to insert or
421 404 remove data, and can use some simple techniques to avoid the need
422 405 for locking while reading.
423 406 """
424 407 def __init__(self, opener, indexfile):
425 408 """
426 409 create a revlog object
427 410
428 411 opener is a function that abstracts the file opening operation
429 412 and can be used to implement COW semantics or the like.
430 413 """
431 414 self.indexfile = indexfile
432 415 self.datafile = indexfile[:-2] + ".d"
433 416 self.opener = opener
434 417
435 418 self.indexstat = None
436 419 self.cache = None
437 420 self.defversion = REVLOG_DEFAULT_VERSION
438 421 if hasattr(opener, "defversion"):
439 422 self.defversion = opener.defversion
440 423 if self.defversion & REVLOGNG:
441 424 self.defversion |= REVLOGNGINLINEDATA
442 425 self._load()
443 426
444 427 def _load(self):
445 428 v = self.defversion
446 429 try:
447 430 f = self.opener(self.indexfile)
448 431 i = f.read(4)
449 432 f.seek(0)
450 433 if len(i) > 0:
451 434 v = struct.unpack(versionformat, i)[0]
452 435 except IOError, inst:
453 436 if inst.errno != errno.ENOENT:
454 437 raise
455 438 i = ""
456 439 else:
457 440 try:
458 441 st = util.fstat(f)
459 442 except AttributeError, inst:
460 443 st = None
461 444 else:
462 445 oldst = self.indexstat
463 446 if (oldst and st.st_dev == oldst.st_dev
464 447 and st.st_ino == oldst.st_ino
465 448 and st.st_mtime == oldst.st_mtime
466 449 and st.st_ctime == oldst.st_ctime
467 450 and st.st_size == oldst.st_size):
468 451 return
469 452 self.indexstat = st
470 453 flags = v & ~0xFFFF
471 454 fmt = v & 0xFFFF
472 455 if fmt == REVLOGV0:
473 456 if flags:
474 457 raise RevlogError(_("index %s unknown flags %#04x for format v0")
475 458 % (self.indexfile, flags >> 16))
476 459 elif fmt == REVLOGNG:
477 460 if flags & ~REVLOGNGINLINEDATA:
478 461 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
479 462 % (self.indexfile, flags >> 16))
480 463 else:
481 464 raise RevlogError(_("index %s unknown format %d")
482 465 % (self.indexfile, fmt))
483 466 self.version = v
484 467 self.nodemap = {nullid: nullrev}
485 468 self.index = []
486 469 self._io = revlogio()
487 470 self.indexformat = indexformatng
488 471 if self.version == REVLOGV0:
489 472 self._io = revlogoldio()
490 473 self.indexformat = indexformatv0
491 474 if i:
492 475 self.index, self.nodemap = self._io.parseindex(f, st, self._inline())
493 476
494 477 def _loadindex(self, start, end):
495 478 """load a block of indexes all at once from the lazy parser"""
496 479 if isinstance(self.index, lazyindex):
497 480 self.index.p.loadindex(start, end)
498 481
499 482 def _loadindexmap(self):
500 483 """loads both the map and the index from the lazy parser"""
501 484 if isinstance(self.index, lazyindex):
502 485 p = self.index.p
503 486 p.loadindex()
504 487 self.nodemap = p.map
505 488
506 489 def _loadmap(self):
507 490 """loads the map from the lazy parser"""
508 491 if isinstance(self.nodemap, lazymap):
509 492 self.nodemap.p.loadmap()
510 493 self.nodemap = self.nodemap.p.map
511 494
512 495 def _inline(self): return self.version & REVLOGNGINLINEDATA
513 496
514 497 def tip(self): return self.node(len(self.index) - 1)
515 498 def count(self): return len(self.index)
516 499 def node(self, rev):
517 500 return rev == nullrev and nullid or self.index[rev][-1]
518 501 def rev(self, node):
519 502 try:
520 503 return self.nodemap[node]
521 504 except KeyError:
522 505 raise LookupError(_('%s: no node %s') % (self.indexfile, hex(node)))
523 506 def linkrev(self, node):
524 507 return (node == nullid) and nullrev or self.index[self.rev(node)][-4]
525 508 def parents(self, node):
526 509 if node == nullid: return (nullid, nullid)
527 510 r = self.rev(node)
528 511 d = self.index[r][-3:-1]
529 512 if self.version == REVLOGV0:
530 513 return d
531 514 return (self.node(d[0]), self.node(d[1]))
532 515 def parentrevs(self, rev):
533 516 if rev == nullrev:
534 517 return (nullrev, nullrev)
535 518 d = self.index[rev][-3:-1]
536 519 if self.version == REVLOGV0:
537 520 return (self.rev(d[0]), self.rev(d[1]))
538 521 return d
539 522 def start(self, rev):
540 523 if rev == nullrev:
541 524 return 0
542 525 if self.version != REVLOGV0:
543 526 return getoffset(self.index[rev][0])
544 527 return self.index[rev][0]
545 528
546 529 def end(self, rev): return self.start(rev) + self.length(rev)
547 530
548 531 def size(self, rev):
549 532 """return the length of the uncompressed text for a given revision"""
550 533 if rev == nullrev:
551 534 return 0
552 535 l = -1
553 536 if self.version != REVLOGV0:
554 537 l = self.index[rev][2]
555 538 if l >= 0:
556 539 return l
557 540
558 541 t = self.revision(self.node(rev))
559 542 return len(t)
560 543
561 544 # alternate implementation. The advantage of this code is that it
562 545 # will be faster for a single revision. But, the results are not
563 546 # cached, so finding the size of every revision will be slower.
564 547 """
565 548 if self.cache and self.cache[1] == rev:
566 549 return len(self.cache[2])
567 550
568 551 base = self.base(rev)
569 552 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
570 553 base = self.cache[1]
571 554 text = self.cache[2]
572 555 else:
573 556 text = self.revision(self.node(base))
574 557
575 558 l = len(text)
576 559 for x in xrange(base + 1, rev + 1):
577 560 l = mdiff.patchedsize(l, self.chunk(x))
578 561 return l
579 562 """
580 563
581 564 def length(self, rev):
582 565 if rev == nullrev:
583 566 return 0
584 567 else:
585 568 return self.index[rev][1]
586 569 def base(self, rev):
587 570 if (rev == nullrev):
588 571 return nullrev
589 572 else:
590 573 return self.index[rev][-5]
591 574
592 575 def reachable(self, node, stop=None):
593 576 """return a hash of all nodes ancestral to a given node, including
594 577 the node itself, stopping when stop is matched"""
595 578 reachable = {}
596 579 visit = [node]
597 580 reachable[node] = 1
598 581 if stop:
599 582 stopn = self.rev(stop)
600 583 else:
601 584 stopn = 0
602 585 while visit:
603 586 n = visit.pop(0)
604 587 if n == stop:
605 588 continue
606 589 if n == nullid:
607 590 continue
608 591 for p in self.parents(n):
609 592 if self.rev(p) < stopn:
610 593 continue
611 594 if p not in reachable:
612 595 reachable[p] = 1
613 596 visit.append(p)
614 597 return reachable
615 598
616 599 def nodesbetween(self, roots=None, heads=None):
617 600 """Return a tuple containing three elements. Elements 1 and 2 contain
618 601 a final list of bases and heads after all the unreachable ones have been
619 602 pruned. Element 0 contains a topologically sorted list of all
620 603
621 604 nodes that satisfy these constraints:
622 605 1. All nodes must be descended from a node in roots (the nodes on
623 606 roots are considered descended from themselves).
624 607 2. All nodes must also be ancestors of a node in heads (the nodes in
625 608 heads are considered to be their own ancestors).
626 609
627 610 If roots is unspecified, nullid is assumed as the only root.
628 611 If heads is unspecified, it is taken to be the output of the
629 612 heads method (i.e. a list of all nodes in the repository that
630 613 have no children)."""
631 614 nonodes = ([], [], [])
632 615 if roots is not None:
633 616 roots = list(roots)
634 617 if not roots:
635 618 return nonodes
636 619 lowestrev = min([self.rev(n) for n in roots])
637 620 else:
638 621 roots = [nullid] # Everybody's a descendent of nullid
639 622 lowestrev = nullrev
640 623 if (lowestrev == nullrev) and (heads is None):
641 624 # We want _all_ the nodes!
642 625 return ([self.node(r) for r in xrange(0, self.count())],
643 626 [nullid], list(self.heads()))
644 627 if heads is None:
645 628 # All nodes are ancestors, so the latest ancestor is the last
646 629 # node.
647 630 highestrev = self.count() - 1
648 631 # Set ancestors to None to signal that every node is an ancestor.
649 632 ancestors = None
650 633 # Set heads to an empty dictionary for later discovery of heads
651 634 heads = {}
652 635 else:
653 636 heads = list(heads)
654 637 if not heads:
655 638 return nonodes
656 639 ancestors = {}
657 640 # Turn heads into a dictionary so we can remove 'fake' heads.
658 641 # Also, later we will be using it to filter out the heads we can't
659 642 # find from roots.
660 643 heads = dict.fromkeys(heads, 0)
661 644 # Start at the top and keep marking parents until we're done.
662 645 nodestotag = heads.keys()
663 646 # Remember where the top was so we can use it as a limit later.
664 647 highestrev = max([self.rev(n) for n in nodestotag])
665 648 while nodestotag:
666 649 # grab a node to tag
667 650 n = nodestotag.pop()
668 651 # Never tag nullid
669 652 if n == nullid:
670 653 continue
671 654 # A node's revision number represents its place in a
672 655 # topologically sorted list of nodes.
673 656 r = self.rev(n)
674 657 if r >= lowestrev:
675 658 if n not in ancestors:
676 659 # If we are possibly a descendent of one of the roots
677 660 # and we haven't already been marked as an ancestor
678 661 ancestors[n] = 1 # Mark as ancestor
679 662 # Add non-nullid parents to list of nodes to tag.
680 663 nodestotag.extend([p for p in self.parents(n) if
681 664 p != nullid])
682 665 elif n in heads: # We've seen it before, is it a fake head?
683 666 # So it is, real heads should not be the ancestors of
684 667 # any other heads.
685 668 heads.pop(n)
686 669 if not ancestors:
687 670 return nonodes
688 671 # Now that we have our set of ancestors, we want to remove any
689 672 # roots that are not ancestors.
690 673
691 674 # If one of the roots was nullid, everything is included anyway.
692 675 if lowestrev > nullrev:
693 676 # But, since we weren't, let's recompute the lowest rev to not
694 677 # include roots that aren't ancestors.
695 678
696 679 # Filter out roots that aren't ancestors of heads
697 680 roots = [n for n in roots if n in ancestors]
698 681 # Recompute the lowest revision
699 682 if roots:
700 683 lowestrev = min([self.rev(n) for n in roots])
701 684 else:
702 685 # No more roots? Return empty list
703 686 return nonodes
704 687 else:
705 688 # We are descending from nullid, and don't need to care about
706 689 # any other roots.
707 690 lowestrev = nullrev
708 691 roots = [nullid]
709 692 # Transform our roots list into a 'set' (i.e. a dictionary where the
710 693 # values don't matter).
711 694 descendents = dict.fromkeys(roots, 1)
712 695 # Also, keep the original roots so we can filter out roots that aren't
713 696 # 'real' roots (i.e. are descended from other roots).
714 697 roots = descendents.copy()
715 698 # Our topologically sorted list of output nodes.
716 699 orderedout = []
717 700 # Don't start at nullid since we don't want nullid in our output list,
718 701 # and if nullid shows up in descendents, empty parents will look like
719 702 # they're descendents.
720 703 for r in xrange(max(lowestrev, 0), highestrev + 1):
721 704 n = self.node(r)
722 705 isdescendent = False
723 706 if lowestrev == nullrev: # Everybody is a descendent of nullid
724 707 isdescendent = True
725 708 elif n in descendents:
726 709 # n is already a descendent
727 710 isdescendent = True
728 711 # This check only needs to be done here because all the roots
729 712 # will start being marked as descendents before the loop.
730 713 if n in roots:
731 714 # If n was a root, check if it's a 'real' root.
732 715 p = tuple(self.parents(n))
733 716 # If any of its parents are descendents, it's not a root.
734 717 if (p[0] in descendents) or (p[1] in descendents):
735 718 roots.pop(n)
736 719 else:
737 720 p = tuple(self.parents(n))
738 721 # A node is a descendent if either of its parents are
739 722 # descendents. (We seeded the descendents list with the roots
740 723 # up there, remember?)
741 724 if (p[0] in descendents) or (p[1] in descendents):
742 725 descendents[n] = 1
743 726 isdescendent = True
744 727 if isdescendent and ((ancestors is None) or (n in ancestors)):
745 728 # Only include nodes that are both descendents and ancestors.
746 729 orderedout.append(n)
747 730 if (ancestors is not None) and (n in heads):
748 731 # We're trying to figure out which heads are reachable
749 732 # from roots.
750 733 # Mark this head as having been reached
751 734 heads[n] = 1
752 735 elif ancestors is None:
753 736 # Otherwise, we're trying to discover the heads.
754 737 # Assume this is a head because if it isn't, the next step
755 738 # will eventually remove it.
756 739 heads[n] = 1
757 740 # But, obviously its parents aren't.
758 741 for p in self.parents(n):
759 742 heads.pop(p, None)
760 743 heads = [n for n in heads.iterkeys() if heads[n] != 0]
761 744 roots = roots.keys()
762 745 assert orderedout
763 746 assert roots
764 747 assert heads
765 748 return (orderedout, roots, heads)
766 749
767 750 def heads(self, start=None, stop=None):
768 751 """return the list of all nodes that have no children
769 752
770 753 if start is specified, only heads that are descendants of
771 754 start will be returned
772 755 if stop is specified, it will consider all the revs from stop
773 756 as if they had no children
774 757 """
775 758 if start is None:
776 759 start = nullid
777 760 if stop is None:
778 761 stop = []
779 762 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
780 763 startrev = self.rev(start)
781 764 reachable = {startrev: 1}
782 765 heads = {startrev: 1}
783 766
784 767 parentrevs = self.parentrevs
785 768 for r in xrange(startrev + 1, self.count()):
786 769 for p in parentrevs(r):
787 770 if p in reachable:
788 771 if r not in stoprevs:
789 772 reachable[r] = 1
790 773 heads[r] = 1
791 774 if p in heads and p not in stoprevs:
792 775 del heads[p]
793 776
794 777 return [self.node(r) for r in heads]
795 778
796 779 def children(self, node):
797 780 """find the children of a given node"""
798 781 c = []
799 782 p = self.rev(node)
800 783 for r in range(p + 1, self.count()):
801 784 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
802 785 if prevs:
803 786 for pr in prevs:
804 787 if pr == p:
805 788 c.append(self.node(r))
806 789 elif p == nullrev:
807 790 c.append(self.node(r))
808 791 return c
809 792
810 793 def _match(self, id):
811 794 if isinstance(id, (long, int)):
812 795 # rev
813 796 return self.node(id)
814 797 if len(id) == 20:
815 798 # possibly a binary node
816 799 # odds of a binary node being all hex in ASCII are 1 in 10**25
817 800 try:
818 801 node = id
819 802 r = self.rev(node) # quick search the index
820 803 return node
821 804 except LookupError:
822 805 pass # may be partial hex id
823 806 try:
824 807 # str(rev)
825 808 rev = int(id)
826 809 if str(rev) != id: raise ValueError
827 810 if rev < 0: rev = self.count() + rev
828 811 if rev < 0 or rev >= self.count(): raise ValueError
829 812 return self.node(rev)
830 813 except (ValueError, OverflowError):
831 814 pass
832 815 if len(id) == 40:
833 816 try:
834 817 # a full hex nodeid?
835 818 node = bin(id)
836 819 r = self.rev(node)
837 820 return node
838 821 except TypeError:
839 822 pass
840 823
841 824 def _partialmatch(self, id):
842 825 if len(id) < 40:
843 826 try:
844 827 # hex(node)[:...]
845 828 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
846 829 node = None
847 830 for n in self.nodemap:
848 831 if n.startswith(bin_id) and hex(n).startswith(id):
849 832 if node is not None:
850 833 raise LookupError(_("Ambiguous identifier"))
851 834 node = n
852 835 if node is not None:
853 836 return node
854 837 except TypeError:
855 838 pass
856 839
857 840 def lookup(self, id):
858 841 """locate a node based on:
859 842 - revision number or str(revision number)
860 843 - nodeid or subset of hex nodeid
861 844 """
862 845
863 846 n = self._match(id)
864 847 if n is not None:
865 848 return n
866 849 n = self._partialmatch(id)
867 850 if n:
868 851 return n
869 852
870 853 raise LookupError(_("No match found"))
871 854
872 855 def cmp(self, node, text):
873 856 """compare text with a given file revision"""
874 857 p1, p2 = self.parents(node)
875 858 return hash(text, p1, p2) != node
876 859
877 860 def diff(self, a, b):
878 861 """return a delta between two revisions"""
879 862 return mdiff.textdiff(a, b)
880 863
881 864 def patches(self, t, pl):
882 865 """apply a list of patches to a string"""
883 866 return mdiff.patches(t, pl)
884 867
885 868 def chunk(self, rev, df=None, cachelen=4096):
886 869 start, length = self.start(rev), self.length(rev)
887 870 inline = self._inline()
888 871 if inline:
889 872 start += (rev + 1) * struct.calcsize(self.indexformat)
890 873 end = start + length
891 874 def loadcache(df):
892 875 cache_length = max(cachelen, length) # 4k
893 876 if not df:
894 877 if inline:
895 878 df = self.opener(self.indexfile)
896 879 else:
897 880 df = self.opener(self.datafile)
898 881 df.seek(start)
899 882 self._io.chunkcache = (start, df.read(cache_length))
900 883
901 884 if not self._io.chunkcache:
902 885 loadcache(df)
903 886
904 887 cache_start = self._io.chunkcache[0]
905 888 cache_end = cache_start + len(self._io.chunkcache[1])
906 889 if start >= cache_start and end <= cache_end:
907 890 # it is cached
908 891 offset = start - cache_start
909 892 else:
910 893 loadcache(df)
911 894 offset = 0
912 895
913 896 #def checkchunk():
914 897 # df = self.opener(self.datafile)
915 898 # df.seek(start)
916 899 # return df.read(length)
917 900 #assert s == checkchunk()
918 901 return decompress(self._io.chunkcache[1][offset:offset + length])
919 902
920 903 def delta(self, node):
921 904 """return or calculate a delta between a node and its predecessor"""
922 905 r = self.rev(node)
923 906 return self.revdiff(r - 1, r)
924 907
925 908 def revdiff(self, rev1, rev2):
926 909 """return or calculate a delta between two revisions"""
927 910 b1 = self.base(rev1)
928 911 b2 = self.base(rev2)
929 912 if b1 == b2 and rev1 + 1 == rev2:
930 913 return self.chunk(rev2)
931 914 else:
932 915 return self.diff(self.revision(self.node(rev1)),
933 916 self.revision(self.node(rev2)))
934 917
935 918 def revision(self, node):
936 919 """return an uncompressed revision of a given"""
937 920 if node == nullid: return ""
938 921 if self.cache and self.cache[0] == node: return self.cache[2]
939 922
940 923 # look up what we need to read
941 924 text = None
942 925 rev = self.rev(node)
943 926 base = self.base(rev)
944 927
945 928 if self._inline():
946 929 # we probably have the whole chunk cached
947 930 df = None
948 931 else:
949 932 df = self.opener(self.datafile)
950 933
951 934 # do we have useful data cached?
952 935 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
953 936 base = self.cache[1]
954 937 text = self.cache[2]
955 938 self._loadindex(base, rev + 1)
956 939 else:
957 940 self._loadindex(base, rev + 1)
958 941 text = self.chunk(base, df=df)
959 942
960 943 bins = []
961 944 for r in xrange(base + 1, rev + 1):
962 945 bins.append(self.chunk(r, df=df))
963 946
964 947 text = self.patches(text, bins)
965 948
966 949 p1, p2 = self.parents(node)
967 950 if node != hash(text, p1, p2):
968 951 raise RevlogError(_("integrity check failed on %s:%d")
969 952 % (self.datafile, rev))
970 953
971 954 self.cache = (node, rev, text)
972 955 return text
973 956
974 957 def checkinlinesize(self, tr, fp=None):
975 958 if not self._inline():
976 959 return
977 960 if not fp:
978 961 fp = self.opener(self.indexfile, 'r')
979 962 fp.seek(0, 2)
980 963 size = fp.tell()
981 964 if size < 131072:
982 965 return
983 966 trinfo = tr.find(self.indexfile)
984 967 if trinfo == None:
985 968 raise RevlogError(_("%s not found in the transaction")
986 969 % self.indexfile)
987 970
988 971 trindex = trinfo[2]
989 972 dataoff = self.start(trindex)
990 973
991 974 tr.add(self.datafile, dataoff)
992 975 df = self.opener(self.datafile, 'w')
993 976 calc = struct.calcsize(self.indexformat)
994 977 for r in xrange(self.count()):
995 978 start = self.start(r) + (r + 1) * calc
996 979 length = self.length(r)
997 980 fp.seek(start)
998 981 d = fp.read(length)
999 982 df.write(d)
1000 983 fp.close()
1001 984 df.close()
1002 985 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1003 986 self.version &= ~(REVLOGNGINLINEDATA)
1004 987 if self.count():
1005 988 x = self.index[0]
1006 989 e = struct.pack(self.indexformat, *x)[4:]
1007 990 l = struct.pack(versionformat, self.version)
1008 991 fp.write(l)
1009 992 fp.write(e)
1010 993
1011 994 for i in xrange(1, self.count()):
1012 995 x = self.index[i]
1013 996 e = struct.pack(self.indexformat, *x)
1014 997 fp.write(e)
1015 998
1016 999 # if we don't call rename, the temp file will never replace the
1017 1000 # real index
1018 1001 fp.rename()
1019 1002
1020 1003 tr.replace(self.indexfile, trindex * calc)
1021 1004 self._io.chunkcache = None
1022 1005
1023 1006 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
1024 1007 """add a revision to the log
1025 1008
1026 1009 text - the revision data to add
1027 1010 transaction - the transaction object used for rollback
1028 1011 link - the linkrev data to add
1029 1012 p1, p2 - the parent nodeids of the revision
1030 1013 d - an optional precomputed delta
1031 1014 """
1032 1015 if not self._inline():
1033 1016 dfh = self.opener(self.datafile, "a")
1034 1017 else:
1035 1018 dfh = None
1036 1019 ifh = self.opener(self.indexfile, "a+")
1037 1020 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1038 1021
1039 1022 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1040 1023 if text is None: text = ""
1041 1024 if p1 is None: p1 = self.tip()
1042 1025 if p2 is None: p2 = nullid
1043 1026
1044 1027 node = hash(text, p1, p2)
1045 1028
1046 1029 if node in self.nodemap:
1047 1030 return node
1048 1031
1049 1032 n = self.count()
1050 1033 t = n - 1
1051 1034
1052 1035 if n:
1053 1036 base = self.base(t)
1054 1037 start = self.start(base)
1055 1038 end = self.end(t)
1056 1039 if not d:
1057 1040 prev = self.revision(self.tip())
1058 1041 d = self.diff(prev, text)
1059 1042 data = compress(d)
1060 1043 l = len(data[1]) + len(data[0])
1061 1044 dist = end - start + l
1062 1045
1063 1046 # full versions are inserted when the needed deltas
1064 1047 # become comparable to the uncompressed text
1065 1048 if not n or dist > len(text) * 2:
1066 1049 data = compress(text)
1067 1050 l = len(data[1]) + len(data[0])
1068 1051 base = n
1069 1052 else:
1070 1053 base = self.base(t)
1071 1054
1072 1055 offset = 0
1073 1056 if t >= 0:
1074 1057 offset = self.end(t)
1075 1058
1076 1059 if self.version == REVLOGV0:
1077 1060 e = (offset, l, base, link, p1, p2, node)
1078 1061 else:
1079 1062 e = (offset_type(offset, 0), l, len(text),
1080 1063 base, link, self.rev(p1), self.rev(p2), node)
1081 1064
1082 1065 self.index.append(e)
1083 1066 self.nodemap[node] = n
1084 1067 entry = struct.pack(self.indexformat, *e)
1085 1068
1086 1069 if not self._inline():
1087 1070 transaction.add(self.datafile, offset)
1088 1071 transaction.add(self.indexfile, n * len(entry))
1089 1072 if data[0]:
1090 1073 dfh.write(data[0])
1091 1074 dfh.write(data[1])
1092 1075 dfh.flush()
1093 1076 else:
1094 1077 ifh.seek(0, 2)
1095 1078 transaction.add(self.indexfile, ifh.tell(), self.count() - 1)
1096 1079
1097 1080 if len(self.index) == 1 and self.version != REVLOGV0:
1098 1081 l = struct.pack(versionformat, self.version)
1099 1082 ifh.write(l)
1100 1083 entry = entry[4:]
1101 1084
1102 1085 ifh.write(entry)
1103 1086
1104 1087 if self._inline():
1105 1088 ifh.write(data[0])
1106 1089 ifh.write(data[1])
1107 1090 self.checkinlinesize(transaction, ifh)
1108 1091
1109 1092 self.cache = (node, n, text)
1110 1093 return node
1111 1094
1112 1095 def ancestor(self, a, b):
1113 1096 """calculate the least common ancestor of nodes a and b"""
1114 1097
1115 1098 def parents(rev):
1116 1099 return [p for p in self.parentrevs(rev) if p != nullrev]
1117 1100
1118 1101 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1119 1102 if c is None:
1120 1103 return nullid
1121 1104
1122 1105 return self.node(c)
1123 1106
1124 1107 def group(self, nodelist, lookup, infocollect=None):
1125 1108 """calculate a delta group
1126 1109
1127 1110 Given a list of changeset revs, return a set of deltas and
1128 1111 metadata corresponding to nodes. The first delta is
1129 1112 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1130 1113 have this parent as it has all history before these
1131 1114 changesets. The parent is parent[0].
1132 1115 """
1133 1116 revs = [self.rev(n) for n in nodelist]
1134 1117
1135 1118 # if we don't have any revisions touched by these changesets, bail
1136 1119 if not revs:
1137 1120 yield changegroup.closechunk()
1138 1121 return
1139 1122
1140 1123 # add the parent of the first rev
1141 1124 p = self.parents(self.node(revs[0]))[0]
1142 1125 revs.insert(0, self.rev(p))
1143 1126
1144 1127 # build deltas
1145 1128 for d in xrange(0, len(revs) - 1):
1146 1129 a, b = revs[d], revs[d + 1]
1147 1130 nb = self.node(b)
1148 1131
1149 1132 if infocollect is not None:
1150 1133 infocollect(nb)
1151 1134
1152 1135 d = self.revdiff(a, b)
1153 1136 p = self.parents(nb)
1154 1137 meta = nb + p[0] + p[1] + lookup(nb)
1155 1138 yield changegroup.genchunk("%s%s" % (meta, d))
1156 1139
1157 1140 yield changegroup.closechunk()
1158 1141
1159 1142 def addgroup(self, revs, linkmapper, transaction, unique=0):
1160 1143 """
1161 1144 add a delta group
1162 1145
1163 1146 given a set of deltas, add them to the revision log. the
1164 1147 first delta is against its parent, which should be in our
1165 1148 log, the rest are against the previous delta.
1166 1149 """
1167 1150
1168 1151 #track the base of the current delta log
1169 1152 r = self.count()
1170 1153 t = r - 1
1171 1154 node = None
1172 1155
1173 1156 base = prev = nullrev
1174 1157 start = end = textlen = 0
1175 1158 if r:
1176 1159 end = self.end(t)
1177 1160
1178 1161 ifh = self.opener(self.indexfile, "a+")
1179 1162 ifh.seek(0, 2)
1180 1163 transaction.add(self.indexfile, ifh.tell(), self.count())
1181 1164 if self._inline():
1182 1165 dfh = None
1183 1166 else:
1184 1167 transaction.add(self.datafile, end)
1185 1168 dfh = self.opener(self.datafile, "a")
1186 1169
1187 1170 # loop through our set of deltas
1188 1171 chain = None
1189 1172 for chunk in revs:
1190 1173 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1191 1174 link = linkmapper(cs)
1192 1175 if node in self.nodemap:
1193 1176 # this can happen if two branches make the same change
1194 1177 # if unique:
1195 1178 # raise RevlogError(_("already have %s") % hex(node[:4]))
1196 1179 chain = node
1197 1180 continue
1198 1181 delta = chunk[80:]
1199 1182
1200 1183 for p in (p1, p2):
1201 1184 if not p in self.nodemap:
1202 1185 raise LookupError(_("unknown parent %s") % short(p))
1203 1186
1204 1187 if not chain:
1205 1188 # retrieve the parent revision of the delta chain
1206 1189 chain = p1
1207 1190 if not chain in self.nodemap:
1208 1191 raise LookupError(_("unknown base %s") % short(chain[:4]))
1209 1192
1210 1193 # full versions are inserted when the needed deltas become
1211 1194 # comparable to the uncompressed text or when the previous
1212 1195 # version is not the one we have a delta against. We use
1213 1196 # the size of the previous full rev as a proxy for the
1214 1197 # current size.
1215 1198
1216 1199 if chain == prev:
1217 1200 tempd = compress(delta)
1218 1201 cdelta = tempd[0] + tempd[1]
1219 1202 textlen = mdiff.patchedsize(textlen, delta)
1220 1203
1221 1204 if chain != prev or (end - start + len(cdelta)) > textlen * 2:
1222 1205 # flush our writes here so we can read it in revision
1223 1206 if dfh:
1224 1207 dfh.flush()
1225 1208 ifh.flush()
1226 1209 text = self.revision(chain)
1227 1210 text = self.patches(text, [delta])
1228 1211 chk = self._addrevision(text, transaction, link, p1, p2, None,
1229 1212 ifh, dfh)
1230 1213 if not dfh and not self._inline():
1231 1214 # addrevision switched from inline to conventional
1232 1215 # reopen the index
1233 1216 dfh = self.opener(self.datafile, "a")
1234 1217 ifh = self.opener(self.indexfile, "a")
1235 1218 if chk != node:
1236 1219 raise RevlogError(_("consistency error adding group"))
1237 1220 textlen = len(text)
1238 1221 else:
1239 1222 if self.version == REVLOGV0:
1240 1223 e = (end, len(cdelta), base, link, p1, p2, node)
1241 1224 else:
1242 1225 e = (offset_type(end, 0), len(cdelta), textlen, base,
1243 1226 link, self.rev(p1), self.rev(p2), node)
1244 1227 self.index.append(e)
1245 1228 self.nodemap[node] = r
1246 1229 if self._inline():
1247 1230 ifh.write(struct.pack(self.indexformat, *e))
1248 1231 ifh.write(cdelta)
1249 1232 self.checkinlinesize(transaction, ifh)
1250 1233 if not self._inline():
1251 1234 dfh = self.opener(self.datafile, "a")
1252 1235 ifh = self.opener(self.indexfile, "a")
1253 1236 else:
1254 1237 dfh.write(cdelta)
1255 1238 ifh.write(struct.pack(self.indexformat, *e))
1256 1239
1257 1240 t, r, chain, prev = r, r + 1, node, node
1258 1241 base = self.base(t)
1259 1242 start = self.start(base)
1260 1243 end = self.end(t)
1261 1244
1262 1245 return node
1263 1246
1264 1247 def strip(self, rev, minlink):
1265 1248 if self.count() == 0 or rev >= self.count():
1266 1249 return
1267 1250
1268 1251 if isinstance(self.index, lazyindex):
1269 1252 self._loadindexmap()
1270 1253
1271 1254 # When stripping away a revision, we need to make sure it
1272 1255 # does not actually belong to an older changeset.
1273 1256 # The minlink parameter defines the oldest revision
1274 1257 # we're allowed to strip away.
1275 1258 while minlink > self.index[rev][-4]:
1276 1259 rev += 1
1277 1260 if rev >= self.count():
1278 1261 return
1279 1262
1280 1263 # first truncate the files on disk
1281 1264 end = self.start(rev)
1282 1265 if not self._inline():
1283 1266 df = self.opener(self.datafile, "a")
1284 1267 df.truncate(end)
1285 1268 end = rev * struct.calcsize(self.indexformat)
1286 1269 else:
1287 1270 end += rev * struct.calcsize(self.indexformat)
1288 1271
1289 1272 indexf = self.opener(self.indexfile, "a")
1290 1273 indexf.truncate(end)
1291 1274
1292 1275 # then reset internal state in memory to forget those revisions
1293 1276 self.cache = None
1294 1277 self._io.chunkcache = None
1295 1278 for x in xrange(rev, self.count()):
1296 1279 del self.nodemap[self.node(x)]
1297 1280
1298 1281 del self.index[rev:]
1299 1282
1300 1283 def checksize(self):
1301 1284 expected = 0
1302 1285 if self.count():
1303 1286 expected = self.end(self.count() - 1)
1304 1287
1305 1288 try:
1306 1289 f = self.opener(self.datafile)
1307 1290 f.seek(0, 2)
1308 1291 actual = f.tell()
1309 1292 dd = actual - expected
1310 1293 except IOError, inst:
1311 1294 if inst.errno != errno.ENOENT:
1312 1295 raise
1313 1296 dd = 0
1314 1297
1315 1298 try:
1316 1299 f = self.opener(self.indexfile)
1317 1300 f.seek(0, 2)
1318 1301 actual = f.tell()
1319 1302 s = struct.calcsize(self.indexformat)
1320 1303 i = actual / s
1321 1304 di = actual - (i * s)
1322 1305 if self._inline():
1323 1306 databytes = 0
1324 1307 for r in xrange(self.count()):
1325 1308 databytes += self.length(r)
1326 1309 dd = 0
1327 1310 di = actual - self.count() * s - databytes
1328 1311 except IOError, inst:
1329 1312 if inst.errno != errno.ENOENT:
1330 1313 raise
1331 1314 di = 0
1332 1315
1333 1316 return (dd, di)
1334 1317
1335 1318
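As an aside on the hash() helper at the top of the file: it sorts the two parent nodes before feeding them and the text to SHA-1, so the resulting node id is independent of parent order. A quick standalone check of that property (a sketch using hashlib in place of the deprecated sha module the original imports):

    import hashlib

    def node_hash(text, p1, p2):
        lo, hi = sorted([p1, p2])  # parent order must not affect the hash
        s = hashlib.sha1(lo)
        s.update(hi)
        s.update(text)
        return s.digest()

    p1, p2 = '\x11' * 20, '\x22' * 20
    assert node_hash('text', p1, p2) == node_hash('text', p2, p1)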