##// END OF EJS Templates
Refactor merge code...
mpm@selenic.com -
r94:7daef883 default
parent child Browse files
Show More
@@ -1,967 +1,825 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, sha, socket, os, time, base64, re, urllib2
9 9 import urllib
10 10 from mercurial import byterange
11 11 from mercurial.transaction import *
12 12 from mercurial.revlog import *
13 13 from difflib import SequenceMatcher
14 14
15 15 class filelog(revlog):
16 16 def __init__(self, opener, path):
17 17 s = self.encodepath(path)
18 18 revlog.__init__(self, opener, os.path.join("data", s + "i"),
19 19 os.path.join("data", s))
20 20
21 21 def encodepath(self, path):
22 22 s = sha.sha(path).digest()
23 23 s = base64.encodestring(s)[:-3]
24 24 s = re.sub("\+", "%", s)
25 25 s = re.sub("/", "_", s)
26 26 return s
27 27
28 28 def read(self, node):
29 29 return self.revision(node)
30 30 def add(self, text, transaction, link, p1=None, p2=None):
31 31 return self.addrevision(text, transaction, link, p1, p2)
32 32
33 def resolvedag(self, old, new, transaction, link):
34 """resolve unmerged heads in our DAG"""
35 if old == new: return None
36 a = self.ancestor(old, new)
37 if old == a: return None
38 return self.merge3(old, new, a, transaction, link)
39
40 def merge3(self, my, other, base, transaction, link):
41 """perform a 3-way merge and append the result"""
42 def temp(prefix, node):
43 (fd, name) = tempfile.mkstemp(prefix)
44 f = os.fdopen(fd, "w")
45 f.write(self.revision(node))
46 f.close()
47 return name
48
49 a = temp("local", my)
50 b = temp("remote", other)
51 c = temp("parent", base)
52
53 cmd = os.environ["HGMERGE"]
54 r = os.system("%s %s %s %s" % (cmd, a, b, c))
55 if r:
56 raise "Merge failed, implement rollback!"
57
58 t = open(a).read()
59 os.unlink(a)
60 os.unlink(b)
61 os.unlink(c)
62 return self.addrevision(t, transaction, link, my, other)
63
64 def merge(self, other, transaction, linkseq, link):
65 """perform a merge and resolve resulting heads"""
66 (o, n) = self.mergedag(other, transaction, linkseq)
67 return self.resolvedag(o, n, transaction, link)
68
69 33 def annotate(self, node):
70 34 revs = []
71 35 while node != nullid:
72 36 revs.append(node)
73 37 node = self.parents(node)[0]
74 38 revs.reverse()
75 39 prev = []
76 40 annotate = []
77 41 for node in revs:
78 42 curr = self.read(node).splitlines(1)
79 43 linkrev = self.linkrev(node)
80 44 sm = SequenceMatcher(None, prev, curr)
81 45 offset = 0
82 46 for o, m, n, s, t in sm.get_opcodes():
83 47 if o in ('insert','replace'):
84 48 annotate[m+offset:n+offset] = \
85 49 [ (linkrev, l) for l in curr[s:t]]
86 50 if o == 'insert':
87 51 offset += m-n
88 52 elif o == 'delete':
89 53 del annotate[m+offset:n+offset]
90 54 offset -= m-n
91 55 assert len(annotate) == len(curr)
92 56 prev = curr
93 57 return annotate
94 58
95 59 class manifest(revlog):
96 60 def __init__(self, opener):
97 61 self.mapcache = None
98 62 self.listcache = None
99 63 self.addlist = None
100 64 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
101 65
102 66 def read(self, node):
103 67 if self.mapcache and self.mapcache[0] == node:
104 68 return self.mapcache[1].copy()
105 69 text = self.revision(node)
106 70 map = {}
107 71 self.listcache = (text, text.splitlines(1))
108 72 for l in self.listcache[1]:
109 73 (f, n) = l.split('\0')
110 74 map[f] = bin(n[:40])
111 75 self.mapcache = (node, map)
112 76 return map
113 77
114 78 def diff(self, a, b):
115 79 # this is sneaky, as we're not actually using a and b
116 80 if self.listcache and len(self.listcache[0]) == len(a):
117 81 return mdiff.diff(self.listcache[1], self.addlist, 1)
118 82 else:
119 83 return mdiff.textdiff(a, b)
120 84
121 85 def add(self, map, transaction, link, p1=None, p2=None):
122 86 files = map.keys()
123 87 files.sort()
124 88
125 89 self.addlist = ["%s\000%s\n" % (f, hex(map[f])) for f in files]
126 90 text = "".join(self.addlist)
127 91
128 92 n = self.addrevision(text, transaction, link, p1, p2)
129 93 self.mapcache = (n, map)
130 94 self.listcache = (text, self.addlist)
131 95
132 96 return n
133 97
134 98 class changelog(revlog):
135 99 def __init__(self, opener):
136 100 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
137 101
138 102 def extract(self, text):
139 103 if not text:
140 104 return (nullid, "", "0", [], "")
141 105 last = text.index("\n\n")
142 106 desc = text[last + 2:]
143 107 l = text[:last].splitlines()
144 108 manifest = bin(l[0])
145 109 user = l[1]
146 110 date = l[2]
147 111 files = l[3:]
148 112 return (manifest, user, date, files, desc)
149 113
150 114 def read(self, node):
151 115 return self.extract(self.revision(node))
152 116
153 117 def add(self, manifest, list, desc, transaction, p1=None, p2=None):
154 118 user = (os.environ.get("HGUSER") or
155 119 os.environ.get("EMAIL") or
156 120 os.environ.get("LOGNAME", "unknown") + '@' + socket.getfqdn())
157 121 date = "%d %d" % (time.time(), time.timezone)
158 122 list.sort()
159 123 l = [hex(manifest), user, date] + list + ["", desc]
160 124 text = "\n".join(l)
161 125 return self.addrevision(text, transaction, self.count(), p1, p2)
162 126
163 def merge3(self, my, other, base):
164 pass
165
166 127 class dircache:
167 128 def __init__(self, opener, ui):
168 129 self.opener = opener
169 130 self.dirty = 0
170 131 self.ui = ui
171 132 self.map = None
172 133 def __del__(self):
173 134 if self.dirty: self.write()
174 135 def __getitem__(self, key):
175 136 try:
176 137 return self.map[key]
177 138 except TypeError:
178 139 self.read()
179 140 return self[key]
180 141
181 142 def read(self):
182 143 if self.map is not None: return self.map
183 144
184 145 self.map = {}
185 146 try:
186 147 st = self.opener("dircache").read()
187 148 except: return
188 149
189 150 pos = 0
190 151 while pos < len(st):
191 152 e = struct.unpack(">llll", st[pos:pos+16])
192 153 l = e[3]
193 154 pos += 16
194 155 f = st[pos:pos + l]
195 156 self.map[f] = e[:3]
196 157 pos += l
197 158
198 159 def update(self, files):
199 160 if not files: return
200 161 self.read()
201 162 self.dirty = 1
202 163 for f in files:
203 164 try:
204 165 s = os.stat(f)
205 166 self.map[f] = (s.st_mode, s.st_size, s.st_mtime)
206 167 except IOError:
207 168 self.remove(f)
208 169
209 170 def taint(self, files):
210 171 if not files: return
211 172 self.read()
212 173 self.dirty = 1
213 174 for f in files:
214 175 self.map[f] = (0, -1, 0)
215 176
216 177 def remove(self, files):
217 178 if not files: return
218 179 self.read()
219 180 self.dirty = 1
220 181 for f in files:
221 182 try:
222 183 del self.map[f]
223 184 except KeyError:
224 185 self.ui.warn("Not in dircache: %s\n" % f)
225 186 pass
226 187
227 188 def clear(self):
228 189 self.map = {}
229 190 self.dirty = 1
230 191
231 192 def write(self):
232 193 st = self.opener("dircache", "w")
233 194 for f, e in self.map.items():
234 195 e = struct.pack(">llll", e[0], e[1], e[2], len(f))
235 196 st.write(e + f)
236 197 self.dirty = 0
237 198
238 199 def copy(self):
239 200 self.read()
240 201 return self.map.copy()
241 202
242 203 # used to avoid circular references so destructors work
243 204 def opener(base):
244 205 p = base
245 206 def o(path, mode="r"):
246 207 if p[:7] == "http://":
247 208 f = os.path.join(p, urllib.quote(path))
248 209 return httprangereader(f)
249 210
250 211 f = os.path.join(p, path)
251 212
252 213 if mode != "r" and os.path.isfile(f):
253 214 s = os.stat(f)
254 215 if s.st_nlink > 1:
255 216 file(f + ".tmp", "w").write(file(f).read())
256 217 os.rename(f+".tmp", f)
257 218
258 219 return file(f, mode)
259 220
260 221 return o
261 222
262 223 class localrepository:
263 224 def __init__(self, ui, path=None, create=0):
264 225 self.remote = 0
265 226 if path and path[:7] == "http://":
266 227 self.remote = 1
267 228 self.path = path
268 229 else:
269 230 if not path:
270 231 p = os.getcwd()
271 232 while not os.path.isdir(os.path.join(p, ".hg")):
272 233 p = os.path.dirname(p)
273 234 if p == "/": raise "No repo found"
274 235 path = p
275 236 self.path = os.path.join(path, ".hg")
276 237
277 238 self.root = path
278 239 self.ui = ui
279 240
280 241 if create:
281 242 os.mkdir(self.path)
282 243 os.mkdir(self.join("data"))
283 244
284 245 self.opener = opener(self.path)
285 246 self.manifest = manifest(self.opener)
286 247 self.changelog = changelog(self.opener)
287 248 self.ignorelist = None
288 249 self.tags = None
289 250
290 251 if not self.remote:
291 252 self.dircache = dircache(self.opener, ui)
292 253 try:
293 254 self.current = bin(self.opener("current").read())
294 255 except IOError:
295 256 self.current = None
296 257
297 258 def setcurrent(self, node):
298 259 self.current = node
299 260 self.opener("current", "w").write(hex(node))
300 261
301 262 def ignore(self, f):
302 263 if self.ignorelist is None:
303 264 self.ignorelist = []
304 265 try:
305 266 l = open(os.path.join(self.root, ".hgignore"))
306 267 for pat in l:
307 268 if pat != "\n":
308 269 self.ignorelist.append(re.compile(pat[:-1]))
309 270 except IOError: pass
310 271 for pat in self.ignorelist:
311 272 if pat.search(f): return True
312 273 return False
313 274
314 275 def lookup(self, key):
315 276 if self.tags is None:
316 277 self.tags = {}
317 278 try:
318 279 fl = self.file(".hgtags")
319 280 for l in fl.revision(fl.tip()).splitlines():
320 281 if l:
321 282 n, k = l.split(" ")
322 283 self.tags[k] = bin(n)
323 284 except KeyError: pass
324 285 try:
325 286 return self.tags[key]
326 287 except KeyError:
327 288 return self.changelog.lookup(key)
328 289
329 290 def join(self, f):
330 291 return os.path.join(self.path, f)
331 292
332 293 def file(self, f):
333 294 return filelog(self.opener, f)
334 295
335 296 def transaction(self):
336 297 return transaction(self.opener, self.join("journal"))
337 298
338 def merge(self, other):
339 tr = self.transaction()
340 changed = {}
341 new = {}
342 seqrev = self.changelog.count()
343 # some magic to allow fiddling in nested scope
344 nextrev = [seqrev]
345
346 # helpers for back-linking file revisions to local changeset
347 # revisions so we can immediately get to changeset from annotate
348 def accumulate(text):
349 # track which files are added in which changeset and the
350 # corresponding _local_ changeset revision
351 files = self.changelog.extract(text)[3]
352 for f in files:
353 changed.setdefault(f, []).append(nextrev[0])
354 nextrev[0] += 1
355
356 def seq(start):
357 while 1:
358 yield start
359 start += 1
360
361 def lseq(l):
362 for r in l:
363 yield r
364
365 # begin the import/merge of changesets
366 self.ui.status("merging new changesets\n")
367 (co, cn) = self.changelog.mergedag(other.changelog, tr,
368 seq(seqrev), accumulate)
369 resolverev = self.changelog.count()
370
371 # is there anything to do?
372 if co == cn:
373 tr.close()
374 return
375
376 # do we need to resolve?
377 simple = (co == self.changelog.ancestor(co, cn))
378
379 # merge all files changed by the changesets,
380 # keeping track of the new tips
381 changelist = changed.keys()
382 changelist.sort()
383 for f in changelist:
384 sys.stdout.write(".")
385 sys.stdout.flush()
386 r = self.file(f)
387 node = r.merge(other.file(f), tr, lseq(changed[f]), resolverev)
388 if node:
389 new[f] = node
390 sys.stdout.write("\n")
391
392 # begin the merge of the manifest
393 self.ui.status("merging manifests\n")
394 (mm, mo) = self.manifest.mergedag(other.manifest, tr, seq(seqrev))
395
396 # For simple merges, we don't need to resolve manifests or changesets
397 if simple:
398 tr.close()
399 return
400
401 ma = self.manifest.ancestor(mm, mo)
402
403 # resolve the manifest to point to all the merged files
404 self.ui.status("resolving manifests\n")
405 omap = self.manifest.read(mo) # other
406 amap = self.manifest.read(ma) # ancestor
407 mmap = self.manifest.read(mm) # mine
408 nmap = {}
409
410 for f, mid in mmap.iteritems():
411 if f in omap:
412 if mid != omap[f]:
413 nmap[f] = new.get(f, mid) # use merged version
414 else:
415 nmap[f] = new.get(f, mid) # they're the same
416 del omap[f]
417 elif f in amap:
418 if mid != amap[f]:
419 pass # we should prompt here
420 else:
421 pass # other deleted it
422 else:
423 nmap[f] = new.get(f, mid) # we created it
424
425 del mmap
426
427 for f, oid in omap.iteritems():
428 if f in amap:
429 if oid != amap[f]:
430 pass # this is the nasty case, we should prompt
431 else:
432 pass # probably safe
433 else:
434 nmap[f] = new.get(f, oid) # remote created it
435
436 del omap
437 del amap
438
439 node = self.manifest.add(nmap, tr, resolverev, mm, mo)
440
441 # Now all files and manifests are merged, we add the changed files
442 # and manifest id to the changelog
443 self.ui.status("committing merge changeset\n")
444 new = new.keys()
445 new.sort()
446 if co == cn: cn = -1
447
448 edittext = "\n"+"".join(["HG: changed %s\n" % f for f in new])
449 edittext = self.ui.edit(edittext)
450 n = self.changelog.add(node, new, edittext, tr, co, cn)
451
452 tr.close()
453
454 299 def commit(self, parent, update = None, text = ""):
455 300 tr = self.transaction()
456 301
457 302 try:
458 303 remove = [ l[:-1] for l in self.opener("to-remove") ]
459 304 os.unlink(self.join("to-remove"))
460 305
461 306 except IOError:
462 307 remove = []
463 308
464 309 if update == None:
465 310 update = self.diffdir(self.root, parent)[0]
466 311
467 312 # check in files
468 313 new = {}
469 314 linkrev = self.changelog.count()
470 315 for f in update:
471 316 self.ui.note(f + "\n")
472 317 try:
473 318 t = file(f).read()
474 319 except IOError:
475 320 remove.append(f)
476 321 continue
477 322 r = self.file(f)
478 323 new[f] = r.add(t, tr, linkrev)
479 324
480 325 # update manifest
481 326 mmap = self.manifest.read(self.manifest.tip())
482 327 mmap.update(new)
483 328 for f in remove:
484 329 del mmap[f]
485 330 mnode = self.manifest.add(mmap, tr, linkrev)
486 331
487 332 # add changeset
488 333 new = new.keys()
489 334 new.sort()
490 335
491 336 edittext = text + "\n"+"".join(["HG: changed %s\n" % f for f in new])
492 337 edittext += "".join(["HG: removed %s\n" % f for f in remove])
493 338 edittext = self.ui.edit(edittext)
494 339
495 340 n = self.changelog.add(mnode, new, edittext, tr)
496 341 tr.close()
497 342
498 343 self.setcurrent(n)
499 344 self.dircache.update(new)
500 345 self.dircache.remove(remove)
501 346
502 347 def checkdir(self, path):
503 348 d = os.path.dirname(path)
504 349 if not d: return
505 350 if not os.path.isdir(d):
506 351 self.checkdir(d)
507 352 os.mkdir(d)
508 353
509 354 def checkout(self, node):
510 355 # checkout is really dumb at the moment
511 356 # it ought to basically merge
512 357 change = self.changelog.read(node)
513 358 mmap = self.manifest.read(change[0])
514 359
515 360 l = mmap.keys()
516 361 l.sort()
517 362 stats = []
518 363 for f in l:
519 364 self.ui.note(f + "\n")
520 365 r = self.file(f)
521 366 t = r.revision(mmap[f])
522 367 try:
523 368 file(f, "w").write(t)
524 369 except:
525 370 self.checkdir(f)
526 371 file(f, "w").write(t)
527 372
528 373 self.setcurrent(node)
529 374 self.dircache.clear()
530 375 self.dircache.update(l)
531 376
532 377 def diffdir(self, path, changeset):
533 378 changed = []
534 379 mf = {}
535 380 added = []
536 381
537 382 if changeset:
538 383 change = self.changelog.read(changeset)
539 384 mf = self.manifest.read(change[0])
540 385
541 386 if changeset == self.current:
542 387 dc = self.dircache.copy()
543 388 else:
544 389 dc = dict.fromkeys(mf)
545 390
546 391 def fcmp(fn):
547 392 t1 = file(os.path.join(self.root, fn)).read()
548 393 t2 = self.file(fn).revision(mf[fn])
549 394 return cmp(t1, t2)
550 395
551 396 for dir, subdirs, files in os.walk(self.root):
552 397 d = dir[len(self.root)+1:]
553 398 if ".hg" in subdirs: subdirs.remove(".hg")
554 399
555 400 for f in files:
556 401 fn = os.path.join(d, f)
557 402 try: s = os.stat(os.path.join(self.root, fn))
558 403 except: continue
559 404 if fn in dc:
560 405 c = dc[fn]
561 406 del dc[fn]
562 407 if not c:
563 408 if fcmp(fn):
564 409 changed.append(fn)
565 410 elif c[1] != s.st_size:
566 411 changed.append(fn)
567 412 elif c[0] != s.st_mode or c[2] != s.st_mtime:
568 413 if fcmp(fn):
569 414 changed.append(fn)
570 415 else:
571 416 if self.ignore(fn): continue
572 417 added.append(fn)
573 418
574 419 deleted = dc.keys()
575 420 deleted.sort()
576 421
577 422 return (changed, added, deleted)
578 423
579 424 def diffrevs(self, node1, node2):
580 425 changed, added = [], []
581 426
582 427 change = self.changelog.read(node1)
583 428 mf1 = self.manifest.read(change[0])
584 429 change = self.changelog.read(node2)
585 430 mf2 = self.manifest.read(change[0])
586 431
587 432 for fn in mf2:
588 433 if mf1.has_key(fn):
589 434 if mf1[fn] != mf2[fn]:
590 435 changed.append(fn)
591 436 del mf1[fn]
592 437 else:
593 438 added.append(fn)
594 439
595 440 deleted = mf1.keys()
596 441 deleted.sort()
597 442
598 443 return (changed, added, deleted)
599 444
600 445 def add(self, list):
601 446 self.dircache.taint(list)
602 447
603 448 def remove(self, list):
604 449 dl = self.opener("to-remove", "a")
605 450 for f in list:
606 451 dl.write(f + "\n")
607 452
608 453 def branches(self, nodes):
609 454 if not nodes: nodes = [self.changelog.tip()]
610 455 b = []
611 456 for n in nodes:
612 457 t = n
613 458 while n:
614 459 p = self.changelog.parents(n)
615 460 if p[1] != nullid or p[0] == nullid:
616 461 b.append((t, n, p[0], p[1]))
617 462 break
618 463 n = p[0]
619 464 return b
620 465
621 466 def between(self, pairs):
622 467 r = []
623 468
624 469 for top, bottom in pairs:
625 470 n, l, i = top, [], 0
626 471 f = 1
627 472
628 473 while n != bottom:
629 474 p = self.changelog.parents(n)[0]
630 475 if i == f:
631 476 l.append(n)
632 477 f = f * 2
633 478 n = p
634 479 i += 1
635 480
636 481 r.append(l)
637 482
638 483 return r
639 484
640 485 def newer(self, nodes):
641 486 m = {}
642 487 nl = []
488 pm = {}
643 489 cl = self.changelog
644 490 t = l = cl.count()
491
492 # find the lowest numbered node
645 493 for n in nodes:
646 494 l = min(l, cl.rev(n))
647 for p in cl.parents(n):
648 m[p] = 1
495 m[n] = 1
649 496
650 497 for i in xrange(l, t):
651 498 n = cl.node(i)
499 if n in m: # explicitly listed
500 pm[n] = 1
501 nl.append(n)
502 continue
652 503 for p in cl.parents(n):
653 if p in m and n not in m:
654 m[n] = 1
504 if p in pm: # parent listed
505 pm[n] = 1
655 506 nl.append(n)
507 break
656 508
657 509 return nl
658 510
659 511 def getchangegroup(self, remote):
660 512 tip = remote.branches([])[0]
661 513 self.ui.debug("remote tip branch is %s:%s\n" %
662 514 (short(tip[0]), short(tip[1])))
663 515 m = self.changelog.nodemap
664 516 unknown = [tip]
665 517 search = []
666 518 fetch = []
667 519
668 520 if tip[0] in m:
669 521 self.ui.note("nothing to do!\n")
670 522 return None
671 523
672 524 while unknown:
673 525 n = unknown.pop(0)
674 526 if n == nullid: break
675 527 if n[1] and n[1] in m: # do we know the base?
676 528 self.ui.debug("found incomplete branch %s\n" % short(n[1]))
677 529 search.append(n) # schedule branch range for scanning
678 530 else:
531 if n[2] in m and n[3] in m:
532 if n[1] not in fetch:
533 self.ui.debug("found new changeset %s\n" %
534 short(n[1]))
535 fetch.append(n[1]) # earliest unknown
536 continue
679 537 for b in remote.branches([n[2], n[3]]):
680 if b[0] in m:
681 if n[1] not in fetch:
682 self.ui.debug("found new changeset %s\n" %
683 short(n[1]))
684 fetch.append(n[1]) # earliest unknown
685 else:
538 if b[0] not in m:
686 539 unknown.append(b)
687 540
688 541 while search:
689 542 n = search.pop(0)
690 543 l = remote.between([(n[0], n[1])])[0]
691 544 p = n[0]
692 545 f = 1
693 546 for i in l + [n[1]]:
694 547 if i in m:
695 548 if f <= 2:
696 549 self.ui.debug("found new branch changeset %s\n" %
697 550 short(p))
698 551 fetch.append(p)
699 552 else:
700 553 self.ui.debug("narrowed branch search to %s:%s\n"
701 554 % (short(p), short(i)))
702 555 search.append((p, i))
703 556 break
704 557 p, f = i, f * 2
705 558
706 559 for f in fetch:
707 560 if f in m:
708 561 raise "already have", short(f[:4])
709 562
710 self.ui.note("merging new changesets starting at " +
563 self.ui.note("adding new changesets starting at " +
711 564 " ".join([short(f) for f in fetch]) + "\n")
712 565
713 566 return remote.changegroup(fetch)
714 567
715 568 def changegroup(self, basenodes):
716 569 nodes = self.newer(basenodes)
717 570
718 571 # construct the link map
719 572 linkmap = {}
720 573 for n in nodes:
721 574 linkmap[self.changelog.rev(n)] = n
722 575
723 576 # construct a list of all changed files
724 577 changed = {}
725 578 for n in nodes:
726 579 c = self.changelog.read(n)
727 580 for f in c[3]:
728 581 changed[f] = 1
729 582 changed = changed.keys()
730 583 changed.sort()
731 584
732 585 # the changegroup is changesets + manifests + all file revs
733 586 revs = [ self.changelog.rev(n) for n in nodes ]
734 587
735 588 yield self.changelog.group(linkmap)
736 589 yield self.manifest.group(linkmap)
737 590
738 591 for f in changed:
739 592 g = self.file(f).group(linkmap)
740 593 if not g: raise "couldn't find change to %s" % f
741 594 l = struct.pack(">l", len(f))
742 595 yield "".join([l, f, g])
743 596
744 597 def addchangegroup(self, generator):
745 598 class genread:
746 599 def __init__(self, generator):
747 600 self.g = generator
748 601 self.buf = ""
749 602 def read(self, l):
750 603 while l > len(self.buf):
751 604 try:
752 605 self.buf += self.g.next()
753 606 except StopIteration:
754 607 break
755 608 d, self.buf = self.buf[:l], self.buf[l:]
756 609 return d
757 610
758 611 if not generator: return
759 612 source = genread(generator)
760 613
761 614 def getchunk(add = 0):
762 615 d = source.read(4)
763 616 if not d: return ""
764 617 l = struct.unpack(">l", d)[0]
765 618 return source.read(l - 4 + add)
766 619
767 620 tr = self.transaction()
768 621 simple = True
769 622
770 self.ui.status("merging changesets\n")
623 self.ui.status("adding changesets\n")
771 624 # pull off the changeset group
625 def report(x):
626 self.ui.debug("add changeset %s\n" % short(x))
627 return self.changelog.count()
628
772 629 csg = getchunk()
773 630 co = self.changelog.tip()
774 cn = self.changelog.addgroup(csg, lambda x: self.changelog.count(), tr)
631 cn = self.changelog.addgroup(csg, report, tr)
775 632
776 self.ui.status("merging manifests\n")
633 self.ui.status("adding manifests\n")
777 634 # pull off the manifest group
778 635 mfg = getchunk()
779 636 mm = self.manifest.tip()
780 637 mo = self.manifest.addgroup(mfg, lambda x: self.changelog.rev(x), tr)
781 638
782 639 # do we need a resolve?
783 640 if self.changelog.ancestor(co, cn) != co:
784 641 simple = False
785 642 resolverev = self.changelog.count()
786 643
787 644 # process the files
788 self.ui.status("merging files\n")
645 self.ui.status("adding files\n")
789 646 new = {}
790 647 while 1:
791 648 f = getchunk(4)
792 649 if not f: break
793 650 fg = getchunk()
794
651 self.ui.debug("adding %s revisions\n" % f)
795 652 fl = self.file(f)
796 653 o = fl.tip()
797 654 n = fl.addgroup(fg, lambda x: self.changelog.rev(x), tr)
798 655 if not simple:
799 nn = fl.resolvedag(o, n, tr, resolverev)
800 if nn:
801 self.ui.note("merged %s\n", f)
802 new[f] = nn
656 if o == n: continue
657 # this file has changed between branches, so it must be
658 # represented in the merge changeset
659 new[f] = self.merge3(fl, f, o, n, tr, resolverev)
803 660
804 661 # For simple merges, we don't need to resolve manifests or changesets
805 662 if simple:
806 663 self.ui.debug("simple merge, skipping resolve\n")
807 664 tr.close()
808 665 return
809 666
810 667 # resolve the manifest to point to all the merged files
811 668 self.ui.status("resolving manifests\n")
812 669 ma = self.manifest.ancestor(mm, mo)
813 670 omap = self.manifest.read(mo) # other
814 671 amap = self.manifest.read(ma) # ancestor
815 672 mmap = self.manifest.read(mm) # mine
816 673 self.ui.debug("ancestor %s local %s other %s\n" %
817 674 (short(ma), short(mm), short(mo)))
818 675 nmap = {}
819 676
820 677 for f, mid in mmap.iteritems():
821 678 if f in omap:
822 679 if mid != omap[f]:
823 680 self.ui.debug("%s versions differ\n" % f)
824 if f in new: self.ui.note("%s updated in resolve\n" % f)
825 nmap[f] = new.get(f, mid) # use merged version
681 if f in new: self.ui.debug("%s updated in resolve\n" % f)
682 # use merged version or local version
683 nmap[f] = new.get(f, mid)
826 684 else:
827 685 nmap[f] = mid # keep ours
828 686 del omap[f]
829 687 elif f in amap:
830 688 if mid != amap[f]:
831 689 self.ui.debug("local changed %s which other deleted\n" % f)
832 690 pass # we should prompt here
833 691 else:
834 692 self.ui.debug("other deleted %s\n" % f)
835 693 pass # other deleted it
836 694 else:
837 695 self.ui.debug("local created %s\n" %f)
838 696 nmap[f] = mid # we created it
839 697
840 698 del mmap
841 699
842 700 for f, oid in omap.iteritems():
843 701 if f in amap:
844 702 if oid != amap[f]:
845 703 self.ui.debug("other changed %s which we deleted\n" % f)
846 704 pass # this is the nasty case, we should prompt
847 705 else:
848 706 pass # probably safe
849 707 else:
850 708 self.ui.debug("remote created %s\n" % f)
851 709 nmap[f] = new.get(f, oid) # remote created it
852 710
853 711 del omap
854 712 del amap
855 713
856 714 node = self.manifest.add(nmap, tr, resolverev, mm, mo)
857 715
858 716 # Now all files and manifests are merged, we add the changed files
859 717 # and manifest id to the changelog
860 718 self.ui.status("committing merge changeset\n")
861 719 new = new.keys()
862 720 new.sort()
863 721 if co == cn: cn = -1
864 722
865 723 edittext = "\nHG: merge resolve\n" + \
866 724 "".join(["HG: changed %s\n" % f for f in new])
867 725 edittext = self.ui.edit(edittext)
868 726 n = self.changelog.add(node, new, edittext, tr, co, cn)
869 727
870 728 tr.close()
871 729
872 730 class remoterepository:
873 731 def __init__(self, ui, path):
874 732 self.url = path.replace("hg://", "http://", 1)
875 733 self.ui = ui
876 734
877 735 def do_cmd(self, cmd, **args):
878 736 self.ui.debug("sending %s command\n" % cmd)
879 737 q = {"cmd": cmd}
880 738 q.update(args)
881 739 qs = urllib.urlencode(q)
882 740 cu = "%s?%s" % (self.url, qs)
883 741 return urllib.urlopen(cu)
884 742
885 743 def branches(self, nodes):
886 744 n = " ".join(map(hex, nodes))
887 745 d = self.do_cmd("branches", nodes=n).read()
888 746 br = [ map(bin, b.split(" ")) for b in d.splitlines() ]
889 747 return br
890 748
891 749 def between(self, pairs):
892 750 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
893 751 d = self.do_cmd("between", pairs=n).read()
894 752 p = [ map(bin, l.split(" ")) for l in d.splitlines() ]
895 753 return p
896 754
897 755 def changegroup(self, nodes):
898 756 n = " ".join(map(hex, nodes))
899 757 zd = zlib.decompressobj()
900 758 f = self.do_cmd("changegroup", roots=n)
901 759 while 1:
902 760 d = f.read(4096)
903 761 if not d:
904 762 yield zd.flush()
905 763 break
906 764 yield zd.decompress(d)
907 765
908 766 def repository(ui, path=None, create=0):
909 767 if path and path[:5] == "hg://":
910 768 return remoterepository(ui, path)
911 769 else:
912 770 return localrepository(ui, path, create)
913 771
914 772 class ui:
915 773 def __init__(self, verbose=False, debug=False, quiet=False):
916 774 self.quiet = quiet and not verbose and not debug
917 775 self.verbose = verbose or debug
918 776 self.debugflag = debug
919 777 def write(self, *args):
920 778 for a in args:
921 779 sys.stdout.write(str(a))
922 780 def prompt(self, msg, pat):
923 781 while 1:
924 782 sys.stdout.write(msg)
925 783 r = sys.stdin.readline()[:-1]
926 784 if re.match(pat, r):
927 785 return r
928 786 def status(self, *msg):
929 787 if not self.quiet: self.write(*msg)
930 788 def warn(self, msg):
931 789 self.write(*msg)
932 790 def note(self, msg):
933 791 if self.verbose: self.write(*msg)
934 792 def debug(self, msg):
935 793 if self.debugflag: self.write(*msg)
936 794 def edit(self, text):
937 795 (fd, name) = tempfile.mkstemp("hg")
938 796 f = os.fdopen(fd, "w")
939 797 f.write(text)
940 798 f.close()
941 799
942 800 editor = os.environ.get("EDITOR", "vi")
943 801 r = os.system("%s %s" % (editor, name))
944 802 if r:
945 803 raise "Edit failed!"
946 804
947 805 t = open(name).read()
948 806 t = re.sub("(?m)^HG:.*\n", "", t)
949 807
950 808 return t
951 809
952 810
953 811 class httprangereader:
954 812 def __init__(self, url):
955 813 self.url = url
956 814 self.pos = 0
957 815 def seek(self, pos):
958 816 self.pos = pos
959 817 def read(self, bytes=None):
960 818 opener = urllib2.build_opener(byterange.HTTPRangeHandler())
961 819 urllib2.install_opener(opener)
962 820 req = urllib2.Request(self.url)
963 821 end = ''
964 822 if bytes: end = self.pos + bytes
965 823 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
966 824 f = urllib2.urlopen(req)
967 825 return f.read()
@@ -1,481 +1,450 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # This provides efficient delta storage with O(1) retrieve and append
4 4 # and O(changes) merge between branches
5 5 #
6 6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
7 7 #
8 8 # This software may be used and distributed according to the terms
9 9 # of the GNU General Public License, incorporated herein by reference.
10 10
11 11 import zlib, struct, sha, os, tempfile, binascii
12 12 from mercurial import mdiff
13 13
14 14 def hex(node): return binascii.hexlify(node)
15 15 def bin(node): return binascii.unhexlify(node)
16 16 def short(node): return hex(node[:4])
17 17
18 18 def compress(text):
19 19 return zlib.compress(text)
20 20
21 21 def decompress(bin):
22 22 return zlib.decompress(bin)
23 23
24 24 def hash(text, p1, p2):
25 25 l = [p1, p2]
26 26 l.sort()
27 27 return sha.sha(l[0] + l[1] + text).digest()
28 28
29 29 nullid = "\0" * 20
30 30 indexformat = ">4l20s20s20s"
31 31
32 32 class lazyparser:
33 33 def __init__(self, data):
34 34 self.data = data
35 35 self.s = struct.calcsize(indexformat)
36 36 self.l = len(data)/self.s
37 37 self.index = [None] * self.l
38 38 self.map = {nullid: -1}
39 39
40 40 if 0:
41 41 n = 0
42 42 i = self.data
43 43 s = struct.calcsize(indexformat)
44 44 for f in xrange(0, len(i), s):
45 45 # offset, size, base, linkrev, p1, p2, nodeid
46 46 e = struct.unpack(indexformat, i[f:f + s])
47 47 self.map[e[6]] = n
48 48 self.index.append(e)
49 49 n += 1
50 50
51 51 def load(self, pos):
52 52 block = pos / 1000
53 53 i = block * 1000
54 54 end = min(self.l, i + 1000)
55 55 while i < end:
56 56 d = self.data[i * self.s: (i + 1) * self.s]
57 57 e = struct.unpack(indexformat, d)
58 58 self.index[i] = e
59 59 self.map[e[6]] = i
60 60 i += 1
61 61
62 62 class lazyindex:
63 63 def __init__(self, parser):
64 64 self.p = parser
65 65 def __len__(self):
66 66 return len(self.p.index)
67 67 def __getitem__(self, pos):
68 68 i = self.p.index[pos]
69 69 if not i:
70 70 self.p.load(pos)
71 71 return self.p.index[pos]
72 72 return i
73 73 def append(self, e):
74 74 self.p.index.append(e)
75 75
76 76 class lazymap:
77 77 def __init__(self, parser):
78 78 self.p = parser
79 79 def load(self, key):
80 80 n = self.p.data.find(key)
81 81 if n < 0: raise KeyError("node " + hex(key))
82 82 pos = n / self.p.s
83 83 self.p.load(pos)
84 84 def __contains__(self, key):
85 85 try:
86 86 self[key]
87 87 return True
88 88 except KeyError:
89 89 return False
90 90 def __getitem__(self, key):
91 91 try:
92 92 return self.p.map[key]
93 93 except KeyError:
94 94 try:
95 95 self.load(key)
96 96 return self.p.map[key]
97 97 except KeyError:
98 98 raise KeyError("node " + hex(key))
99 99 def __setitem__(self, key, val):
100 100 self.p.map[key] = val
101 101
102 102 class revlog:
103 103 def __init__(self, opener, indexfile, datafile):
104 104 self.indexfile = indexfile
105 105 self.datafile = datafile
106 106 self.opener = opener
107 107 self.cache = None
108 108 # read the whole index for now, handle on-demand later
109 109 try:
110 110 i = self.opener(self.indexfile).read()
111 111 except IOError:
112 112 i = ""
113 113 parser = lazyparser(i)
114 114 self.index = lazyindex(parser)
115 115 self.nodemap = lazymap(parser)
116 116
117 117 def tip(self): return self.node(len(self.index) - 1)
118 118 def count(self): return len(self.index)
119 119 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
120 120 def rev(self, node): return self.nodemap[node]
121 121 def linkrev(self, node): return self.index[self.nodemap[node]][3]
122 122 def parents(self, node):
123 123 if node == nullid: return (nullid, nullid)
124 124 return self.index[self.nodemap[node]][4:6]
125 125
126 126 def start(self, rev): return self.index[rev][0]
127 127 def length(self, rev): return self.index[rev][1]
128 128 def end(self, rev): return self.start(rev) + self.length(rev)
129 129 def base(self, rev): return self.index[rev][2]
130 130
131 131 def lookup(self, id):
132 132 try:
133 133 rev = int(id)
134 134 return self.node(rev)
135 135 except ValueError:
136 136 c = []
137 137 for n in self.nodemap:
138 138 if id in hex(n):
139 139 c.append(n)
140 140 if len(c) > 1: raise KeyError("Ambiguous identifier")
141 141 if len(c) < 1: raise KeyError("No match found")
142 142 return c[0]
143 143
144 144 return None
145 145
146 def revisions(self, list):
147 # this can be optimized to do spans, etc
148 # be stupid for now
149 for node in list:
150 yield self.revision(node)
151
152 146 def diff(self, a, b):
153 147 return mdiff.textdiff(a, b)
154 148
155 149 def patches(self, t, pl):
156 150 return mdiff.patches(t, pl)
157 151
158 152 def revision(self, node):
159 153 if node == nullid: return ""
160 154 if self.cache and self.cache[0] == node: return self.cache[2]
161 155
162 156 text = None
163 157 rev = self.rev(node)
164 158 base = self.base(rev)
165 159 start = self.start(base)
166 160 end = self.end(rev)
167 161
168 162 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
169 163 base = self.cache[1]
170 164 start = self.start(base + 1)
171 165 text = self.cache[2]
172 166 last = 0
173 167
174 168 f = self.opener(self.datafile)
175 169 f.seek(start)
176 170 data = f.read(end - start)
177 171
178 172 if not text:
179 173 last = self.length(base)
180 174 text = decompress(data[:last])
181 175
182 176 bins = []
183 177 for r in xrange(base + 1, rev + 1):
184 178 s = self.length(r)
185 179 bins.append(decompress(data[last:last + s]))
186 180 last = last + s
187 181
188 182 text = mdiff.patches(text, bins)
189 183
190 184 (p1, p2) = self.parents(node)
191 185 if node != hash(text, p1, p2):
192 186 raise "integrity check failed on %s:%d" % (self.datafile, rev)
193 187
194 188 self.cache = (node, rev, text)
195 189 return text
196 190
197 191 def addrevision(self, text, transaction, link, p1=None, p2=None):
198 192 if text is None: text = ""
199 193 if p1 is None: p1 = self.tip()
200 194 if p2 is None: p2 = nullid
201 195
202 196 node = hash(text, p1, p2)
203 197
204 198 n = self.count()
205 199 t = n - 1
206 200
207 201 if n:
208 202 base = self.base(t)
209 203 start = self.start(base)
210 204 end = self.end(t)
211 205 prev = self.revision(self.tip())
212 206 data = compress(self.diff(prev, text))
213 207 dist = end - start + len(data)
214 208
215 209 # full versions are inserted when the needed deltas
216 210 # become comparable to the uncompressed text
217 211 if not n or dist > len(text) * 2:
218 212 data = compress(text)
219 213 base = n
220 214 else:
221 215 base = self.base(t)
222 216
223 217 offset = 0
224 218 if t >= 0:
225 219 offset = self.end(t)
226 220
227 221 e = (offset, len(data), base, link, p1, p2, node)
228 222
229 223 self.index.append(e)
230 224 self.nodemap[node] = n
231 225 entry = struct.pack(indexformat, *e)
232 226
233 227 transaction.add(self.datafile, e[0])
234 228 self.opener(self.datafile, "a").write(data)
235 229 transaction.add(self.indexfile, n * len(entry))
236 230 self.opener(self.indexfile, "a").write(entry)
237 231
238 232 self.cache = (node, n, text)
239 233 return node
240 234
241 235 def ancestor(self, a, b):
242 236 def expand(list, map):
243 237 a = []
244 238 while list:
245 239 n = list.pop(0)
246 240 map[n] = 1
247 241 yield n
248 242 for p in self.parents(n):
249 243 if p != nullid and p not in map:
250 244 list.append(p)
251 245 yield nullid
252 246
253 247 amap = {}
254 248 bmap = {}
255 249 ag = expand([a], amap)
256 250 bg = expand([b], bmap)
257 251 adone = bdone = 0
258 252
259 253 while not adone or not bdone:
260 254 if not adone:
261 255 an = ag.next()
262 256 if an == nullid:
263 257 adone = 1
264 258 elif an in bmap:
265 259 return an
266 260 if not bdone:
267 261 bn = bg.next()
268 262 if bn == nullid:
269 263 bdone = 1
270 264 elif bn in amap:
271 265 return bn
272 266
273 267 return nullid
274 268
275 def mergedag(self, other, transaction, linkseq, accumulate = None):
276 """combine the nodes from other's DAG into ours"""
277 old = self.tip()
278 i = self.count()
279 l = []
280
281 # merge the other revision log into our DAG
282 for r in range(other.count()):
283 id = other.node(r)
284 if id not in self.nodemap:
285 (xn, yn) = other.parents(id)
286 l.append((id, xn, yn))
287 self.nodemap[id] = i
288 i += 1
289
290 # merge node data for new nodes
291 r = other.revisions([e[0] for e in l])
292 for e in l:
293 t = r.next()
294 if accumulate: accumulate(t)
295 self.addrevision(t, transaction, linkseq.next(), e[1], e[2])
296
297 # return the unmerged heads for later resolving
298 return (old, self.tip())
299
300 269 def group(self, linkmap):
301 270 # given a list of changeset revs, return a set of deltas and
302 # metadata corresponding to nodes the first delta is
271 # metadata corresponding to nodes. the first delta is
303 272 # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
304 273 # have this parent as it has all history before these
305 274 # changesets. parent is parent[0]
306 275
307 276 revs = []
308 277 needed = {}
309 278
310 279 # find file nodes/revs that match changeset revs
311 280 for i in xrange(0, self.count()):
312 281 if self.index[i][3] in linkmap:
313 282 revs.append(i)
314 283 needed[i] = 1
315 284
316 285 # if we don't have any revisions touched by these changesets, bail
317 286 if not revs: return struct.pack(">l", 0)
318 287
319 288 # add the parent of the first rev
320 289 p = self.parents(self.node(revs[0]))[0]
321 290 revs.insert(0, self.rev(p))
322 291
323 292 # for each delta that isn't contiguous in the log, we need to
324 293 # reconstruct the base, reconstruct the result, and then
325 294 # calculate the delta. We also need to do this where we've
326 295 # stored a full version and not a delta
327 296 for i in xrange(0, len(revs) - 1):
328 297 a, b = revs[i], revs[i + 1]
329 298 if a + 1 != b or self.base(b) == b:
330 299 for j in xrange(self.base(a), a + 1):
331 300 needed[j] = 1
332 301 for j in xrange(self.base(b), b + 1):
333 302 needed[j] = 1
334 303
335 304 # calculate spans to retrieve from datafile
336 305 needed = needed.keys()
337 306 needed.sort()
338 307 spans = []
339 308 for n in needed:
340 309 if n < 0: continue
341 310 o = self.start(n)
342 311 l = self.length(n)
343 312 spans.append((o, l, [(n, l)]))
344 313
345 314 # merge spans
346 315 merge = [spans.pop(0)]
347 316 while spans:
348 317 e = spans.pop(0)
349 318 f = merge[-1]
350 319 if e[0] == f[0] + f[1]:
351 320 merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
352 321 else:
353 322 merge.append(e)
354 323
355 324 # read spans in, divide up chunks
356 325 chunks = {}
357 326 for span in merge:
358 327 # we reopen the file for each span to make http happy for now
359 328 f = self.opener(self.datafile)
360 329 f.seek(span[0])
361 330 data = f.read(span[1])
362 331
363 332 # divide up the span
364 333 pos = 0
365 334 for r, l in span[2]:
366 335 chunks[r] = data[pos: pos + l]
367 336 pos += l
368 337
369 338 # helper to reconstruct intermediate versions
370 339 def construct(text, base, rev):
371 340 bins = [decompress(chunks[r]) for r in xrange(base + 1, rev + 1)]
372 341 return mdiff.patches(text, bins)
373 342
374 343 # build deltas
375 344 deltas = []
376 345 for d in xrange(0, len(revs) - 1):
377 346 a, b = revs[d], revs[d + 1]
378 347 n = self.node(b)
379 348
380 349 if a + 1 != b or self.base(b) == b:
381 350 if a >= 0:
382 351 base = self.base(a)
383 352 ta = decompress(chunks[self.base(a)])
384 353 ta = construct(ta, base, a)
385 354 else:
386 355 ta = ""
387 356
388 357 base = self.base(b)
389 358 if a > base:
390 359 base = a
391 360 tb = ta
392 361 else:
393 362 tb = decompress(chunks[self.base(b)])
394 363 tb = construct(tb, base, b)
395 364 d = self.diff(ta, tb)
396 365 else:
397 366 d = decompress(chunks[b])
398 367
399 368 p = self.parents(n)
400 369 meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
401 370 l = struct.pack(">l", len(meta) + len(d) + 4)
402 371 deltas.append(l + meta + d)
403 372
404 373 l = struct.pack(">l", sum(map(len, deltas)) + 4)
405 374 deltas.insert(0, l)
406 375 return "".join(deltas)
407 376
408 377 def addgroup(self, data, linkmapper, transaction):
409 378 # given a set of deltas, add them to the revision log. the
410 379 # first delta is against its parent, which should be in our
411 380 # log, the rest are against the previous delta.
412 381
413 382 if not data: return self.tip()
414 383
415 384 # retrieve the parent revision of the delta chain
416 385 chain = data[24:44]
417 386 if not chain in self.nodemap:
418 387 raise "unknown base %s" % short(chain[:4])
419 388
420 389 # track the base of the current delta log
421 390 r = self.count()
422 391 t = r - 1
423 392
424 393 base = prev = -1
425 394 start = end = 0
426 395 if r:
427 396 start = self.start(self.base(t))
428 397 end = self.end(t)
429 398 measure = self.length(self.base(t))
430 399 base = self.base(t)
431 400 prev = self.tip()
432 401
433 402 transaction.add(self.datafile, end)
434 403 transaction.add(self.indexfile, r * struct.calcsize(indexformat))
435 404 dfh = self.opener(self.datafile, "a")
436 405 ifh = self.opener(self.indexfile, "a")
437 406
438 407 # loop through our set of deltas
439 408 pos = 0
440 409 while pos < len(data):
441 410 l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
442 411 data[pos:pos+84])
412 link = linkmapper(cs)
443 413 if node in self.nodemap:
444 414 raise "already have %s" % hex(node[:4])
445 link = linkmapper(cs)
446 415 delta = data[pos + 84:pos + l]
447 416 pos += l
448 417
449 418 # full versions are inserted when the needed deltas become
450 419 # comparable to the uncompressed text or when the previous
451 420 # version is not the one we have a delta against. We use
452 421 # the size of the previous full rev as a proxy for the
453 422 # current size.
454 423
455 424 if chain == prev:
456 425 cdelta = compress(delta)
457 426
458 427 if chain != prev or (end - start + len(cdelta)) > measure * 2:
459 428 # flush our writes here so we can read it in revision
460 429 dfh.flush()
461 430 ifh.flush()
462 431 text = self.revision(chain)
463 432 text = self.patches(text, [delta])
464 433 chk = self.addrevision(text, transaction, link, p1, p2)
465 434 if chk != node:
466 435 raise "consistency error adding group"
467 436 measure = len(text)
468 437 else:
469 438 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
470 439 self.index.append(e)
471 440 self.nodemap[node] = r
472 441 dfh.write(cdelta)
473 442 ifh.write(struct.pack(indexformat, *e))
474 443
475 444 t, r, chain, prev = r, r + 1, node, node
476 445 start = self.start(self.base(t))
477 446 end = self.end(t)
478 447
479 448 dfh.close()
480 449 ifh.close()
481 450 return node
General Comments 0
You need to be logged in to leave comments. Login now