@@ -8,12 +8,13 @@
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
-# the psyco compiler makes commits a bit faster
-try:
-    import psyco
-    psyco.full()
-except:
-    pass
+# the psyco compiler makes commits a bit faster
+# and makes changegroup merge about 20 times slower!
+# try:
+#     import psyco
+#     psyco.full()
+# except:
+#     pass
 
 import sys, os, time
 from mercurial import hg, mdiff, fancyopts
@@ -175,6 +176,15 @@ elif cmd == "export":
     prev = repo.changelog.parents(node)[0]
     diff(None, prev, node)
 
+elif cmd == "debugchangegroup":
+    newer = repo.newer(repo.changelog.lookup(args[0]))
+    cg = repo.changegroup(newer)
+    sys.stdout.write(cg)
+
+elif cmd == "debugaddchangegroup":
+    data = sys.stdin.read()
+    repo.addchangegroup(data)
+
 elif cmd == "addremove":
     (c, a, d) = repo.diffdir(repo.root, repo.current)
     repo.add(a)
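Together these two commands form a crude pull: one repository serializes everything newer than a known base changeset to stdout, and the other replays it from stdin (e.g. hg debugchangegroup BASE in one repo piped into hg debugaddchangegroup in another). A minimal sketch of the same flow through the repository API; the repo paths and ui construction here are assumptions for illustration, not part of this change:

    from mercurial import hg

    ui = hg.ui()
    src = hg.repository(ui, "/repos/upstream")   # hypothetical paths
    dst = hg.repository(ui, "/repos/local")

    base = dst.changelog.tip()        # assumes dst's tip also exists in src
    nodes = src.newer(base)           # changesets src has beyond that base
    dst.addchangegroup(src.changegroup(nodes))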
@@ -559,6 +559,151 @@ class repository:
         for f in list:
             dl.write(f + "\n")
 
+    def newer(self, node):
+        nodes = []
+        for i in xrange(self.changelog.rev(node) + 1, self.changelog.count()):
+            nodes.append(self.changelog.node(i))
+
+        return nodes
+
+    def changegroup(self, nodes):
+        # construct the link map
+        linkmap = {}
+        for n in nodes:
+            linkmap[self.changelog.rev(n)] = n
+
+        # construct a list of all changed files
+        changed = {}
+        for n in nodes:
+            c = self.changelog.read(n)
+            for f in c[3]:
+                changed[f] = 1
+        changed = changed.keys()
+        changed.sort()
+
+        # the changegroup is changesets + manifests + all file revs
+        cg = []
+        revs = [ self.changelog.rev(n) for n in nodes ]
+
+        g = self.changelog.group(linkmap)
+        cg.append(g)
+        g = self.manifest.group(linkmap)
+        cg.append(g)
+
+        for f in changed:
+            g = self.file(f).group(linkmap)
+            if not g: raise "couldn't find change to %s" % f
+            l = struct.pack(">l", len(f))
+            cg += [l, f, g]
+
+        return compress("".join(cg))
+
+    def addchangegroup(self, data):
+        data = decompress(data)
+        def getlen(data, pos):
+            return struct.unpack(">l", data[pos:pos + 4])[0]
+
+        tr = self.transaction()
+        simple = True
+
+        print "merging changesets"
+        # pull off the changeset group
+        l = getlen(data, 0)
+        csg = data[0:l]
+        pos = l
+        co = self.changelog.tip()
+        cn = self.changelog.addgroup(csg, lambda x: self.changelog.count(), tr)
+
+        print "merging manifests"
+        # pull off the manifest group
+        l = getlen(data, pos)
+        mfg = data[pos: pos + l]
+        pos += l
+        mo = self.manifest.tip()
+        mn = self.manifest.addgroup(mfg, lambda x: self.changelog.rev(x), tr)
+
+        # do we need a resolve?
+        if self.changelog.ancestor(co, cn) != co:
+            print "NEED RESOLVE"
+            simple = False
+            resolverev = self.changelog.count()
+
+        # process the files
+        print "merging files"
+        new = {}
+        while pos < len(data):
+            l = getlen(data, pos)
+            pos += 4
+            f = data[pos:pos + l]
+            pos += l
+
+            l = getlen(data, pos)
+            fg = data[pos: pos + l]
+            pos += l
+
+            fl = self.file(f)
+            o = fl.tip()
+            n = fl.addgroup(fg, lambda x: self.changelog.rev(x), tr)
+            if not simple:
+                new[f] = fl.resolvedag(o, n, tr, resolverev)
+
+        # For simple merges, we don't need to resolve manifests or changesets
+        if simple:
+            tr.close()
+            return
+
+        # resolve the manifest to point to all the merged files
+        self.ui.status("resolving manifests\n")
+        ma = self.manifest.ancestor(mn, mo)
+        mmap = self.manifest.read(mn) # mine
+        omap = self.manifest.read(mo) # other
+        amap = self.manifest.read(ma) # ancestor
+        nmap = {}
+
+        for f, mid in mmap.iteritems():
+            if f in omap:
+                if mid != omap[f]:
+                    nmap[f] = new.get(f, mid) # use merged version
+                else:
+                    nmap[f] = new.get(f, mid) # they're the same
+                del omap[f]
+            elif f in amap:
+                if mid != amap[f]:
+                    pass # we should prompt here
+                else:
+                    pass # other deleted it
+            else:
+                nmap[f] = new.get(f, mid) # we created it
+
+        del mmap
+
+        for f, oid in omap.iteritems():
+            if f in amap:
+                if oid != amap[f]:
+                    pass # this is the nasty case, we should prompt
+                else:
+                    pass # probably safe
+            else:
+                nmap[f] = new.get(f, oid) # remote created it
+
+        del omap
+        del amap
+
+        node = self.manifest.add(nmap, tr, resolverev, mn, mo)
+
+        # Now all files and manifests are merged, we add the changed files
+        # and manifest id to the changelog
+        self.ui.status("committing merge changeset\n")
+        new = new.keys()
+        new.sort()
+        if co == cn: cn = -1
+
+        edittext = "\n"+"".join(["HG: changed %s\n" % f for f in new])
+        edittext = self.ui.edit(edittext)
+        n = self.changelog.add(node, new, edittext, tr, co, cn)
+
+        tr.close()
+
 class ui:
     def __init__(self, verbose=False, debug=False):
         self.verbose = verbose
@@ -227,3 +227,186 @@ class revlog:
 
         # return the unmerged heads for later resolving
         return (old, self.tip())
+
+    def group(self, linkmap):
+        # Given a list of changeset revs, return a set of deltas and
+        # metadata corresponding to those nodes. The first delta is
+        # parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
+        # have this parent, as it has all history before these
+        # changesets. The parent used is parents()[0].
+
+        revs = []
+        needed = {}
+
+        # find file nodes/revs that match changeset revs
+        for i in xrange(0, self.count()):
+            if self.index[i][3] in linkmap:
+                revs.append(i)
+                needed[i] = 1
+
+        # if we don't have any revisions touched by these changesets, bail
+        if not revs: return struct.pack(">l", 0)
+
+        # add the parent of the first rev
+        p = self.parents(self.node(revs[0]))[0]
+        revs.insert(0, self.rev(p))
+
+        # for each delta that isn't contiguous in the log, we need to
+        # reconstruct the base, reconstruct the result, and then
+        # calculate the delta. We also need to do this where we've
+        # stored a full version and not a delta
+        for i in xrange(0, len(revs) - 1):
+            a, b = revs[i], revs[i + 1]
+            if a + 1 != b or self.base(b) == b:
+                for j in xrange(self.base(a), a + 1):
+                    needed[j] = 1
+                for j in xrange(self.base(b), b + 1):
+                    needed[j] = 1
+
+        # calculate spans to retrieve from datafile
+        needed = needed.keys()
+        needed.sort()
+        spans = []
+        for n in needed:
+            if n < 0: continue
+            o = self.start(n)
+            l = self.length(n)
+            spans.append((o, l, [(n, l)]))
+
+        # merge spans
+        merge = [spans.pop(0)]
+        while spans:
+            e = spans.pop(0)
+            f = merge[-1]
+            if e[0] == f[0] + f[1]:
+                merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
+            else:
+                merge.append(e)
+
+        # read spans in, divide up chunks
+        chunks = {}
+        for span in merge:
+            # we reopen the file for each span to make http happy for now
+            f = self.opener(self.datafile)
+            f.seek(span[0])
+            data = f.read(span[1])
+
+            # divide up the span
+            pos = 0
+            for r, l in span[2]:
+                chunks[r] = data[pos: pos + l]
+                pos += l
+
+        # helper to reconstruct intermediate versions
+        def construct(text, base, rev):
+            for r in range(base + 1, rev + 1):
+                b = decompress(chunks[r])
+                text = self.patch(text, b)
+            return text
+
+        # build deltas
+        deltas = []
+        for d in range(0, len(revs) - 1):
+            a, b = revs[d], revs[d + 1]
+            n = self.node(b)
+
+            if a + 1 != b or self.base(b) == b:
+                if a >= 0:
+                    base = self.base(a)
+                    ta = decompress(chunks[self.base(a)])
+                    ta = construct(ta, base, a)
+                else:
+                    ta = ""
+
+                base = self.base(b)
+                if a > base:
+                    base = a
+                    tb = ta
+                else:
+                    tb = decompress(chunks[self.base(b)])
+                tb = construct(tb, base, b)
+                d = self.diff(ta, tb)
+            else:
+                d = decompress(chunks[b])
+
+            p = self.parents(n)
+            meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
+            l = struct.pack(">l", len(meta) + len(d) + 4)
+            deltas.append(l + meta + d)
+
+        l = struct.pack(">l", sum(map(len, deltas)) + 4)
+        deltas.insert(0, l)
+        return "".join(deltas)
+
+    def addgroup(self, data, linkmapper, transaction):
+        # given a set of deltas, add them to the revision log. the
+        # first delta is against its parent, which should be in our
+        # log, the rest are against the previous delta.
+
+        if len(data) <= 4: return
+
+        # retrieve the parent revision of the delta chain
+        chain = data[28:48]
+        text = self.revision(chain)
+
+        # track the base of the current delta log
+        r = self.count()
+        t = r - 1
+
+        base = prev = -1
+        start = end = 0
+        if r:
+            start = self.start(self.base(t))
+            end = self.end(t)
+            measure = self.length(self.base(t))
+            base = self.base(t)
+            prev = self.tip()
+
+        transaction.add(self.datafile, end)
+        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
+        dfh = self.opener(self.datafile, "a")
+        ifh = self.opener(self.indexfile, "a")
+
+        # loop through our set of deltas
+        pos = 4
+        while pos < len(data):
+            l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
+                                                data[pos:pos+84])
+            link = linkmapper(cs)
+            delta = data[pos + 84:pos + l]
+            pos += l
+
+            # full versions are inserted when the needed deltas become
+            # comparable to the uncompressed text or when the previous
+            # version is not the one we have a delta against. We use
+            # the size of the previous full rev as a proxy for the
+            # current size.
+
+            if chain == prev:
+                cdelta = compress(delta)
+
+            if chain != prev or (end - start + len(cdelta)) > measure * 2:
+                # flush our writes here so we can read it in revision
+                dfh.flush()
+                ifh.flush()
+                text = self.revision(self.node(t))
+                text = self.patch(text, delta)
+                chk = self.addrevision(text, transaction, link, p1, p2)
+                if chk != node:
+                    raise "consistency error adding group"
+                measure = len(text)
+            else:
+                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
+                self.index.append(e)
+                self.nodemap[node] = r
+                dfh.write(cdelta)
+                ifh.write(struct.pack(indexformat, *e))
+
+            t, r = r, r + 1
+            chain = prev
+            start = self.start(self.base(t))
+            end = self.end(t)
+
+        dfh.close()
+        ifh.close()
+        return node
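The size check in the middle of the loop decides between appending a compressed delta and storing a fresh snapshot: once the byte span of the current delta chain plus the incoming delta exceeds twice the length of the last full version, a full text is written instead, bounding reconstruction cost. A worked example with made-up numbers:

    measure = 1000                  # length of the last full version
    start, end = 0, 1800            # byte span of the current delta chain
    cdelta = "x" * 300              # the incoming compressed delta

    snapshot = (end - start + len(cdelta)) > measure * 2
    # (1800 - 0 + 300) = 2100 > 2000, so a full revision is stored and the
    # chain is reset; otherwise the delta would be appended as-is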