@@ -561,7 +561,8 @@ def unshelve(ui, repo, *shelved, **opts)
             ui.quiet = True
             fp = shelvedfile(repo, basename, 'hg').opener()
             gen = changegroup.readbundle(fp, fp.name)
+            changegroup.addchangegroup(repo, gen, 'unshelve',
+                                       'bundle:' + fp.name)
             nodes = [ctx.node() for ctx in repo.set('%d:', oldtiprev)]
             phases.retractboundary(repo, phases.secret, nodes)
         finally:
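
The hunk above switches the unshelve path from the old repository method to the relocated module-level helper. As a point of reference, here is a minimal, hypothetical caller using the same API from outside Mercurial's own code; the repository path and bundle name are invented, and mercurial.ui / mercurial.hg are assumed to expose ui() and repository() as they do in mainline Mercurial of this era.

# Hypothetical standalone caller of the relocated API (not part of this patch).
from mercurial import ui as uimod, hg, changegroup

def applybundle(repopath, bundlepath):
    u = uimod.ui()
    repo = hg.repository(u, repopath)
    lock = repo.lock()
    try:
        f = open(bundlepath, 'rb')
        try:
            gen = changegroup.readbundle(f, bundlepath)
            # srctype and URL are informational; the return value follows the
            # convention documented in addchangegroup()'s docstring below.
            return changegroup.addchangegroup(repo, gen, 'unbundle',
                                              'bundle:' + bundlepath)
        finally:
            f.close()
    finally:
        lock.release()
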
@@ -5,11 +5,12 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

+import weakref
 from i18n import _
-from node import nullrev, nullid, hex
+from node import nullrev, nullid, hex, short
 import mdiff, util, dagutil
 import struct, os, bz2, zlib, tempfile
-import discovery, error
+import discovery, error, phases, branchmap

 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"

@@ -554,3 +555,184 @@ def addchangegroupfiles(repo, source, re
                     (f, hex(n)))

     return revisions, files
+
+def addchangegroup(repo, source, srctype, url, emptyok=False):
+    """Add the changegroup returned by source.read() to this repo.
+    srctype is a string like 'push', 'pull', or 'unbundle'. url is
+    the URL of the repo where this changegroup is coming from.
+
+    Return an integer summarizing the change to this repo:
+    - nothing changed or no source: 0
+    - more heads than before: 1+added heads (2..n)
+    - fewer heads than before: -1-removed heads (-2..-n)
+    - number of heads stays the same: 1
+    """
+    repo = repo.unfiltered()
+    def csmap(x):
+        repo.ui.debug("add changeset %s\n" % short(x))
+        return len(cl)
+
+    def revmap(x):
+        return cl.rev(x)
+
+    if not source:
+        return 0
+
+    repo.hook('prechangegroup', throw=True, source=srctype, url=url)
+
+    changesets = files = revisions = 0
+    efiles = set()
+
+    # write changelog data to temp files so concurrent readers will not see
+    # inconsistent view
+    cl = repo.changelog
+    cl.delayupdate()
+    oldheads = cl.heads()
+
+    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
+    try:
+        trp = weakref.proxy(tr)
+        # pull off the changeset group
+        repo.ui.status(_("adding changesets\n"))
+        clstart = len(cl)
+        class prog(object):
+            step = _('changesets')
+            count = 1
+            ui = repo.ui
+            total = None
+            def __call__(repo):
+                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
+                                 total=repo.total)
+                repo.count += 1
+        pr = prog()
+        source.callback = pr
+
+        source.changelogheader()
+        srccontent = cl.addgroup(source, csmap, trp)
+        if not (srccontent or emptyok):
+            raise util.Abort(_("received changelog group is empty"))
+        clend = len(cl)
+        changesets = clend - clstart
+        for c in xrange(clstart, clend):
+            efiles.update(repo[c].files())
+        efiles = len(efiles)
+        repo.ui.progress(_('changesets'), None)
+
+        # pull off the manifest group
+        repo.ui.status(_("adding manifests\n"))
+        pr.step = _('manifests')
+        pr.count = 1
+        pr.total = changesets # manifests <= changesets
+        # no need to check for empty manifest group here:
+        # if the result of the merge of 1 and 2 is the same in 3 and 4,
+        # no new manifest will be created and the manifest group will
+        # be empty during the pull
+        source.manifestheader()
+        repo.manifest.addgroup(source, revmap, trp)
+        repo.ui.progress(_('manifests'), None)
+
+        needfiles = {}
+        if repo.ui.configbool('server', 'validate', default=False):
+            # validate incoming csets have their manifests
+            for cset in xrange(clstart, clend):
+                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
+                mfest = repo.manifest.readdelta(mfest)
+                # store file nodes we must see
+                for f, n in mfest.iteritems():
+                    needfiles.setdefault(f, set()).add(n)
+
+        # process the files
+        repo.ui.status(_("adding file changes\n"))
+        pr.step = _('files')
+        pr.count = 1
+        pr.total = efiles
+        source.callback = None
+
+        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
+                                                needfiles)
+        revisions += newrevs
+        files += newfiles
+
+        dh = 0
+        if oldheads:
+            heads = cl.heads()
+            dh = len(heads) - len(oldheads)
+            for h in heads:
+                if h not in oldheads and repo[h].closesbranch():
+                    dh -= 1
+        htext = ""
+        if dh:
+            htext = _(" (%+d heads)") % dh
+
+        repo.ui.status(_("added %d changesets"
+                         " with %d changes to %d files%s\n")
+                       % (changesets, revisions, files, htext))
+        repo.invalidatevolatilesets()
+
+        if changesets > 0:
+            p = lambda: cl.writepending() and repo.root or ""
+            repo.hook('pretxnchangegroup', throw=True,
+                      node=hex(cl.node(clstart)), source=srctype,
+                      url=url, pending=p)
+
+        added = [cl.node(r) for r in xrange(clstart, clend)]
+        publishing = repo.ui.configbool('phases', 'publish', True)
+        if srctype == 'push':
+            # Old servers can not push the boundary themselves.
+            # New servers won't push the boundary if changeset already
+            # exists locally as secret
+            #
+            # We should not use added here but the list of all change in
+            # the bundle
+            if publishing:
+                phases.advanceboundary(repo, phases.public, srccontent)
+            else:
+                phases.advanceboundary(repo, phases.draft, srccontent)
+                phases.retractboundary(repo, phases.draft, added)
+        elif srctype != 'strip':
+            # publishing only alter behavior during push
+            #
+            # strip should not touch boundary at all
+            phases.retractboundary(repo, phases.draft, added)
+
+        # make changelog see real files again
+        cl.finalize(trp)
+
+        tr.close()
+
+        if changesets > 0:
+            if srctype != 'strip':
+                # During strip, branchcache is invalid but coming call to
+                # `destroyed` will repair it.
+                # In other case we can safely update cache on disk.
+                branchmap.updatecache(repo.filtered('served'))
+            def runhooks():
+                # These hooks run when the lock releases, not when the
+                # transaction closes. So it's possible for the changelog
+                # to have changed since we last saw it.
+                if clstart >= len(repo):
+                    return
+
+                # forcefully update the on-disk branch cache
+                repo.ui.debug("updating the branch cache\n")
+                repo.hook("changegroup", node=hex(cl.node(clstart)),
+                          source=srctype, url=url)
+
+                for n in added:
+                    repo.hook("incoming", node=hex(n), source=srctype,
+                              url=url)
+
+                newheads = [h for h in repo.heads() if h not in oldheads]
+                repo.ui.log("incoming",
+                            "%s incoming changes - new heads: %s\n",
+                            len(added),
+                            ', '.join([hex(c[:6]) for c in newheads]))
+            repo._afterlock(runhooks)
+
+    finally:
+        tr.release()
+    # never return 0 here:
+    if dh < 0:
+        return dh - 1
+    else:
+        return dh + 1
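
The return-value convention in the docstring above is the contract that callers such as pull and unbundle rely on for their modheads variable. Purely as an illustration of that convention, and not as part of the patch, a standalone helper that decodes the integer could look like this:

# Illustration of the integer convention documented above (not in the patch).
def describe_modheads(modheads):
    if modheads == 0:
        return "no changesets added"
    if modheads == 1:
        return "changesets added, head count unchanged"
    if modheads > 1:
        return "changesets added, %d new head(s)" % (modheads - 1)
    # by the convention above, negative results start at -2
    return "changesets added, %d head(s) removed" % (-1 - modheads)

assert describe_modheads(3) == "changesets added, 2 new head(s)"
assert describe_modheads(-2) == "changesets added, 1 head(s) removed"
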
@@ -5780,7 +5780,8 @@ def unbundle(ui, repo, fname1, *fnames, 
         for fname in fnames:
             f = hg.openpath(ui, fname)
             gen = changegroup.readbundle(f, fname)
-            modheads =
+            modheads = changegroup.addchangegroup(repo, gen, 'unbundle',
+                                                  'bundle:' + fname)
     finally:
         lock.release()
     bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch())
@@ -508,7 +508,7 @@ def _pullchangeset(pullop):
                            "changegroupsubset."))
     else:
         cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
-    pullop.cgresult = pullop.repo
+    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                  pullop.remote.url())

 def _pullphase(pullop):
@@ -114,7 +114,7 @@ class localpeer(peer.peerrepository):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
-        return self._repo
+        return changegroup.addchangegroup(self._repo, cg, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
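
Note that the peer-facing signature is unchanged: localpeer.addchangegroup(cg, source, url) still exists and now simply forwards to the module-level function with the wrapped repository as the first argument. A toy sketch of that delegation shape, with invented names and nothing Mercurial-specific, in case the indirection is not obvious:

# Toy model of the delegation above; all names here are invented.
def addchangegroup(repo, cg, source, url):       # module-level function
    return repo.apply(cg, source, url)

class fakerepo(object):
    def apply(self, cg, source, url):
        return 1                                 # pretend: heads unchanged

class fakepeer(object):
    def __init__(self, repo):
        self._repo = repo
    def addchangegroup(self, cg, source, url):   # old peer signature kept
        return addchangegroup(self._repo, cg, source, url)

assert fakepeer(fakerepo()).addchangegroup('cg', 'push', 'http://x') == 1
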
@@ -1683,192 +1683,6 @@ class localrepository(object):
     def push(self, remote, force=False, revs=None, newbranch=False):
         return exchange.push(self, remote, force, revs, newbranch)

-    @unfilteredmethod
-    def addchangegroup(self, source, srctype, url, emptyok=False):
-        """Add the changegroup returned by source.read() to this repo.
-        srctype is a string like 'push', 'pull', or 'unbundle'. url is
-        the URL of the repo where this changegroup is coming from.
-
-        Return an integer summarizing the change to this repo:
-        - nothing changed or no source: 0
-        - more heads than before: 1+added heads (2..n)
-        - fewer heads than before: -1-removed heads (-2..-n)
-        - number of heads stays the same: 1
-        """
-        def csmap(x):
-            self.ui.debug("add changeset %s\n" % short(x))
-            return len(cl)
-
-        def revmap(x):
-            return cl.rev(x)
-
-        if not source:
-            return 0
-
-        self.hook('prechangegroup', throw=True, source=srctype, url=url)
-
-        changesets = files = revisions = 0
-        efiles = set()
-
-        # write changelog data to temp files so concurrent readers will not see
-        # inconsistent view
-        cl = self.changelog
-        cl.delayupdate()
-        oldheads = cl.heads()
-
-        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
-        try:
-            trp = weakref.proxy(tr)
-            # pull off the changeset group
-            self.ui.status(_("adding changesets\n"))
-            clstart = len(cl)
-            class prog(object):
-                step = _('changesets')
-                count = 1
-                ui = self.ui
-                total = None
-                def __call__(self):
-                    self.ui.progress(self.step, self.count, unit=_('chunks'),
-                                     total=self.total)
-                    self.count += 1
-            pr = prog()
-            source.callback = pr
-
-            source.changelogheader()
-            srccontent = cl.addgroup(source, csmap, trp)
-            if not (srccontent or emptyok):
-                raise util.Abort(_("received changelog group is empty"))
-            clend = len(cl)
-            changesets = clend - clstart
-            for c in xrange(clstart, clend):
-                efiles.update(self[c].files())
-            efiles = len(efiles)
-            self.ui.progress(_('changesets'), None)
-
-            # pull off the manifest group
-            self.ui.status(_("adding manifests\n"))
-            pr.step = _('manifests')
-            pr.count = 1
-            pr.total = changesets # manifests <= changesets
-            # no need to check for empty manifest group here:
-            # if the result of the merge of 1 and 2 is the same in 3 and 4,
-            # no new manifest will be created and the manifest group will
-            # be empty during the pull
-            source.manifestheader()
-            self.manifest.addgroup(source, revmap, trp)
-            self.ui.progress(_('manifests'), None)
-
-            needfiles = {}
-            if self.ui.configbool('server', 'validate', default=False):
-                # validate incoming csets have their manifests
-                for cset in xrange(clstart, clend):
-                    mfest = self.changelog.read(self.changelog.node(cset))[0]
-                    mfest = self.manifest.readdelta(mfest)
-                    # store file nodes we must see
-                    for f, n in mfest.iteritems():
-                        needfiles.setdefault(f, set()).add(n)
-
-            # process the files
-            self.ui.status(_("adding file changes\n"))
-            pr.step = _('files')
-            pr.count = 1
-            pr.total = efiles
-            source.callback = None
-
-            newrevs, newfiles = changegroup.addchangegroupfiles(self,
-                                                                source,
-                                                                revmap,
-                                                                trp,
-                                                                pr,
-                                                                needfiles)
-            revisions += newrevs
-            files += newfiles
-
-            dh = 0
-            if oldheads:
-                heads = cl.heads()
-                dh = len(heads) - len(oldheads)
-                for h in heads:
-                    if h not in oldheads and self[h].closesbranch():
-                        dh -= 1
-            htext = ""
-            if dh:
-                htext = _(" (%+d heads)") % dh
-
-            self.ui.status(_("added %d changesets"
-                             " with %d changes to %d files%s\n")
-                           % (changesets, revisions, files, htext))
-            self.invalidatevolatilesets()
-
-            if changesets > 0:
-                p = lambda: cl.writepending() and self.root or ""
-                self.hook('pretxnchangegroup', throw=True,
-                          node=hex(cl.node(clstart)), source=srctype,
-                          url=url, pending=p)
-
-            added = [cl.node(r) for r in xrange(clstart, clend)]
-            publishing = self.ui.configbool('phases', 'publish', True)
-            if srctype == 'push':
-                # Old servers can not push the boundary themselves.
-                # New servers won't push the boundary if changeset already
-                # exists locally as secret
-                #
-                # We should not use added here but the list of all change in
-                # the bundle
-                if publishing:
-                    phases.advanceboundary(self, phases.public, srccontent)
-                else:
-                    phases.advanceboundary(self, phases.draft, srccontent)
-                    phases.retractboundary(self, phases.draft, added)
-            elif srctype != 'strip':
-                # publishing only alter behavior during push
-                #
-                # strip should not touch boundary at all
-                phases.retractboundary(self, phases.draft, added)
-
-            # make changelog see real files again
-            cl.finalize(trp)
-
-            tr.close()
-
-            if changesets > 0:
-                if srctype != 'strip':
-                    # During strip, branchcache is invalid but coming call to
-                    # `destroyed` will repair it.
-                    # In other case we can safely update cache on disk.
-                    branchmap.updatecache(self.filtered('served'))
-                def runhooks():
-                    # These hooks run when the lock releases, not when the
-                    # transaction closes. So it's possible for the changelog
-                    # to have changed since we last saw it.
-                    if clstart >= len(self):
-                        return
-
-                    # forcefully update the on-disk branch cache
-                    self.ui.debug("updating the branch cache\n")
-                    self.hook("changegroup", node=hex(cl.node(clstart)),
-                              source=srctype, url=url)
-
-                    for n in added:
-                        self.hook("incoming", node=hex(n), source=srctype,
-                                  url=url)
-
-                    newheads = [h for h in self.heads() if h not in oldheads]
-                    self.ui.log("incoming",
-                                "%s incoming changes - new heads: %s\n",
-                                len(added),
-                                ', '.join([hex(c[:6]) for c in newheads]))
-                self._afterlock(runhooks)
-
-        finally:
-            tr.release()
-        # never return 0 here:
-        if dh < 0:
-            return dh - 1
-        else:
-            return dh + 1
-
-
     def stream_in(self, remote, requirements):
         lock = self.lock()
         try:
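
With the method removed from localrepository here, any out-of-tree extension that called repo.addchangegroup(...) directly will break against this revision. A hedged compatibility sketch such an extension might carry (the helper name is invented); the two signatures it dispatches between are exactly the ones shown in this diff:

# Hypothetical compatibility helper for extensions spanning this refactoring.
from mercurial import changegroup

def applychangegroup(repo, gen, srctype, url, emptyok=False):
    if hasattr(changegroup, 'addchangegroup'):
        # this revision and later: module-level function
        return changegroup.addchangegroup(repo, gen, srctype, url,
                                          emptyok=emptyok)
    # older Mercurial: method on the repository object
    return repo.addchangegroup(gen, srctype, url, emptyok=emptyok)
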
@@ -148,7 +148,8 @@ def strip(ui, repo, nodelist, backup="al
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
+            changegroup.addchangegroup(repo, gen, 'strip',
+                                       'bundle:' + chgrpfile, True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
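
strip is the one caller that passes the fifth argument (emptyok=True), presumably because the temporary bundle it re-applies after truncating the repository can contain no changesets at all; without the flag, addchangegroup would abort on the empty changelog group. A standalone restatement of that guard, using ValueError in place of util.Abort, purely for illustration:

# Mirrors the emptyok guard inside addchangegroup() (illustration only).
def checkgroup(srccontent, emptyok):
    if not (srccontent or emptyok):
        raise ValueError("received changelog group is empty")
    return srccontent

checkgroup(['somenode'], False)   # push/pull/unbundle with real content: ok
checkgroup([], True)              # strip re-applying an empty backup: ok
try:
    checkgroup([], False)         # empty group on a normal path: aborts
except ValueError:
    pass
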
@@ -143,7 +143,7 @@ class sshserver(wireproto.abstractserver

         self.sendresponse("")
         cg = changegroup.unbundle10(self.fin, "UN")
-        r = self.repo
+        r = changegroup.addchangegroup(self.repo, cg, 'serve', self._client())
         self.lock.release()
         return str(r)

@@ -786,7 +786,8 @@ def unbundle(repo, proto, heads):
         gen = changegroupmod.readbundle(fp, None)

         try:
-            r =
+            r = changegroupmod.addchangegroup(repo, gen, 'serve',
+                                              proto._client())
         except util.Abort, inst:
             sys.stderr.write("abort: %s\n" % inst)
         finally:
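
Both server paths above (sshserver and wireproto) return str(r), so the integer convention travels over the wire as text. Purely as an illustration, and without claiming anything about the real client-side code, a consumer at the other end could turn that reply back into the modheads integer like this:

# Illustration only: decode the stringified result sent by the serve paths.
def parsepushresult(wirevalue):
    try:
        return int(wirevalue)
    except ValueError:
        # a non-integer reply indicates an error message from the server
        return None

assert parsepushresult(str(1)) == 1
assert parsepushresult("abort: received changelog group is empty") is None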