@@ -1,95 +1,96 @@
 # __init__.py - narrowhg extension
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 '''create clones which fetch history data for subset of files (EXPERIMENTAL)'''

 from __future__ import absolute_import

 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'

 from mercurial import (
+    changegroup,
     extensions,
     hg,
     localrepo,
     registrar,
     verify as verifymod,
 )

 from . import (
     narrowbundle2,
     narrowchangegroup,
     narrowcommands,
     narrowcopies,
     narrowdirstate,
     narrowmerge,
     narrowpatch,
     narrowrepo,
     narrowrevlog,
     narrowtemplates,
     narrowwirepeer,
 )

 configtable = {}
 configitem = registrar.configitem(configtable)
 # Narrowhg *has* support for serving ellipsis nodes (which are used at
 # least by Google's internal server), but that support is pretty
 # fragile and has a lot of problems on real-world repositories that
 # have complex graph topologies. This could probably be corrected, but
 # absent someone needing the full support for ellipsis nodes in
 # repositories with merges, it's unlikely this work will get done. As
 # of this writining in late 2017, all repositories large enough for
 # ellipsis nodes to be a hard requirement also enforce strictly linear
 # history for other scaling reasons.
 configitem('experimental', 'narrowservebrokenellipses',
            default=False,
            alias=[('narrow', 'serveellipses')],
 )

 # Export the commands table for Mercurial to see.
 cmdtable = narrowcommands.table

-localrepo.localrepository._basesupported.add(
+localrepo.localrepository._basesupported.add(changegroup.NARROW_REQUIREMENT)

 def uisetup(ui):
     """Wraps user-facing mercurial commands with narrow-aware versions."""
     narrowrevlog.setup()
     narrowbundle2.setup()
     narrowmerge.setup()
     narrowcommands.setup()
     narrowchangegroup.setup()
     narrowwirepeer.uisetup()

 def reposetup(ui, repo):
     """Wraps local repositories with narrow repo support."""
     if not isinstance(repo, localrepo.localrepository):
         return

-    if
+    if changegroup.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo, True)
         narrowcopies.setup(repo)
         narrowdirstate.setup(repo)
         narrowpatch.setup(repo)
         narrowwirepeer.reposetup(repo)

 def _verifierinit(orig, self, repo, matcher=None):
     # The verifier's matcher argument was desgined for narrowhg, so it should
     # be None from core. If another extension passes a matcher (unlikely),
     # we'll have to fail until matchers can be composed more easily.
     assert matcher is None
     matcher = getattr(repo, 'narrowmatch', lambda: None)()
     orig(self, repo, matcher)

 def extsetup(ui):
     extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
     extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
     extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)

 templatekeyword = narrowtemplates.templatekeyword
 revsetpredicate = narrowtemplates.revsetpredicate
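
The configitem() registered above is what gates the fragile ellipsis-serving path: getbundlechangegrouppart_narrow in narrowbundle2.py (next hunk) checks repo.ui.configbool("experimental", "narrowservebrokenellipses") before computing ellipsis nodes. As a minimal sketch of the server-side hgrc this implies (only the section and option names come from the code above; everything else is illustrative):

    [experimental]
    narrowservebrokenellipses = True

The older spelling, [narrow] serveellipses, keeps working through the alias registered above.
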
@@ -1,494 +1,490 @@
 # narrowbundle2.py - bundle2 extensions for narrow repository support
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import collections
 import errno
 import struct

 from mercurial.i18n import _
 from mercurial.node import (
     bin,
     nullid,
     nullrev,
 )
 from mercurial import (
     bundle2,
     changegroup,
     dagutil,
     error,
     exchange,
     extensions,
     narrowspec,
     repair,
     util,
     wireproto,
 )

-from . import (
-    narrowrepo,
-)
-
 NARROWCAP = 'narrow'
 _NARROWACL_SECTION = 'narrowhgacl'
 _CHANGESPECPART = NARROWCAP + ':changespec'
 _SPECPART = NARROWCAP + ':spec'
 _SPECPART_INCLUDE = 'include'
 _SPECPART_EXCLUDE = 'exclude'
 _KILLNODESIGNAL = 'KILL'
 _DONESIGNAL = 'DONE'
 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)

 # When advertising capabilities, always include narrow clone support.
 def getrepocaps_narrow(orig, repo, **kwargs):
     caps = orig(repo, **kwargs)
     caps[NARROWCAP] = ['v0']
     return caps

 def _computeellipsis(repo, common, heads, known, match, depth=None):
     """Compute the shape of a narrowed DAG.

     Args:
       repo: The repository we're transferring.
       common: The roots of the DAG range we're transferring.
           May be just [nullid], which means all ancestors of heads.
       heads: The heads of the DAG range we're transferring.
       match: The narrowmatcher that allows us to identify relevant changes.
       depth: If not None, only consider nodes to be full nodes if they are at
              most depth changesets away from one of heads.

     Returns:
       A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

         visitnodes: The list of nodes (either full or ellipsis) which
             need to be sent to the client.
         relevant_nodes: The set of changelog nodes which change a file inside
             the narrowspec. The client needs these as non-ellipsis nodes.
         ellipsisroots: A dict of {rev: parents} that is used in
             narrowchangegroup to produce ellipsis nodes with the
             correct parents.
     """
     cl = repo.changelog
     mfl = repo.manifestlog

     cldag = dagutil.revlogdag(cl)
     # dagutil does not like nullid/nullrev
     commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
     headsrevs = cldag.internalizeall(heads)
     if depth:
         revdepth = {h: 0 for h in headsrevs}

     ellipsisheads = collections.defaultdict(set)
     ellipsisroots = collections.defaultdict(set)

     def addroot(head, curchange):
         """Add a root to an ellipsis head, splitting heads with 3 roots."""
         ellipsisroots[head].add(curchange)
         # Recursively split ellipsis heads with 3 roots by finding the
         # roots' youngest common descendant which is an elided merge commit.
         # That descendant takes 2 of the 3 roots as its own, and becomes a
         # root of the head.
         while len(ellipsisroots[head]) > 2:
             child, roots = splithead(head)
             splitroots(head, child, roots)
             head = child  # Recurse in case we just added a 3rd root

     def splitroots(head, child, roots):
         ellipsisroots[head].difference_update(roots)
         ellipsisroots[head].add(child)
         ellipsisroots[child].update(roots)
         ellipsisroots[child].discard(child)

     def splithead(head):
         r1, r2, r3 = sorted(ellipsisroots[head])
         for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
             mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                             nr1, head, nr2, head)
             for j in mid:
                 if j == nr2:
                     return nr2, (nr1, nr2)
                 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                     return j, (nr1, nr2)
         raise error.Abort('Failed to split up ellipsis node! head: %d, '
                           'roots: %d %d %d' % (head, r1, r2, r3))

     missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
     visit = reversed(missing)
     relevant_nodes = set()
     visitnodes = [cl.node(m) for m in missing]
     required = set(headsrevs) | known
     for rev in visit:
         clrev = cl.changelogrevision(rev)
         ps = cldag.parents(rev)
         if depth is not None:
             curdepth = revdepth[rev]
             for p in ps:
                 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
         needed = False
         shallow_enough = depth is None or revdepth[rev] <= depth
         if shallow_enough:
             curmf = mfl[clrev.manifest].read()
             if ps:
                 # We choose to not trust the changed files list in
                 # changesets because it's not always correct. TODO: could
                 # we trust it for the non-merge case?
                 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                 needed = bool(curmf.diff(p1mf, match))
                 if not needed and len(ps) > 1:
                     # For merge changes, the list of changed files is not
                     # helpful, since we need to emit the merge if a file
                     # in the narrow spec has changed on either side of the
                     # merge. As a result, we do a manifest diff to check.
                     p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                     needed = bool(curmf.diff(p2mf, match))
             else:
                 # For a root node, we need to include the node if any
                 # files in the node match the narrowspec.
                 needed = any(curmf.walk(match))

         if needed:
             for head in ellipsisheads[rev]:
                 addroot(head, rev)
             for p in ps:
                 required.add(p)
             relevant_nodes.add(cl.node(rev))
         else:
             if not ps:
                 ps = [nullrev]
             if rev in required:
                 for head in ellipsisheads[rev]:
                     addroot(head, rev)
                 for p in ps:
                     ellipsisheads[p].add(rev)
             else:
                 for p in ps:
                     ellipsisheads[p] |= ellipsisheads[rev]

     # add common changesets as roots of their reachable ellipsis heads
     for c in commonrevs:
         for head in ellipsisheads[c]:
             addroot(head, c)
     return visitnodes, relevant_nodes, ellipsisroots

 def _packellipsischangegroup(repo, common, match, relevant_nodes,
                              ellipsisroots, visitnodes, depth, source, version):
     if version in ('01', '02'):
         raise error.Abort(
             'ellipsis nodes require at least cg3 on client and server, '
             'but negotiated version %s' % version)
     # We wrap cg1packer.revchunk, using a side channel to pass
     # relevant_nodes into that area. Then if linknode isn't in the
     # set, we know we have an ellipsis node and we should defer
     # sending that node's data. We override close() to detect
     # pending ellipsis nodes and flush them.
     packer = changegroup.getbundler(version, repo)
     # Let the packer have access to the narrow matcher so it can
     # omit filelogs and dirlogs as needed
     packer._narrow_matcher = lambda : match
     # Give the packer the list of nodes which should not be
     # ellipsis nodes. We store this rather than the set of nodes
     # that should be an ellipsis because for very large histories
     # we expect this to be significantly smaller.
     packer.full_nodes = relevant_nodes
     # Maps ellipsis revs to their roots at the changelog level.
     packer.precomputed_ellipsis = ellipsisroots
     # Maps CL revs to per-revlog revisions. Cleared in close() at
     # the end of each group.
     packer.clrev_to_localrev = {}
     packer.next_clrev_to_localrev = {}
     # Maps changelog nodes to changelog revs. Filled in once
     # during changelog stage and then left unmodified.
     packer.clnode_to_rev = {}
     packer.changelog_done = False
     # If true, informs the packer that it is serving shallow content and might
     # need to pack file contents not introduced by the changes being packed.
     packer.is_shallow = depth is not None

     return packer.generate(common, visitnodes, False, source)

 # Serve a changegroup for a client with a narrow clone.
 def getbundlechangegrouppart_narrow(bundler, repo, source,
                                     bundlecaps=None, b2caps=None, heads=None,
                                     common=None, **kwargs):
     cgversions = b2caps.get('changegroup')
     if cgversions: # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = max(cgversions)
     else:
         raise ValueError(_("server does not advertise changegroup version,"
                            " can't negotiate support for ellipsis nodes"))

     include = sorted(filter(bool, kwargs.get(r'includepats', [])))
     exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
     newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
     if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
         outgoing = exchange._computeoutgoing(repo, heads, common)
         if not outgoing.missing:
             return
         def wrappedgetbundler(orig, *args, **kwargs):
             bundler = orig(*args, **kwargs)
             bundler._narrow_matcher = lambda : newmatch
             return bundler
         with extensions.wrappedfunction(changegroup, 'getbundler',
                                         wrappedgetbundler):
             cg = changegroup.makestream(repo, outgoing, version, source)
         part = bundler.newpart('changegroup', data=cg)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')

         if include or exclude:
             narrowspecpart = bundler.newpart(_SPECPART)
             if include:
                 narrowspecpart.addparam(
                     _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
             if exclude:
                 narrowspecpart.addparam(
                     _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)

         return

     depth = kwargs.get(r'depth', None)
     if depth is not None:
         depth = int(depth)
         if depth < 1:
             raise error.Abort(_('depth must be positive, got %d') % depth)

     heads = set(heads or repo.heads())
     common = set(common or [nullid])
     oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
     oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
     known = {bin(n) for n in kwargs.get(r'known', [])}
     if known and (oldinclude != include or oldexclude != exclude):
         # Steps:
         # 1. Send kill for "$known & ::common"
         #
         # 2. Send changegroup for ::common
         #
         # 3. Proceed.
         #
         # In the future, we can send kills for only the specific
         # nodes we know should go away or change shape, and then
         # send a data stream that tells the client something like this:
         #
         # a) apply this changegroup
         # b) apply nodes XXX, YYY, ZZZ that you already have
         # c) goto a
         #
         # until they've built up the full new state.
         # Convert to revnums and intersect with "common". The client should
         # have made it a subset of "common" already, but let's be safe.
         known = set(repo.revs("%ln & ::%ln", known, common))
         # TODO: we could send only roots() of this set, and the
         # list of nodes in common, and the client could work out
         # what to strip, instead of us explicitly sending every
         # single node.
         deadrevs = known
         def genkills():
             for r in deadrevs:
                 yield _KILLNODESIGNAL
                 yield repo.changelog.node(r)
             yield _DONESIGNAL
         bundler.newpart(_CHANGESPECPART, data=genkills())
         newvisit, newfull, newellipsis = _computeellipsis(
             repo, set(), common, known, newmatch)
         if newvisit:
             cg = _packellipsischangegroup(
                 repo, common, newmatch, newfull, newellipsis,
                 newvisit, depth, source, version)
             part = bundler.newpart('changegroup', data=cg)
             part.addparam('version', version)
             if 'treemanifest' in repo.requirements:
                 part.addparam('treemanifest', '1')

     visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
         repo, common, heads, set(), newmatch, depth=depth)

     repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
         cg = _packellipsischangegroup(
             repo, common, newmatch, relevant_nodes, ellipsisroots,
             visitnodes, depth, source, version)
         part = bundler.newpart('changegroup', data=cg)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')

 def applyacl_narrow(repo, kwargs):
     ui = repo.ui
     username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
     user_includes = ui.configlist(
         _NARROWACL_SECTION, username + '.includes',
         ui.configlist(_NARROWACL_SECTION, 'default.includes'))
     user_excludes = ui.configlist(
         _NARROWACL_SECTION, username + '.excludes',
         ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
     if not user_includes:
         raise error.Abort(_("{} configuration for user {} is empty")
                           .format(_NARROWACL_SECTION, username))

     user_includes = [
         'path:.' if p == '*' else 'path:' + p for p in user_includes]
     user_excludes = [
         'path:.' if p == '*' else 'path:' + p for p in user_excludes]

     req_includes = set(kwargs.get(r'includepats', []))
     req_excludes = set(kwargs.get(r'excludepats', []))

     req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
         req_includes, req_excludes, user_includes, user_excludes)

     if invalid_includes:
         raise error.Abort(
             _("The following includes are not accessible for {}: {}")
             .format(username, invalid_includes))

     new_args = {}
     new_args.update(kwargs)
     new_args['includepats'] = req_includes
     if req_excludes:
         new_args['excludepats'] = req_excludes
     return new_args

 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
     includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
     excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
     narrowspec.save(op.repo, includepats, excludepats)
-    if not
-    op.repo.requirements.add(
+    if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
+        op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
         op.repo._writerequirements()
     op.repo.invalidate(clearfilecache=True)

 @bundle2.parthandler(_CHANGESPECPART)
 def _handlechangespec(op, inpart):
     repo = op.repo
     cl = repo.changelog

     # changesets which need to be stripped entirely. either they're no longer
     # needed in the new narrow spec, or the server is sending a replacement
     # in the changegroup part.
     clkills = set()

     # A changespec part contains all the updates to ellipsis nodes
     # that will happen as a result of widening or narrowing a
     # repo. All the changes that this block encounters are ellipsis
     # nodes or flags to kill an existing ellipsis.
     chunksignal = changegroup.readexactly(inpart, 4)
     while chunksignal != _DONESIGNAL:
         if chunksignal == _KILLNODESIGNAL:
             # a node used to be an ellipsis but isn't anymore
             ck = changegroup.readexactly(inpart, 20)
             if cl.hasnode(ck):
                 clkills.add(ck)
         else:
             raise error.Abort(
                 _('unexpected changespec node chunk type: %s') % chunksignal)
         chunksignal = changegroup.readexactly(inpart, 4)

     if clkills:
         # preserve bookmarks that repair.strip() would otherwise strip
         bmstore = repo._bookmarks
         class dummybmstore(dict):
             def applychanges(self, repo, tr, changes):
                 pass
             def recordchange(self, tr): # legacy version
                 pass
         repo._bookmarks = dummybmstore()
         chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
                                  topic='widen')
         repo._bookmarks = bmstore
         if chgrpfile:
             # presence of _widen_bundle attribute activates widen handler later
             op._widen_bundle = chgrpfile
     # Set the new narrowspec if we're widening. The setnewnarrowpats() method
     # will currently always be there when using the core+narrowhg server, but
     # other servers may include a changespec part even when not widening (e.g.
     # because we're deepening a shallow repo).
     if util.safehasattr(repo, 'setnewnarrowpats'):
         repo.setnewnarrowpats()

 def handlechangegroup_widen(op, inpart):
     """Changegroup exchange handler which restores temporarily-stripped nodes"""
     # We saved a bundle with stripped node data we must now restore.
     # This approach is based on mercurial/repair.py@6ee26a53c111.
     repo = op.repo
     ui = op.ui

     chgrpfile = op._widen_bundle
     del op._widen_bundle
     vfs = repo.vfs

     ui.note(_("adding branch\n"))
     f = vfs.open(chgrpfile, "rb")
     try:
         gen = exchange.readbundle(ui, f, chgrpfile, vfs)
         if not ui.verbose:
             # silence internal shuffling chatter
             ui.pushbuffer()
         if isinstance(gen, bundle2.unbundle20):
             with repo.transaction('strip') as tr:
                 bundle2.processbundle(repo, gen, lambda: tr)
         else:
             gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
         if not ui.verbose:
             ui.popbuffer()
     finally:
         f.close()

     # remove undo files
     for undovfs, undofile in repo.undofiles():
         try:
             undovfs.unlink(undofile)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 ui.warn(_('error removing %s: %s\n') %
                         (undovfs.join(undofile), str(e)))

     # Remove partial backup only if there were no exceptions
     vfs.unlink(chgrpfile)

 def setup():
     """Enable narrow repo support in bundle2-related extension points."""
     extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)

     wireproto.gboptsmap['narrow'] = 'boolean'
     wireproto.gboptsmap['depth'] = 'plain'
     wireproto.gboptsmap['oldincludepats'] = 'csv'
     wireproto.gboptsmap['oldexcludepats'] = 'csv'
     wireproto.gboptsmap['includepats'] = 'csv'
     wireproto.gboptsmap['excludepats'] = 'csv'
     wireproto.gboptsmap['known'] = 'csv'

     # Extend changegroup serving to handle requests from narrow clients.
     origcgfn = exchange.getbundle2partsmapping['changegroup']
     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
             getbundlechangegrouppart_narrow(
                 *args, **applyacl_narrow(repo, kwargs))
         elif kwargs.get(r'narrow', False):
             getbundlechangegrouppart_narrow(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)
     exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn

     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping['changegroup']
     def wrappedcghandler(op, inpart):
         origcghandler(op, inpart)
         if util.safehasattr(op, '_widen_bundle'):
             handlechangegroup_widen(op, inpart)
     wrappedcghandler.params = origcghandler.params
     bundle2.parthandlermapping['changegroup'] = wrappedcghandler
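
applyacl_narrow above derives a per-user narrowspec from the [narrowhgacl] config section: it resolves the user from REMOTE_USER (falling back to ui.username()), reads <user>.includes and <user>.excludes with default.includes/default.excludes as fallbacks, treats a bare * as path:. (the whole repository), and then clamps whatever the client requested with narrowspec.restrictpatterns. The wrapped changegroup part function takes this path whenever the section exists. A sketch of what such a server-side section could look like; the usernames and paths are hypothetical:

    [narrowhgacl]
    default.includes = *
    alice.includes = src/frontend, docs
    alice.excludes = docs/internal

An empty include list for the resolved user aborts the request, and any requested include outside the allowed set is rejected as inaccessible.
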
@@ -1,380 +1,376 @@
 # narrowchangegroup.py - narrow clone changegroup creation and consumption
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from mercurial.i18n import _
 from mercurial import (
     changegroup,
     error,
     extensions,
     manifest,
     mdiff,
     node,
     revlog,
     util,
 )

-from . import (
-    narrowrepo,
-)
-
 def setup():

     def supportedoutgoingversions(orig, repo):
         versions = orig(repo)
-        if
+        if changegroup.NARROW_REQUIREMENT in repo.requirements:
             versions.discard('01')
             versions.discard('02')
         return versions

     extensions.wrapfunction(changegroup, 'supportedoutgoingversions',
                             supportedoutgoingversions)

     def prune(orig, self, revlog, missing, commonrevs):
         if isinstance(revlog, manifest.manifestrevlog):
             matcher = getattr(self._repo, 'narrowmatch',
                               getattr(self, '_narrow_matcher', None))
             if (matcher is not None and
                 not matcher().visitdir(revlog._dir[:-1] or '.')):
                 return []
         return orig(self, revlog, missing, commonrevs)

     extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)

     def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
                       source):
         matcher = getattr(self._repo, 'narrowmatch',
                           getattr(self, '_narrow_matcher', None))
         if matcher is not None:
             narrowmatch = matcher()
             changedfiles = [f for f in changedfiles if narrowmatch(f)]
         if getattr(self, 'is_shallow', False):
             # See comment in generate() for why this sadness is a thing.
             mfdicts = self._mfdicts
             del self._mfdicts
             # In a shallow clone, the linknodes callback needs to also include
             # those file nodes that are in the manifests we sent but weren't
             # introduced by those manifests.
             commonctxs = [self._repo[c] for c in commonrevs]
             oldlinknodes = linknodes
             clrev = self._repo.changelog.rev
             def linknodes(flog, fname):
                 for c in commonctxs:
                     try:
                         fnode = c.filenode(fname)
                         self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                     except error.ManifestLookupError:
                         pass
                 links = oldlinknodes(flog, fname)
                 if len(links) != len(mfdicts):
                     for mf, lr in mfdicts:
                         fnode = mf.get(fname, None)
                         if fnode in links:
                             links[fnode] = min(links[fnode], lr, key=clrev)
                         elif fnode:
                             links[fnode] = lr
                 return links
         return orig(self, changedfiles, linknodes, commonrevs, source)
     extensions.wrapfunction(
         changegroup.cg1packer, 'generatefiles', generatefiles)

     def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
         n = revlog_.node(rev)
         p1n, p2n = revlog_.node(p1), revlog_.node(p2)
         flags = revlog_.flags(rev)
         flags |= revlog.REVIDX_ELLIPSIS
         meta = packer.builddeltaheader(
             n, p1n, p2n, node.nullid, linknode, flags)
         # TODO: try and actually send deltas for ellipsis data blocks
         diffheader = mdiff.trivialdiffheader(len(data))
         l = len(meta) + len(diffheader) + len(data)
         return ''.join((changegroup.chunkheader(l),
                         meta,
                         diffheader,
                         data))

     def close(orig, self):
         getattr(self, 'clrev_to_localrev', {}).clear()
         if getattr(self, 'next_clrev_to_localrev', {}):
             self.clrev_to_localrev = self.next_clrev_to_localrev
             del self.next_clrev_to_localrev
         self.changelog_done = True
         return orig(self)
     extensions.wrapfunction(changegroup.cg1packer, 'close', close)

     # In a perfect world, we'd generate better ellipsis-ified graphs
     # for non-changelog revlogs. In practice, we haven't started doing
     # that yet, so the resulting DAGs for the manifestlog and filelogs
     # are actually full of bogus parentage on all the ellipsis
     # nodes. This has the side effect that, while the contents are
     # correct, the individual DAGs might be completely out of whack in
     # a case like 882681bc3166 and its ancestors (back about 10
     # revisions or so) in the main hg repo.
     #
     # The one invariant we *know* holds is that the new (potentially
     # bogus) DAG shape will be valid if we order the nodes in the
     # order that they're introduced in dramatis personae by the
     # changelog, so what we do is we sort the non-changelog histories
     # by the order in which they are used by the changelog.
|
119 | # by the order in which they are used by the changelog. | |
124 | def _sortgroup(orig, self, revlog, nodelist, lookup): |
|
120 | def _sortgroup(orig, self, revlog, nodelist, lookup): | |
125 | if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev: |
|
121 | if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev: | |
126 | return orig(self, revlog, nodelist, lookup) |
|
122 | return orig(self, revlog, nodelist, lookup) | |
127 | key = lambda n: self.clnode_to_rev[lookup(n)] |
|
123 | key = lambda n: self.clnode_to_rev[lookup(n)] | |
128 | return [revlog.rev(n) for n in sorted(nodelist, key=key)] |
|
124 | return [revlog.rev(n) for n in sorted(nodelist, key=key)] | |
129 |
|
125 | |||
130 | extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup) |
|
126 | extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup) | |
131 |
|
127 | |||
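
The _sortgroup() override above encodes the ordering invariant described in the comment: nodes of a non-changelog revlog are emitted in the order in which the changesets that introduced them appear in the changelog. A toy illustration of that sort key (clnode_to_rev and lookup below are invented stand-ins; the real method additionally converts the sorted nodes to revlog revision numbers):

    # Changelog node -> changelog revision number, normally filled in while
    # the changelog portion of the bundle is generated.
    clnode_to_rev = {'cl-A': 0, 'cl-B': 1, 'cl-C': 2}

    # lookup(filenode) -> changelog node that introduced that file revision.
    lookup = {'f3': 'cl-C', 'f1': 'cl-A', 'f2': 'cl-B'}.get

    nodelist = ['f3', 'f1', 'f2']

    # Same key as the wrapped _sortgroup(): order file nodes by the revnum
    # of the introducing changeset.
    ordered = sorted(nodelist, key=lambda n: clnode_to_rev[lookup(n)])
    print(ordered)  # ['f1', 'f2', 'f3']
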
132 | def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source): |
|
128 | def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source): | |
133 | '''yield a sequence of changegroup chunks (strings)''' |
|
129 | '''yield a sequence of changegroup chunks (strings)''' | |
134 | # Note: other than delegating to orig, the only deviation in |
|
130 | # Note: other than delegating to orig, the only deviation in | |
135 | # logic from normal hg's generate is marked with BEGIN/END |
|
131 | # logic from normal hg's generate is marked with BEGIN/END | |
136 | # NARROW HACK. |
|
132 | # NARROW HACK. | |
137 | if not util.safehasattr(self, 'full_nodes'): |
|
133 | if not util.safehasattr(self, 'full_nodes'): | |
138 | # not sending a narrow bundle |
|
134 | # not sending a narrow bundle | |
139 | for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source): |
|
135 | for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source): | |
140 | yield x |
|
136 | yield x | |
141 | return |
|
137 | return | |
142 |
|
138 | |||
143 | repo = self._repo |
|
139 | repo = self._repo | |
144 | cl = repo.changelog |
|
140 | cl = repo.changelog | |
145 | mfl = repo.manifestlog |
|
141 | mfl = repo.manifestlog | |
146 | mfrevlog = mfl._revlog |
|
142 | mfrevlog = mfl._revlog | |
147 |
|
143 | |||
148 | clrevorder = {} |
|
144 | clrevorder = {} | |
149 | mfs = {} # needed manifests |
|
145 | mfs = {} # needed manifests | |
150 | fnodes = {} # needed file nodes |
|
146 | fnodes = {} # needed file nodes | |
151 | changedfiles = set() |
|
147 | changedfiles = set() | |
152 |
|
148 | |||
153 | # Callback for the changelog, used to collect changed files and manifest |
|
149 | # Callback for the changelog, used to collect changed files and manifest | |
154 | # nodes. |
|
150 | # nodes. | |
155 | # Returns the linkrev node (identity in the changelog case). |
|
151 | # Returns the linkrev node (identity in the changelog case). | |
156 | def lookupcl(x): |
|
152 | def lookupcl(x): | |
157 | c = cl.read(x) |
|
153 | c = cl.read(x) | |
158 | clrevorder[x] = len(clrevorder) |
|
154 | clrevorder[x] = len(clrevorder) | |
159 | # BEGIN NARROW HACK |
|
155 | # BEGIN NARROW HACK | |
160 | # |
|
156 | # | |
161 | # Only update mfs if x is going to be sent. Otherwise we |
|
157 | # Only update mfs if x is going to be sent. Otherwise we | |
162 | # end up with bogus linkrevs specified for manifests and |
|
158 | # end up with bogus linkrevs specified for manifests and | |
163 | # we skip some manifest nodes that we should otherwise |
|
159 | # we skip some manifest nodes that we should otherwise | |
164 | # have sent. |
|
160 | # have sent. | |
165 | if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis: |
|
161 | if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis: | |
166 | n = c[0] |
|
162 | n = c[0] | |
167 | # record the first changeset introducing this manifest version |
|
163 | # record the first changeset introducing this manifest version | |
168 | mfs.setdefault(n, x) |
|
164 | mfs.setdefault(n, x) | |
169 | # Set this narrow-specific dict so we have the lowest manifest |
|
165 | # Set this narrow-specific dict so we have the lowest manifest | |
170 | # revnum to look up for this cl revnum. (Part of mapping |
|
166 | # revnum to look up for this cl revnum. (Part of mapping | |
171 | # changelog ellipsis parents to manifest ellipsis parents) |
|
167 | # changelog ellipsis parents to manifest ellipsis parents) | |
172 | self.next_clrev_to_localrev.setdefault(cl.rev(x), |
|
168 | self.next_clrev_to_localrev.setdefault(cl.rev(x), | |
173 | mfrevlog.rev(n)) |
|
169 | mfrevlog.rev(n)) | |
174 | # We can't trust the changed files list in the changeset if the |
|
170 | # We can't trust the changed files list in the changeset if the | |
175 | # client requested a shallow clone. |
|
171 | # client requested a shallow clone. | |
176 | if self.is_shallow: |
|
172 | if self.is_shallow: | |
177 | changedfiles.update(mfl[c[0]].read().keys()) |
|
173 | changedfiles.update(mfl[c[0]].read().keys()) | |
178 | else: |
|
174 | else: | |
179 | changedfiles.update(c[3]) |
|
175 | changedfiles.update(c[3]) | |
180 | # END NARROW HACK |
|
176 | # END NARROW HACK | |
181 | # Record a complete list of potentially-changed files in |
|
177 | # Record a complete list of potentially-changed files in | |
182 | # this manifest. |
|
178 | # this manifest. | |
183 | return x |
|
179 | return x | |
184 |
|
180 | |||
185 | self._verbosenote(_('uncompressed size of bundle content:\n')) |
|
181 | self._verbosenote(_('uncompressed size of bundle content:\n')) | |
186 | size = 0 |
|
182 | size = 0 | |
187 | for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')): |
|
183 | for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')): | |
188 | size += len(chunk) |
|
184 | size += len(chunk) | |
189 | yield chunk |
|
185 | yield chunk | |
190 | self._verbosenote(_('%8.i (changelog)\n') % size) |
|
186 | self._verbosenote(_('%8.i (changelog)\n') % size) | |
191 |
|
187 | |||
192 | # We need to make sure that the linkrev in the changegroup refers to |
|
188 | # We need to make sure that the linkrev in the changegroup refers to | |
193 | # the first changeset that introduced the manifest or file revision. |
|
189 | # the first changeset that introduced the manifest or file revision. | |
194 | # The fastpath is usually safer than the slowpath, because the filelogs |
|
190 | # The fastpath is usually safer than the slowpath, because the filelogs | |
195 | # are walked in revlog order. |
|
191 | # are walked in revlog order. | |
196 | # |
|
192 | # | |
197 | # When taking the slowpath with reorder=None and the manifest revlog |
|
193 | # When taking the slowpath with reorder=None and the manifest revlog | |
198 | # uses generaldelta, the manifest may be walked in the "wrong" order. |
|
194 | # uses generaldelta, the manifest may be walked in the "wrong" order. | |
199 | # Without 'clrevorder', we would get an incorrect linkrev (see fix in |
|
195 | # Without 'clrevorder', we would get an incorrect linkrev (see fix in | |
200 | # cc0ff93d0c0c). |
|
196 | # cc0ff93d0c0c). | |
201 | # |
|
197 | # | |
202 | # When taking the fastpath, we are only vulnerable to reordering |
|
198 | # When taking the fastpath, we are only vulnerable to reordering | |
203 | # of the changelog itself. The changelog never uses generaldelta, so |
|
199 | # of the changelog itself. The changelog never uses generaldelta, so | |
204 | # it is only reordered when reorder=True. To handle this case, we |
|
200 | # it is only reordered when reorder=True. To handle this case, we | |
205 | # simply take the slowpath, which already has the 'clrevorder' logic. |
|
201 | # simply take the slowpath, which already has the 'clrevorder' logic. | |
206 | # This was also fixed in cc0ff93d0c0c. |
|
202 | # This was also fixed in cc0ff93d0c0c. | |
207 | fastpathlinkrev = fastpathlinkrev and not self._reorder |
|
203 | fastpathlinkrev = fastpathlinkrev and not self._reorder | |
208 | # Treemanifests don't work correctly with fastpathlinkrev |
|
204 | # Treemanifests don't work correctly with fastpathlinkrev | |
209 | # either, because we don't discover which directory nodes to |
|
205 | # either, because we don't discover which directory nodes to | |
210 | # send along with files. This could probably be fixed. |
|
206 | # send along with files. This could probably be fixed. | |
211 | fastpathlinkrev = fastpathlinkrev and ( |
|
207 | fastpathlinkrev = fastpathlinkrev and ( | |
212 | 'treemanifest' not in repo.requirements) |
|
208 | 'treemanifest' not in repo.requirements) | |
213 | # Shallow clones also don't work correctly with fastpathlinkrev |
|
209 | # Shallow clones also don't work correctly with fastpathlinkrev | |
214 | # because file nodes may need to be sent for a manifest even if they |
|
210 | # because file nodes may need to be sent for a manifest even if they | |
215 | # weren't introduced by that manifest. |
|
211 | # weren't introduced by that manifest. | |
216 | fastpathlinkrev = fastpathlinkrev and not self.is_shallow |
|
212 | fastpathlinkrev = fastpathlinkrev and not self.is_shallow | |
217 |
|
213 | |||
218 | for chunk in self.generatemanifests(commonrevs, clrevorder, |
|
214 | for chunk in self.generatemanifests(commonrevs, clrevorder, | |
219 | fastpathlinkrev, mfs, fnodes, source): |
|
215 | fastpathlinkrev, mfs, fnodes, source): | |
220 | yield chunk |
|
216 | yield chunk | |
221 | # BEGIN NARROW HACK |
|
217 | # BEGIN NARROW HACK | |
222 | mfdicts = None |
|
218 | mfdicts = None | |
223 | if self.is_shallow: |
|
219 | if self.is_shallow: | |
224 | mfdicts = [(self._repo.manifestlog[n].read(), lr) |
|
220 | mfdicts = [(self._repo.manifestlog[n].read(), lr) | |
225 | for (n, lr) in mfs.iteritems()] |
|
221 | for (n, lr) in mfs.iteritems()] | |
226 | # END NARROW HACK |
|
222 | # END NARROW HACK | |
227 | mfs.clear() |
|
223 | mfs.clear() | |
228 | clrevs = set(cl.rev(x) for x in clnodes) |
|
224 | clrevs = set(cl.rev(x) for x in clnodes) | |
229 |
|
225 | |||
230 | if not fastpathlinkrev: |
|
226 | if not fastpathlinkrev: | |
231 | def linknodes(unused, fname): |
|
227 | def linknodes(unused, fname): | |
232 | return fnodes.get(fname, {}) |
|
228 | return fnodes.get(fname, {}) | |
233 | else: |
|
229 | else: | |
234 | cln = cl.node |
|
230 | cln = cl.node | |
235 | def linknodes(filerevlog, fname): |
|
231 | def linknodes(filerevlog, fname): | |
236 | llr = filerevlog.linkrev |
|
232 | llr = filerevlog.linkrev | |
237 | fln = filerevlog.node |
|
233 | fln = filerevlog.node | |
238 | revs = ((r, llr(r)) for r in filerevlog) |
|
234 | revs = ((r, llr(r)) for r in filerevlog) | |
239 | return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) |
|
235 | return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) | |
240 |
|
236 | |||
241 | # BEGIN NARROW HACK |
|
237 | # BEGIN NARROW HACK | |
242 | # |
|
238 | # | |
243 | # We need to pass the mfdicts variable down into |
|
239 | # We need to pass the mfdicts variable down into | |
244 | # generatefiles(), but more than one command might have |
|
240 | # generatefiles(), but more than one command might have | |
245 | # wrapped generatefiles so we can't modify the function |
|
241 | # wrapped generatefiles so we can't modify the function | |
246 | # signature. Instead, we pass the data to ourselves using an |
|
242 | # signature. Instead, we pass the data to ourselves using an | |
247 | # instance attribute. I'm sorry. |
|
243 | # instance attribute. I'm sorry. | |
248 | self._mfdicts = mfdicts |
|
244 | self._mfdicts = mfdicts | |
249 | # END NARROW HACK |
|
245 | # END NARROW HACK | |
250 | for chunk in self.generatefiles(changedfiles, linknodes, commonrevs, |
|
246 | for chunk in self.generatefiles(changedfiles, linknodes, commonrevs, | |
251 | source): |
|
247 | source): | |
252 | yield chunk |
|
248 | yield chunk | |
253 |
|
249 | |||
254 | yield self.close() |
|
250 | yield self.close() | |
255 |
|
251 | |||
256 | if clnodes: |
|
252 | if clnodes: | |
257 | repo.hook('outgoing', node=node.hex(clnodes[0]), source=source) |
|
253 | repo.hook('outgoing', node=node.hex(clnodes[0]), source=source) | |
258 | extensions.wrapfunction(changegroup.cg1packer, 'generate', generate) |
|
254 | extensions.wrapfunction(changegroup.cg1packer, 'generate', generate) | |
259 |
|
255 | |||
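
The tail end of the wrapped generate() above chains several guards before trusting fastpathlinkrev: a reordered changelog, a treemanifest repository, or a shallow clone all force the slow path. A compact restatement of that decision, offered as a sketch rather than anything exported by Mercurial:

    def usefastpathlinkrev(requested, reorder, requirements, is_shallow):
        """Mirror the chain of guards in the wrapped generate() above.

        Any condition that can reorder revisions or require extra nodes
        (treemanifests, shallow clones) falls back to the slow path.
        """
        return (requested
                and not reorder
                and 'treemanifest' not in requirements
                and not is_shallow)

    print(usefastpathlinkrev(True, False, set(), False))             # True
    print(usefastpathlinkrev(True, False, {'treemanifest'}, False))  # False
    print(usefastpathlinkrev(True, False, set(), True))              # False (shallow)
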
260 | def revchunk(orig, self, revlog, rev, prev, linknode): |
|
256 | def revchunk(orig, self, revlog, rev, prev, linknode): | |
261 | if not util.safehasattr(self, 'full_nodes'): |
|
257 | if not util.safehasattr(self, 'full_nodes'): | |
262 | # not sending a narrow changegroup |
|
258 | # not sending a narrow changegroup | |
263 | for x in orig(self, revlog, rev, prev, linknode): |
|
259 | for x in orig(self, revlog, rev, prev, linknode): | |
264 | yield x |
|
260 | yield x | |
265 | return |
|
261 | return | |
266 | # build up some mapping information that's useful later. See |
|
262 | # build up some mapping information that's useful later. See | |
267 | # the local() nested function below. |
|
263 | # the local() nested function below. | |
268 | if not self.changelog_done: |
|
264 | if not self.changelog_done: | |
269 | self.clnode_to_rev[linknode] = rev |
|
265 | self.clnode_to_rev[linknode] = rev | |
270 | linkrev = rev |
|
266 | linkrev = rev | |
271 | self.clrev_to_localrev[linkrev] = rev |
|
267 | self.clrev_to_localrev[linkrev] = rev | |
272 | else: |
|
268 | else: | |
273 | linkrev = self.clnode_to_rev[linknode] |
|
269 | linkrev = self.clnode_to_rev[linknode] | |
274 | self.clrev_to_localrev[linkrev] = rev |
|
270 | self.clrev_to_localrev[linkrev] = rev | |
275 | # This is a node to send in full, because the changeset it |
|
271 | # This is a node to send in full, because the changeset it | |
276 | # corresponds to was a full changeset. |
|
272 | # corresponds to was a full changeset. | |
277 | if linknode in self.full_nodes: |
|
273 | if linknode in self.full_nodes: | |
278 | for x in orig(self, revlog, rev, prev, linknode): |
|
274 | for x in orig(self, revlog, rev, prev, linknode): | |
279 | yield x |
|
275 | yield x | |
280 | return |
|
276 | return | |
281 | # At this point, a node can either be one we should skip or an |
|
277 | # At this point, a node can either be one we should skip or an | |
282 | # ellipsis. If it's not an ellipsis, bail immediately. |
|
278 | # ellipsis. If it's not an ellipsis, bail immediately. | |
283 | if linkrev not in self.precomputed_ellipsis: |
|
279 | if linkrev not in self.precomputed_ellipsis: | |
284 | return |
|
280 | return | |
285 | linkparents = self.precomputed_ellipsis[linkrev] |
|
281 | linkparents = self.precomputed_ellipsis[linkrev] | |
286 | def local(clrev): |
|
282 | def local(clrev): | |
287 | """Turn a changelog revnum into a local revnum. |
|
283 | """Turn a changelog revnum into a local revnum. | |
288 |
|
284 | |||
289 | The ellipsis dag is stored as revnums on the changelog, |
|
285 | The ellipsis dag is stored as revnums on the changelog, | |
290 | but when we're producing ellipsis entries for |
|
286 | but when we're producing ellipsis entries for | |
291 | non-changelog revlogs, we need to turn those numbers into |
|
287 | non-changelog revlogs, we need to turn those numbers into | |
292 | something local. This does that for us, and during the |
|
288 | something local. This does that for us, and during the | |
293 | changelog sending phase will also expand the stored |
|
289 | changelog sending phase will also expand the stored | |
294 | mappings as needed. |
|
290 | mappings as needed. | |
295 | """ |
|
291 | """ | |
296 | if clrev == node.nullrev: |
|
292 | if clrev == node.nullrev: | |
297 | return node.nullrev |
|
293 | return node.nullrev | |
298 | if not self.changelog_done: |
|
294 | if not self.changelog_done: | |
299 | # If we're doing the changelog, it's possible that we |
|
295 | # If we're doing the changelog, it's possible that we | |
300 | # have a parent that is already on the client, and we |
|
296 | # have a parent that is already on the client, and we | |
301 | # need to store some extra mapping information so that |
|
297 | # need to store some extra mapping information so that | |
302 | # our contained ellipsis nodes will be able to resolve |
|
298 | # our contained ellipsis nodes will be able to resolve | |
303 | # their parents. |
|
299 | # their parents. | |
304 | if clrev not in self.clrev_to_localrev: |
|
300 | if clrev not in self.clrev_to_localrev: | |
305 | clnode = revlog.node(clrev) |
|
301 | clnode = revlog.node(clrev) | |
306 | self.clnode_to_rev[clnode] = clrev |
|
302 | self.clnode_to_rev[clnode] = clrev | |
307 | return clrev |
|
303 | return clrev | |
308 | # Walk the ellipsis-ized changelog breadth-first looking for a |
|
304 | # Walk the ellipsis-ized changelog breadth-first looking for a | |
309 | # change that has been linked from the current revlog. |
|
305 | # change that has been linked from the current revlog. | |
310 | # |
|
306 | # | |
311 | # For a flat manifest revlog only a single step should be necessary |
|
307 | # For a flat manifest revlog only a single step should be necessary | |
312 | # as all relevant changelog entries are relevant to the flat |
|
308 | # as all relevant changelog entries are relevant to the flat | |
313 | # manifest. |
|
309 | # manifest. | |
314 | # |
|
310 | # | |
315 | # For a filelog or tree manifest dirlog however not every changelog |
|
311 | # For a filelog or tree manifest dirlog however not every changelog | |
316 | # entry will have been relevant, so we need to skip some changelog |
|
312 | # entry will have been relevant, so we need to skip some changelog | |
317 | # nodes even after ellipsis-izing. |
|
313 | # nodes even after ellipsis-izing. | |
318 | walk = [clrev] |
|
314 | walk = [clrev] | |
319 | while walk: |
|
315 | while walk: | |
320 | p = walk[0] |
|
316 | p = walk[0] | |
321 | walk = walk[1:] |
|
317 | walk = walk[1:] | |
322 | if p in self.clrev_to_localrev: |
|
318 | if p in self.clrev_to_localrev: | |
323 | return self.clrev_to_localrev[p] |
|
319 | return self.clrev_to_localrev[p] | |
324 | elif p in self.full_nodes: |
|
320 | elif p in self.full_nodes: | |
325 | walk.extend([pp for pp in self._repo.changelog.parentrevs(p) |
|
321 | walk.extend([pp for pp in self._repo.changelog.parentrevs(p) | |
326 | if pp != node.nullrev]) |
|
322 | if pp != node.nullrev]) | |
327 | elif p in self.precomputed_ellipsis: |
|
323 | elif p in self.precomputed_ellipsis: | |
328 | walk.extend([pp for pp in self.precomputed_ellipsis[p] |
|
324 | walk.extend([pp for pp in self.precomputed_ellipsis[p] | |
329 | if pp != node.nullrev]) |
|
325 | if pp != node.nullrev]) | |
330 | else: |
|
326 | else: | |
331 | # In this case, we've got an ellipsis with parents |
|
327 | # In this case, we've got an ellipsis with parents | |
332 | # outside the current bundle (likely an |
|
328 | # outside the current bundle (likely an | |
333 | # incremental pull). We "know" that we can use the |
|
329 | # incremental pull). We "know" that we can use the | |
334 | # value of this same revlog at whatever revision |
|
330 | # value of this same revlog at whatever revision | |
335 | # is pointed to by linknode. "Know" is in scare |
|
331 | # is pointed to by linknode. "Know" is in scare | |
336 | # quotes because I haven't done enough examination |
|
332 | # quotes because I haven't done enough examination | |
337 | # of edge cases to convince myself this is really |
|
333 | # of edge cases to convince myself this is really | |
338 | # a fact - it works for all the (admittedly |
|
334 | # a fact - it works for all the (admittedly | |
339 | # thorough) cases in our testsuite, but I would be |
|
335 | # thorough) cases in our testsuite, but I would be | |
340 | # somewhat unsurprised to find a case in the wild |
|
336 | # somewhat unsurprised to find a case in the wild | |
341 | # where this breaks down a bit. That said, I don't |
|
337 | # where this breaks down a bit. That said, I don't | |
342 | # know if it would hurt anything. |
|
338 | # know if it would hurt anything. | |
343 | for i in xrange(rev, 0, -1): |
|
339 | for i in xrange(rev, 0, -1): | |
344 | if revlog.linkrev(i) == clrev: |
|
340 | if revlog.linkrev(i) == clrev: | |
345 | return i |
|
341 | return i | |
346 | # We failed to resolve a parent for this node, so |
|
342 | # We failed to resolve a parent for this node, so | |
347 | # we crash the changegroup construction. |
|
343 | # we crash the changegroup construction. | |
348 | raise error.Abort( |
|
344 | raise error.Abort( | |
349 | 'unable to resolve parent while packing %r %r' |
|
345 | 'unable to resolve parent while packing %r %r' | |
350 | ' for changeset %r' % (revlog.indexfile, rev, clrev)) |
|
346 | ' for changeset %r' % (revlog.indexfile, rev, clrev)) | |
351 | return node.nullrev |
|
347 | return node.nullrev | |
352 |
|
348 | |||
353 | if not linkparents or ( |
|
349 | if not linkparents or ( | |
354 | revlog.parentrevs(rev) == (node.nullrev, node.nullrev)): |
|
350 | revlog.parentrevs(rev) == (node.nullrev, node.nullrev)): | |
355 | p1, p2 = node.nullrev, node.nullrev |
|
351 | p1, p2 = node.nullrev, node.nullrev | |
356 | elif len(linkparents) == 1: |
|
352 | elif len(linkparents) == 1: | |
357 | p1, = sorted(local(p) for p in linkparents) |
|
353 | p1, = sorted(local(p) for p in linkparents) | |
358 | p2 = node.nullrev |
|
354 | p2 = node.nullrev | |
359 | else: |
|
355 | else: | |
360 | p1, p2 = sorted(local(p) for p in linkparents) |
|
356 | p1, p2 = sorted(local(p) for p in linkparents) | |
361 | yield ellipsisdata( |
|
357 | yield ellipsisdata( | |
362 | self, rev, revlog, p1, p2, revlog.revision(rev), linknode) |
|
358 | self, rev, revlog, p1, p2, revlog.revision(rev), linknode) | |
363 | extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk) |
|
359 | extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk) | |
364 |
|
360 | |||
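
The local() helper inside the wrapped revchunk() above resolves a changelog revision number to a revision in the revlog currently being packed by walking the ellipsis-ized changelog breadth-first until it reaches a changeset that has already been mapped. The following stripped-down model uses invented dictionaries in place of the packer's state and omits the final fallback for parents that live outside the bundle:

    import collections

    clrev_to_localrev = {0: 0, 2: 1}      # changelog rev -> local revlog rev
    full_nodes = {3}                      # changelog revs sent in full
    precomputed_ellipsis = {5: [3]}       # ellipsis parents by changelog rev
    changelog_parents = {3: [2]}          # ordinary parents for full revs

    def local(clrev):
        """Breadth-first walk from clrev to an already-mapped changelog rev."""
        walk = collections.deque([clrev])
        while walk:
            p = walk.popleft()
            if p in clrev_to_localrev:
                return clrev_to_localrev[p]
            elif p in full_nodes:
                walk.extend(changelog_parents[p])
            elif p in precomputed_ellipsis:
                walk.extend(precomputed_ellipsis[p])
        raise LookupError('unable to resolve parent for changelog rev %r' % clrev)

    print(local(5))  # 1: rev 5 -> ellipsis parent 3 -> parent 2 -> local rev 1
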
365 | def deltaparent(orig, self, revlog, rev, p1, p2, prev): |
|
361 | def deltaparent(orig, self, revlog, rev, p1, p2, prev): | |
366 | if util.safehasattr(self, 'full_nodes'): |
|
362 | if util.safehasattr(self, 'full_nodes'): | |
367 | # TODO: send better deltas when in narrow mode. |
|
363 | # TODO: send better deltas when in narrow mode. | |
368 | # |
|
364 | # | |
369 | # changegroup.group() loops over revisions to send, |
|
365 | # changegroup.group() loops over revisions to send, | |
370 | # including revisions we'll skip. What this means is that |
|
366 | # including revisions we'll skip. What this means is that | |
371 | # `prev` will be a potentially useless delta base for all |
|
367 | # `prev` will be a potentially useless delta base for all | |
372 | # ellipsis nodes, as the client likely won't have it. In |
|
368 | # ellipsis nodes, as the client likely won't have it. In | |
373 | # the future we should do bookkeeping about which nodes |
|
369 | # the future we should do bookkeeping about which nodes | |
374 | # have been sent to the client, and try to be |
|
370 | # have been sent to the client, and try to be | |
375 | # significantly smarter about delta bases. This is |
|
371 | # significantly smarter about delta bases. This is | |
376 | # slightly tricky because this same code has to work for |
|
372 | # slightly tricky because this same code has to work for | |
377 | # all revlogs, and we don't have the linkrev/linknode here. |
|
373 | # all revlogs, and we don't have the linkrev/linknode here. | |
378 | return p1 |
|
374 | return p1 | |
379 | return orig(self, revlog, rev, p1, p2, prev) |
|
375 | return orig(self, revlog, rev, p1, p2, prev) | |
380 | extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent) |
|
376 | extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent) |
@@ -1,405 +1,406 b'' | |||||
1 | # narrowcommands.py - command modifications for narrowhg extension |
|
1 | # narrowcommands.py - command modifications for narrowhg extension | |
2 | # |
|
2 | # | |
3 | # Copyright 2017 Google, Inc. |
|
3 | # Copyright 2017 Google, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import itertools |
|
9 | import itertools | |
10 |
|
10 | |||
11 | from mercurial.i18n import _ |
|
11 | from mercurial.i18n import _ | |
12 | from mercurial import ( |
|
12 | from mercurial import ( | |
|
13 | changegroup, | |||
13 | cmdutil, |
|
14 | cmdutil, | |
14 | commands, |
|
15 | commands, | |
15 | discovery, |
|
16 | discovery, | |
16 | error, |
|
17 | error, | |
17 | exchange, |
|
18 | exchange, | |
18 | extensions, |
|
19 | extensions, | |
19 | hg, |
|
20 | hg, | |
20 | merge, |
|
21 | merge, | |
21 | narrowspec, |
|
22 | narrowspec, | |
22 | node, |
|
23 | node, | |
23 | pycompat, |
|
24 | pycompat, | |
24 | registrar, |
|
25 | registrar, | |
25 | repair, |
|
26 | repair, | |
26 | repoview, |
|
27 | repoview, | |
27 | util, |
|
28 | util, | |
28 | ) |
|
29 | ) | |
29 |
|
30 | |||
30 | from . import ( |
|
31 | from . import ( | |
31 | narrowbundle2, |
|
32 | narrowbundle2, | |
32 | narrowrepo, |
|
33 | narrowrepo, | |
33 | ) |
|
34 | ) | |
34 |
|
35 | |||
35 | table = {} |
|
36 | table = {} | |
36 | command = registrar.command(table) |
|
37 | command = registrar.command(table) | |
37 |
|
38 | |||
38 | def setup(): |
|
39 | def setup(): | |
39 | """Wraps user-facing mercurial commands with narrow-aware versions.""" |
|
40 | """Wraps user-facing mercurial commands with narrow-aware versions.""" | |
40 |
|
41 | |||
41 | entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd) |
|
42 | entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd) | |
42 | entry[1].append(('', 'narrow', None, |
|
43 | entry[1].append(('', 'narrow', None, | |
43 | _("create a narrow clone of select files"))) |
|
44 | _("create a narrow clone of select files"))) | |
44 | entry[1].append(('', 'depth', '', |
|
45 | entry[1].append(('', 'depth', '', | |
45 | _("limit the history fetched by distance from heads"))) |
|
46 | _("limit the history fetched by distance from heads"))) | |
46 | # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit |
|
47 | # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit | |
47 | if 'sparse' not in extensions.enabled(): |
|
48 | if 'sparse' not in extensions.enabled(): | |
48 | entry[1].append(('', 'include', [], |
|
49 | entry[1].append(('', 'include', [], | |
49 | _("specifically fetch this file/directory"))) |
|
50 | _("specifically fetch this file/directory"))) | |
50 | entry[1].append( |
|
51 | entry[1].append( | |
51 | ('', 'exclude', [], |
|
52 | ('', 'exclude', [], | |
52 | _("do not fetch this file/directory, even if included"))) |
|
53 | _("do not fetch this file/directory, even if included"))) | |
53 |
|
54 | |||
54 | entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd) |
|
55 | entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd) | |
55 | entry[1].append(('', 'depth', '', |
|
56 | entry[1].append(('', 'depth', '', | |
56 | _("limit the history fetched by distance from heads"))) |
|
57 | _("limit the history fetched by distance from heads"))) | |
57 |
|
58 | |||
58 | extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd) |
|
59 | extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd) | |
59 |
|
60 | |||
60 | def expandpull(pullop, includepats, excludepats): |
|
61 | def expandpull(pullop, includepats, excludepats): | |
61 | if not narrowspec.needsexpansion(includepats): |
|
62 | if not narrowspec.needsexpansion(includepats): | |
62 | return includepats, excludepats |
|
63 | return includepats, excludepats | |
63 |
|
64 | |||
64 | heads = pullop.heads or pullop.rheads |
|
65 | heads = pullop.heads or pullop.rheads | |
65 | includepats, excludepats = pullop.remote.expandnarrow( |
|
66 | includepats, excludepats = pullop.remote.expandnarrow( | |
66 | includepats, excludepats, heads) |
|
67 | includepats, excludepats, heads) | |
67 | pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % ( |
|
68 | pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % ( | |
68 | includepats, excludepats)) |
|
69 | includepats, excludepats)) | |
69 | return set(includepats), set(excludepats) |
|
70 | return set(includepats), set(excludepats) | |
70 |
|
71 | |||
71 | def clonenarrowcmd(orig, ui, repo, *args, **opts): |
|
72 | def clonenarrowcmd(orig, ui, repo, *args, **opts): | |
72 | """Wraps clone command, so 'hg clone' first wraps localrepo.clone().""" |
|
73 | """Wraps clone command, so 'hg clone' first wraps localrepo.clone().""" | |
73 | opts = pycompat.byteskwargs(opts) |
|
74 | opts = pycompat.byteskwargs(opts) | |
74 | wrappedextraprepare = util.nullcontextmanager() |
|
75 | wrappedextraprepare = util.nullcontextmanager() | |
75 | opts_narrow = opts['narrow'] |
|
76 | opts_narrow = opts['narrow'] | |
76 | if opts_narrow: |
|
77 | if opts_narrow: | |
77 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
78 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): | |
78 | # Create narrow spec patterns from clone flags |
|
79 | # Create narrow spec patterns from clone flags | |
79 | includepats = narrowspec.parsepatterns(opts['include']) |
|
80 | includepats = narrowspec.parsepatterns(opts['include']) | |
80 | excludepats = narrowspec.parsepatterns(opts['exclude']) |
|
81 | excludepats = narrowspec.parsepatterns(opts['exclude']) | |
81 |
|
82 | |||
82 | # If necessary, ask the server to expand the narrowspec. |
|
83 | # If necessary, ask the server to expand the narrowspec. | |
83 | includepats, excludepats = expandpull( |
|
84 | includepats, excludepats = expandpull( | |
84 | pullop, includepats, excludepats) |
|
85 | pullop, includepats, excludepats) | |
85 |
|
86 | |||
86 | if not includepats and excludepats: |
|
87 | if not includepats and excludepats: | |
87 | # If nothing was included, we assume the user meant to include |
|
88 | # If nothing was included, we assume the user meant to include | |
88 | # everything, except what they asked to exclude. |
|
89 | # everything, except what they asked to exclude. | |
89 | includepats = {'path:.'} |
|
90 | includepats = {'path:.'} | |
90 |
|
91 | |||
91 | narrowspec.save(pullop.repo, includepats, excludepats) |
|
92 | narrowspec.save(pullop.repo, includepats, excludepats) | |
92 |
|
93 | |||
93 | # This will populate 'includepats' etc with the values from the |
|
94 | # This will populate 'includepats' etc with the values from the | |
94 | # narrowspec we just saved. |
|
95 | # narrowspec we just saved. | |
95 | orig(pullop, kwargs) |
|
96 | orig(pullop, kwargs) | |
96 |
|
97 | |||
97 | if opts.get('depth'): |
|
98 | if opts.get('depth'): | |
98 | kwargs['depth'] = opts['depth'] |
|
99 | kwargs['depth'] = opts['depth'] | |
99 | wrappedextraprepare = extensions.wrappedfunction(exchange, |
|
100 | wrappedextraprepare = extensions.wrappedfunction(exchange, | |
100 | '_pullbundle2extraprepare', pullbundle2extraprepare_widen) |
|
101 | '_pullbundle2extraprepare', pullbundle2extraprepare_widen) | |
101 |
|
102 | |||
102 | def pullnarrow(orig, repo, *args, **kwargs): |
|
103 | def pullnarrow(orig, repo, *args, **kwargs): | |
103 | narrowrepo.wraprepo(repo.unfiltered(), opts_narrow) |
|
104 | narrowrepo.wraprepo(repo.unfiltered(), opts_narrow) | |
104 | if isinstance(repo, repoview.repoview): |
|
105 | if isinstance(repo, repoview.repoview): | |
105 | repo.__class__.__bases__ = (repo.__class__.__bases__[0], |
|
106 | repo.__class__.__bases__ = (repo.__class__.__bases__[0], | |
106 | repo.unfiltered().__class__) |
|
107 | repo.unfiltered().__class__) | |
107 | if opts_narrow: |
|
108 | if opts_narrow: | |
108 |
repo.requirements.add( |
|
109 | repo.requirements.add(changegroup.NARROW_REQUIREMENT) | |
109 | repo._writerequirements() |
|
110 | repo._writerequirements() | |
110 |
|
111 | |||
111 | return orig(repo, *args, **kwargs) |
|
112 | return orig(repo, *args, **kwargs) | |
112 |
|
113 | |||
113 | wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow) |
|
114 | wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow) | |
114 |
|
115 | |||
115 | with wrappedextraprepare, wrappedpull: |
|
116 | with wrappedextraprepare, wrappedpull: | |
116 | return orig(ui, repo, *args, **pycompat.strkwargs(opts)) |
|
117 | return orig(ui, repo, *args, **pycompat.strkwargs(opts)) | |
117 |
|
118 | |||
118 | def pullnarrowcmd(orig, ui, repo, *args, **opts): |
|
119 | def pullnarrowcmd(orig, ui, repo, *args, **opts): | |
119 | """Wraps pull command to allow modifying narrow spec.""" |
|
120 | """Wraps pull command to allow modifying narrow spec.""" | |
120 | wrappedextraprepare = util.nullcontextmanager() |
|
121 | wrappedextraprepare = util.nullcontextmanager() | |
121 |
if |
|
122 | if changegroup.NARROW_REQUIREMENT in repo.requirements: | |
122 |
|
123 | |||
123 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
124 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): | |
124 | orig(pullop, kwargs) |
|
125 | orig(pullop, kwargs) | |
125 | if opts.get(r'depth'): |
|
126 | if opts.get(r'depth'): | |
126 | kwargs['depth'] = opts[r'depth'] |
|
127 | kwargs['depth'] = opts[r'depth'] | |
127 | wrappedextraprepare = extensions.wrappedfunction(exchange, |
|
128 | wrappedextraprepare = extensions.wrappedfunction(exchange, | |
128 | '_pullbundle2extraprepare', pullbundle2extraprepare_widen) |
|
129 | '_pullbundle2extraprepare', pullbundle2extraprepare_widen) | |
129 |
|
130 | |||
130 | with wrappedextraprepare: |
|
131 | with wrappedextraprepare: | |
131 | return orig(ui, repo, *args, **opts) |
|
132 | return orig(ui, repo, *args, **opts) | |
132 |
|
133 | |||
133 | def archivenarrowcmd(orig, ui, repo, *args, **opts): |
|
134 | def archivenarrowcmd(orig, ui, repo, *args, **opts): | |
134 | """Wraps archive command to narrow the default includes.""" |
|
135 | """Wraps archive command to narrow the default includes.""" | |
135 |
if |
|
136 | if changegroup.NARROW_REQUIREMENT in repo.requirements: | |
136 | repo_includes, repo_excludes = repo.narrowpats |
|
137 | repo_includes, repo_excludes = repo.narrowpats | |
137 | includes = set(opts.get(r'include', [])) |
|
138 | includes = set(opts.get(r'include', [])) | |
138 | excludes = set(opts.get(r'exclude', [])) |
|
139 | excludes = set(opts.get(r'exclude', [])) | |
139 | includes, excludes, unused_invalid = narrowspec.restrictpatterns( |
|
140 | includes, excludes, unused_invalid = narrowspec.restrictpatterns( | |
140 | includes, excludes, repo_includes, repo_excludes) |
|
141 | includes, excludes, repo_includes, repo_excludes) | |
141 | if includes: |
|
142 | if includes: | |
142 | opts[r'include'] = includes |
|
143 | opts[r'include'] = includes | |
143 | if excludes: |
|
144 | if excludes: | |
144 | opts[r'exclude'] = excludes |
|
145 | opts[r'exclude'] = excludes | |
145 | return orig(ui, repo, *args, **opts) |
|
146 | return orig(ui, repo, *args, **opts) | |
146 |
|
147 | |||
147 | def pullbundle2extraprepare(orig, pullop, kwargs): |
|
148 | def pullbundle2extraprepare(orig, pullop, kwargs): | |
148 | repo = pullop.repo |
|
149 | repo = pullop.repo | |
149 |
if |
|
150 | if changegroup.NARROW_REQUIREMENT not in repo.requirements: | |
150 | return orig(pullop, kwargs) |
|
151 | return orig(pullop, kwargs) | |
151 |
|
152 | |||
152 | if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps: |
|
153 | if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps: | |
153 | raise error.Abort(_("server doesn't support narrow clones")) |
|
154 | raise error.Abort(_("server doesn't support narrow clones")) | |
154 | orig(pullop, kwargs) |
|
155 | orig(pullop, kwargs) | |
155 | kwargs['narrow'] = True |
|
156 | kwargs['narrow'] = True | |
156 | include, exclude = repo.narrowpats |
|
157 | include, exclude = repo.narrowpats | |
157 | kwargs['oldincludepats'] = include |
|
158 | kwargs['oldincludepats'] = include | |
158 | kwargs['oldexcludepats'] = exclude |
|
159 | kwargs['oldexcludepats'] = exclude | |
159 | kwargs['includepats'] = include |
|
160 | kwargs['includepats'] = include | |
160 | kwargs['excludepats'] = exclude |
|
161 | kwargs['excludepats'] = exclude | |
161 | kwargs['known'] = [node.hex(ctx.node()) for ctx in |
|
162 | kwargs['known'] = [node.hex(ctx.node()) for ctx in | |
162 | repo.set('::%ln', pullop.common) |
|
163 | repo.set('::%ln', pullop.common) | |
163 | if ctx.node() != node.nullid] |
|
164 | if ctx.node() != node.nullid] | |
164 | if not kwargs['known']: |
|
165 | if not kwargs['known']: | |
165 | # Mercurial serializes an empty list as '' and deserializes it as |
|
166 | # Mercurial serializes an empty list as '' and deserializes it as | |
166 | # [''], so delete it instead to avoid handling the empty string on the |
|
167 | # [''], so delete it instead to avoid handling the empty string on the | |
167 | # server. |
|
168 | # server. | |
168 | del kwargs['known'] |
|
169 | del kwargs['known'] | |
169 |
|
170 | |||
170 | extensions.wrapfunction(exchange,'_pullbundle2extraprepare', |
|
171 | extensions.wrapfunction(exchange,'_pullbundle2extraprepare', | |
171 | pullbundle2extraprepare) |
|
172 | pullbundle2extraprepare) | |
172 |
|
173 | |||
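
The comment above about kwargs['known'] reflects a round-trip quirk: an empty list serialized to the wire comes back as a list containing one empty string, so the key is dropped entirely instead. The actual bundle2 encoding differs in detail, but the pitfall is the same one that plain string joining and splitting exhibits:

    known = []
    encoded = ','.join(known)       # '' - an empty list flattens to an empty string
    decoded = encoded.split(',')    # [''] - and comes back as a one-element list
    print(decoded == known)         # False, hence `del kwargs['known']` above
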
173 | def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes, |
|
174 | def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes, | |
174 | newincludes, newexcludes, force): |
|
175 | newincludes, newexcludes, force): | |
175 | oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes) |
|
176 | oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes) | |
176 | newmatch = narrowspec.match(repo.root, newincludes, newexcludes) |
|
177 | newmatch = narrowspec.match(repo.root, newincludes, newexcludes) | |
177 |
|
178 | |||
178 | # This is essentially doing "hg outgoing" to find all local-only |
|
179 | # This is essentially doing "hg outgoing" to find all local-only | |
179 | # commits. We will then check that the local-only commits don't |
|
180 | # commits. We will then check that the local-only commits don't | |
180 | # have any changes to files that will be untracked. |
|
181 | # have any changes to files that will be untracked. | |
181 | unfi = repo.unfiltered() |
|
182 | unfi = repo.unfiltered() | |
182 | outgoing = discovery.findcommonoutgoing(unfi, remote, |
|
183 | outgoing = discovery.findcommonoutgoing(unfi, remote, | |
183 | commoninc=commoninc) |
|
184 | commoninc=commoninc) | |
184 | ui.status(_('looking for local changes to affected paths\n')) |
|
185 | ui.status(_('looking for local changes to affected paths\n')) | |
185 | localnodes = [] |
|
186 | localnodes = [] | |
186 | for n in itertools.chain(outgoing.missing, outgoing.excluded): |
|
187 | for n in itertools.chain(outgoing.missing, outgoing.excluded): | |
187 | if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()): |
|
188 | if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()): | |
188 | localnodes.append(n) |
|
189 | localnodes.append(n) | |
189 | revstostrip = unfi.revs('descendants(%ln)', localnodes) |
|
190 | revstostrip = unfi.revs('descendants(%ln)', localnodes) | |
190 | hiddenrevs = repoview.filterrevs(repo, 'visible') |
|
191 | hiddenrevs = repoview.filterrevs(repo, 'visible') | |
191 | visibletostrip = list(repo.changelog.node(r) |
|
192 | visibletostrip = list(repo.changelog.node(r) | |
192 | for r in (revstostrip - hiddenrevs)) |
|
193 | for r in (revstostrip - hiddenrevs)) | |
193 | if visibletostrip: |
|
194 | if visibletostrip: | |
194 | ui.status(_('The following changeset(s) or their ancestors have ' |
|
195 | ui.status(_('The following changeset(s) or their ancestors have ' | |
195 | 'local changes not on the remote:\n')) |
|
196 | 'local changes not on the remote:\n')) | |
196 | maxnodes = 10 |
|
197 | maxnodes = 10 | |
197 | if ui.verbose or len(visibletostrip) <= maxnodes: |
|
198 | if ui.verbose or len(visibletostrip) <= maxnodes: | |
198 | for n in visibletostrip: |
|
199 | for n in visibletostrip: | |
199 | ui.status('%s\n' % node.short(n)) |
|
200 | ui.status('%s\n' % node.short(n)) | |
200 | else: |
|
201 | else: | |
201 | for n in visibletostrip[:maxnodes]: |
|
202 | for n in visibletostrip[:maxnodes]: | |
202 | ui.status('%s\n' % node.short(n)) |
|
203 | ui.status('%s\n' % node.short(n)) | |
203 | ui.status(_('...and %d more, use --verbose to list all\n') % |
|
204 | ui.status(_('...and %d more, use --verbose to list all\n') % | |
204 | (len(visibletostrip) - maxnodes)) |
|
205 | (len(visibletostrip) - maxnodes)) | |
205 | if not force: |
|
206 | if not force: | |
206 | raise error.Abort(_('local changes found'), |
|
207 | raise error.Abort(_('local changes found'), | |
207 | hint=_('use --force-delete-local-changes to ' |
|
208 | hint=_('use --force-delete-local-changes to ' | |
208 | 'ignore')) |
|
209 | 'ignore')) | |
209 |
|
210 | |||
210 | if revstostrip: |
|
211 | if revstostrip: | |
211 | tostrip = [unfi.changelog.node(r) for r in revstostrip] |
|
212 | tostrip = [unfi.changelog.node(r) for r in revstostrip] | |
212 | if repo['.'].node() in tostrip: |
|
213 | if repo['.'].node() in tostrip: | |
213 | # stripping working copy, so move to a different commit first |
|
214 | # stripping working copy, so move to a different commit first | |
214 | urev = max(repo.revs('(::%n) - %ln + null', |
|
215 | urev = max(repo.revs('(::%n) - %ln + null', | |
215 | repo['.'].node(), visibletostrip)) |
|
216 | repo['.'].node(), visibletostrip)) | |
216 | hg.clean(repo, urev) |
|
217 | hg.clean(repo, urev) | |
217 | repair.strip(ui, unfi, tostrip, topic='narrow') |
|
218 | repair.strip(ui, unfi, tostrip, topic='narrow') | |
218 |
|
219 | |||
219 | todelete = [] |
|
220 | todelete = [] | |
220 | for f, f2, size in repo.store.datafiles(): |
|
221 | for f, f2, size in repo.store.datafiles(): | |
221 | if f.startswith('data/'): |
|
222 | if f.startswith('data/'): | |
222 | file = f[5:-2] |
|
223 | file = f[5:-2] | |
223 | if not newmatch(file): |
|
224 | if not newmatch(file): | |
224 | todelete.append(f) |
|
225 | todelete.append(f) | |
225 | elif f.startswith('meta/'): |
|
226 | elif f.startswith('meta/'): | |
226 | dir = f[5:-13] |
|
227 | dir = f[5:-13] | |
227 | dirs = ['.'] + sorted(util.dirs({dir})) + [dir] |
|
228 | dirs = ['.'] + sorted(util.dirs({dir})) + [dir] | |
228 | include = True |
|
229 | include = True | |
229 | for d in dirs: |
|
230 | for d in dirs: | |
230 | visit = newmatch.visitdir(d) |
|
231 | visit = newmatch.visitdir(d) | |
231 | if not visit: |
|
232 | if not visit: | |
232 | include = False |
|
233 | include = False | |
233 | break |
|
234 | break | |
234 | if visit == 'all': |
|
235 | if visit == 'all': | |
235 | break |
|
236 | break | |
236 | if not include: |
|
237 | if not include: | |
237 | todelete.append(f) |
|
238 | todelete.append(f) | |
238 |
|
239 | |||
239 | repo.destroying() |
|
240 | repo.destroying() | |
240 |
|
241 | |||
241 | with repo.transaction("narrowing"): |
|
242 | with repo.transaction("narrowing"): | |
242 | for f in todelete: |
|
243 | for f in todelete: | |
243 | ui.status(_('deleting %s\n') % f) |
|
244 | ui.status(_('deleting %s\n') % f) | |
244 | util.unlinkpath(repo.svfs.join(f)) |
|
245 | util.unlinkpath(repo.svfs.join(f)) | |
245 | repo.store.markremoved(f) |
|
246 | repo.store.markremoved(f) | |
246 |
|
247 | |||
247 | for f in repo.dirstate: |
|
248 | for f in repo.dirstate: | |
248 | if not newmatch(f): |
|
249 | if not newmatch(f): | |
249 | repo.dirstate.drop(f) |
|
250 | repo.dirstate.drop(f) | |
250 | repo.wvfs.unlinkpath(f) |
|
251 | repo.wvfs.unlinkpath(f) | |
251 | repo.setnarrowpats(newincludes, newexcludes) |
|
252 | repo.setnarrowpats(newincludes, newexcludes) | |
252 |
|
253 | |||
253 | repo.destroyed() |
|
254 | repo.destroyed() | |
254 |
|
255 | |||
255 | def _widen(ui, repo, remote, commoninc, newincludes, newexcludes): |
|
256 | def _widen(ui, repo, remote, commoninc, newincludes, newexcludes): | |
256 | newmatch = narrowspec.match(repo.root, newincludes, newexcludes) |
|
257 | newmatch = narrowspec.match(repo.root, newincludes, newexcludes) | |
257 |
|
258 | |||
258 | # TODO(martinvonz): Get expansion working with widening/narrowing. |
|
259 | # TODO(martinvonz): Get expansion working with widening/narrowing. | |
259 | if narrowspec.needsexpansion(newincludes): |
|
260 | if narrowspec.needsexpansion(newincludes): | |
260 | raise error.Abort('Expansion not yet supported on pull') |
|
261 | raise error.Abort('Expansion not yet supported on pull') | |
261 |
|
262 | |||
262 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
263 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): | |
263 | orig(pullop, kwargs) |
|
264 | orig(pullop, kwargs) | |
264 | # The old{in,ex}cludepats have already been set by orig() |
|
265 | # The old{in,ex}cludepats have already been set by orig() | |
265 | kwargs['includepats'] = newincludes |
|
266 | kwargs['includepats'] = newincludes | |
266 | kwargs['excludepats'] = newexcludes |
|
267 | kwargs['excludepats'] = newexcludes | |
267 | wrappedextraprepare = extensions.wrappedfunction(exchange, |
|
268 | wrappedextraprepare = extensions.wrappedfunction(exchange, | |
268 | '_pullbundle2extraprepare', pullbundle2extraprepare_widen) |
|
269 | '_pullbundle2extraprepare', pullbundle2extraprepare_widen) | |
269 |
|
270 | |||
270 | # define a function that narrowbundle2 can call after creating the |
|
271 | # define a function that narrowbundle2 can call after creating the | |
271 | # backup bundle, but before applying the bundle from the server |
|
272 | # backup bundle, but before applying the bundle from the server | |
272 | def setnewnarrowpats(): |
|
273 | def setnewnarrowpats(): | |
273 | repo.setnarrowpats(newincludes, newexcludes) |
|
274 | repo.setnarrowpats(newincludes, newexcludes) | |
274 | repo.setnewnarrowpats = setnewnarrowpats |
|
275 | repo.setnewnarrowpats = setnewnarrowpats | |
275 |
|
276 | |||
276 | ds = repo.dirstate |
|
277 | ds = repo.dirstate | |
277 | p1, p2 = ds.p1(), ds.p2() |
|
278 | p1, p2 = ds.p1(), ds.p2() | |
278 | with ds.parentchange(): |
|
279 | with ds.parentchange(): | |
279 | ds.setparents(node.nullid, node.nullid) |
|
280 | ds.setparents(node.nullid, node.nullid) | |
280 | common = commoninc[0] |
|
281 | common = commoninc[0] | |
281 | with wrappedextraprepare: |
|
282 | with wrappedextraprepare: | |
282 | exchange.pull(repo, remote, heads=common) |
|
283 | exchange.pull(repo, remote, heads=common) | |
283 | with ds.parentchange(): |
|
284 | with ds.parentchange(): | |
284 | ds.setparents(p1, p2) |
|
285 | ds.setparents(p1, p2) | |
285 |
|
286 | |||
286 | actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()} |
|
287 | actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()} | |
287 | addgaction = actions['g'].append |
|
288 | addgaction = actions['g'].append | |
288 |
|
289 | |||
289 | mf = repo['.'].manifest().matches(newmatch) |
|
290 | mf = repo['.'].manifest().matches(newmatch) | |
290 | for f, fn in mf.iteritems(): |
|
291 | for f, fn in mf.iteritems(): | |
291 | if f not in repo.dirstate: |
|
292 | if f not in repo.dirstate: | |
292 | addgaction((f, (mf.flags(f), False), |
|
293 | addgaction((f, (mf.flags(f), False), | |
293 | "add from widened narrow clone")) |
|
294 | "add from widened narrow clone")) | |
294 |
|
295 | |||
295 | merge.applyupdates(repo, actions, wctx=repo[None], |
|
296 | merge.applyupdates(repo, actions, wctx=repo[None], | |
296 | mctx=repo['.'], overwrite=False) |
|
297 | mctx=repo['.'], overwrite=False) | |
297 | merge.recordupdates(repo, actions, branchmerge=False) |
|
298 | merge.recordupdates(repo, actions, branchmerge=False) | |
298 |
|
299 | |||
299 | # TODO(rdamazio): Make new matcher format and update description |
|
300 | # TODO(rdamazio): Make new matcher format and update description | |
300 | @command('tracked', |
|
301 | @command('tracked', | |
301 | [('', 'addinclude', [], _('new paths to include')), |
|
302 | [('', 'addinclude', [], _('new paths to include')), | |
302 | ('', 'removeinclude', [], _('old paths to no longer include')), |
|
303 | ('', 'removeinclude', [], _('old paths to no longer include')), | |
303 | ('', 'addexclude', [], _('new paths to exclude')), |
|
304 | ('', 'addexclude', [], _('new paths to exclude')), | |
304 | ('', 'removeexclude', [], _('old paths to no longer exclude')), |
|
305 | ('', 'removeexclude', [], _('old paths to no longer exclude')), | |
305 | ('', 'clear', False, _('whether to replace the existing narrowspec')), |
|
306 | ('', 'clear', False, _('whether to replace the existing narrowspec')), | |
306 | ('', 'force-delete-local-changes', False, |
|
307 | ('', 'force-delete-local-changes', False, | |
307 | _('forces deletion of local changes when narrowing')), |
|
308 | _('forces deletion of local changes when narrowing')), | |
308 | ] + commands.remoteopts, |
|
309 | ] + commands.remoteopts, | |
309 | _('[OPTIONS]... [REMOTE]'), |
|
310 | _('[OPTIONS]... [REMOTE]'), | |
310 | inferrepo=True) |
|
311 | inferrepo=True) | |
311 | def trackedcmd(ui, repo, remotepath=None, *pats, **opts): |
|
312 | def trackedcmd(ui, repo, remotepath=None, *pats, **opts): | |
312 | """show or change the current narrowspec |
|
313 | """show or change the current narrowspec | |
313 |
|
314 | |||
314 | With no argument, shows the current narrowspec entries, one per line. Each |
|
315 | With no argument, shows the current narrowspec entries, one per line. Each | |
315 | line will be prefixed with 'I' or 'X' for included or excluded patterns, |
|
316 | line will be prefixed with 'I' or 'X' for included or excluded patterns, | |
316 | respectively. |
|
317 | respectively. | |
317 |
|
318 | |||
318 | The narrowspec consists of expressions to match remote files and/or |
|
319 | The narrowspec consists of expressions to match remote files and/or | |
319 | directories that should be pulled into your client. |
|
320 | directories that should be pulled into your client. | |
320 | The narrowspec has *include* and *exclude* expressions, with excludes always |
|
321 | The narrowspec has *include* and *exclude* expressions, with excludes always | |
321 | trumping includes: that is, if a file matches an exclude expression, it will |
|
322 | trumping includes: that is, if a file matches an exclude expression, it will | |
322 | be excluded even if it also matches an include expression. |
|
323 | be excluded even if it also matches an include expression. | |
323 | Excluding files that were never included has no effect. |
|
324 | Excluding files that were never included has no effect. | |
324 |
|
325 | |||
325 | Each included or excluded entry is in the format described by |
|
326 | Each included or excluded entry is in the format described by | |
326 | 'hg help patterns'. |
|
327 | 'hg help patterns'. | |
327 |
|
328 | |||
328 | The options allow you to add or remove included and excluded expressions. |
|
329 | The options allow you to add or remove included and excluded expressions. | |
329 |
|
330 | |||
330 | If --clear is specified, then all previous includes and excludes are DROPPED |
|
331 | If --clear is specified, then all previous includes and excludes are DROPPED | |
331 | and replaced by the new ones specified to --addinclude and --addexclude. |
|
332 | and replaced by the new ones specified to --addinclude and --addexclude. | |
332 | If --clear is specified without any further options, the narrowspec will be |
|
333 | If --clear is specified without any further options, the narrowspec will be | |
333 | empty and will not match any files. |
|
334 | empty and will not match any files. | |
334 | """ |
|
335 | """ | |
335 | opts = pycompat.byteskwargs(opts) |
|
336 | opts = pycompat.byteskwargs(opts) | |
336 |
if |
|
337 | if changegroup.NARROW_REQUIREMENT not in repo.requirements: | |
337 | ui.warn(_('The narrow command is only supported on repositories cloned' |
|
338 | ui.warn(_('The narrow command is only supported on repositories cloned' | |
338 | ' with --narrow.\n')) |
|
339 | ' with --narrow.\n')) | |
339 | return 1 |
|
340 | return 1 | |
340 |
|
341 | |||
341 | # Before supporting, decide whether "hg tracked --clear" should mean |
|
342 | # Before supporting, decide whether "hg tracked --clear" should mean | |
342 | # tracking no paths or all paths. |
|
343 | # tracking no paths or all paths. | |
343 | if opts['clear']: |
|
344 | if opts['clear']: | |
344 | ui.warn(_('The --clear option is not yet supported.\n')) |
|
345 | ui.warn(_('The --clear option is not yet supported.\n')) | |
345 | return 1 |
|
346 | return 1 | |
346 |
|
347 | |||
347 | if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']): |
|
348 | if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']): | |
348 | raise error.Abort('Expansion not yet supported on widen/narrow') |
|
349 | raise error.Abort('Expansion not yet supported on widen/narrow') | |
349 |
|
350 | |||
350 | addedincludes = narrowspec.parsepatterns(opts['addinclude']) |
|
351 | addedincludes = narrowspec.parsepatterns(opts['addinclude']) | |
351 | removedincludes = narrowspec.parsepatterns(opts['removeinclude']) |
|
352 | removedincludes = narrowspec.parsepatterns(opts['removeinclude']) | |
352 | addedexcludes = narrowspec.parsepatterns(opts['addexclude']) |
|
353 | addedexcludes = narrowspec.parsepatterns(opts['addexclude']) | |
353 | removedexcludes = narrowspec.parsepatterns(opts['removeexclude']) |
|
354 | removedexcludes = narrowspec.parsepatterns(opts['removeexclude']) | |
354 | widening = addedincludes or removedexcludes |
|
355 | widening = addedincludes or removedexcludes | |
355 | narrowing = removedincludes or addedexcludes |
|
356 | narrowing = removedincludes or addedexcludes | |
356 | only_show = not widening and not narrowing |
|
357 | only_show = not widening and not narrowing | |
357 |
|
358 | |||
358 | # Only print the current narrowspec. |
|
359 | # Only print the current narrowspec. | |
359 | if only_show: |
|
360 | if only_show: | |
360 | include, exclude = repo.narrowpats |
|
361 | include, exclude = repo.narrowpats | |
361 |
|
362 | |||
362 | ui.pager('tracked') |
|
363 | ui.pager('tracked') | |
363 | fm = ui.formatter('narrow', opts) |
|
364 | fm = ui.formatter('narrow', opts) | |
364 | for i in sorted(include): |
|
365 | for i in sorted(include): | |
365 | fm.startitem() |
|
366 | fm.startitem() | |
366 | fm.write('status', '%s ', 'I', label='narrow.included') |
|
367 | fm.write('status', '%s ', 'I', label='narrow.included') | |
367 | fm.write('pat', '%s\n', i, label='narrow.included') |
|
368 | fm.write('pat', '%s\n', i, label='narrow.included') | |
368 | for i in sorted(exclude): |
|
369 | for i in sorted(exclude): | |
369 | fm.startitem() |
|
370 | fm.startitem() | |
370 | fm.write('status', '%s ', 'X', label='narrow.excluded') |
|
371 | fm.write('status', '%s ', 'X', label='narrow.excluded') | |
371 | fm.write('pat', '%s\n', i, label='narrow.excluded') |
|
372 | fm.write('pat', '%s\n', i, label='narrow.excluded') | |
372 | fm.end() |
|
373 | fm.end() | |
373 | return 0 |
|
374 | return 0 | |
374 |
|
375 | |||
375 | with repo.wlock(), repo.lock(): |
|
376 | with repo.wlock(), repo.lock(): | |
376 | cmdutil.bailifchanged(repo) |
|
377 | cmdutil.bailifchanged(repo) | |
377 |
|
378 | |||
378 | # Find the revisions we have in common with the remote. These will |
|
379 | # Find the revisions we have in common with the remote. These will | |
379 | # be used for finding local-only changes for narrowing. They will |
|
380 | # be used for finding local-only changes for narrowing. They will | |
380 | # also define the set of revisions to update for widening. |
|
381 | # also define the set of revisions to update for widening. | |
381 | remotepath = ui.expandpath(remotepath or 'default') |
|
382 | remotepath = ui.expandpath(remotepath or 'default') | |
382 | url, branches = hg.parseurl(remotepath) |
|
383 | url, branches = hg.parseurl(remotepath) | |
383 | ui.status(_('comparing with %s\n') % util.hidepassword(url)) |
|
384 | ui.status(_('comparing with %s\n') % util.hidepassword(url)) | |
384 | remote = hg.peer(repo, opts, url) |
|
385 | remote = hg.peer(repo, opts, url) | |
385 | commoninc = discovery.findcommonincoming(repo, remote) |
|
386 | commoninc = discovery.findcommonincoming(repo, remote) | |
386 |
|
387 | |||
387 | oldincludes, oldexcludes = repo.narrowpats |
|
388 | oldincludes, oldexcludes = repo.narrowpats | |
388 | if narrowing: |
|
389 | if narrowing: | |
389 | newincludes = oldincludes - removedincludes |
|
390 | newincludes = oldincludes - removedincludes | |
390 | newexcludes = oldexcludes | addedexcludes |
|
391 | newexcludes = oldexcludes | addedexcludes | |
391 | _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes, |
|
392 | _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes, | |
392 | newincludes, newexcludes, |
|
393 | newincludes, newexcludes, | |
393 | opts['force_delete_local_changes']) |
|
394 | opts['force_delete_local_changes']) | |
394 | # _narrow() updated the narrowspec and _widen() below needs to |
|
395 | # _narrow() updated the narrowspec and _widen() below needs to | |
395 | # use the updated values as its base (otherwise removed includes |
|
396 | # use the updated values as its base (otherwise removed includes | |
396 | # and addedexcludes will be lost in the resulting narrowspec) |
|
397 | # and addedexcludes will be lost in the resulting narrowspec) | |
397 | oldincludes = newincludes |
|
398 | oldincludes = newincludes | |
398 | oldexcludes = newexcludes |
|
399 | oldexcludes = newexcludes | |
399 |
|
400 | |||
400 | if widening: |
|
401 | if widening: | |
401 | newincludes = oldincludes | addedincludes |
|
402 | newincludes = oldincludes | addedincludes | |
402 | newexcludes = oldexcludes - removedexcludes |
|
403 | newexcludes = oldexcludes - removedexcludes | |
403 | _widen(ui, repo, remote, commoninc, newincludes, newexcludes) |
|
404 | _widen(ui, repo, remote, commoninc, newincludes, newexcludes) | |
404 |
|
405 | |||
405 | return 0 |
|
406 | return 0 |
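
The option handling above boils down to set arithmetic on the parsed patterns: adding includes or removing excludes widens the narrowspec, removing includes or adding excludes narrows it, and narrowing is applied before widening so the widen step starts from the already-reduced spec. The following is a minimal standalone sketch of that bookkeeping, not the extension's own code; the helper name and the plain string patterns are illustrative only.

# Sketch of the narrowspec update performed by trackedcmd() above
# (hypothetical helper, plain strings instead of parsed narrowspec entries).
def updatespec(oldincludes, oldexcludes,
               addinclude, removeinclude, addexclude, removeexclude):
    addedincludes = set(addinclude)
    removedincludes = set(removeinclude)
    addedexcludes = set(addexclude)
    removedexcludes = set(removeexclude)

    widening = bool(addedincludes or removedexcludes)
    narrowing = bool(removedincludes or addedexcludes)

    includes, excludes = set(oldincludes), set(oldexcludes)
    if narrowing:
        # Narrow first; the widen step below then uses the updated values,
        # so removed includes and added excludes are not lost.
        includes -= removedincludes
        excludes |= addedexcludes
    if widening:
        includes |= addedincludes
        excludes -= removedexcludes
    return includes, excludes

# Example: stop tracking 'path:docs', start tracking 'path:tests'.
print(updatespec({'path:src', 'path:docs'}, set(),
                 addinclude=['path:tests'], removeinclude=['path:docs'],
                 addexclude=[], removeexclude=[]))
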
@@ -1,113 +1,110 b'' | |||||
1 | # narrowrepo.py - repository which supports narrow revlogs, lazy loading |
|
1 | # narrowrepo.py - repository which supports narrow revlogs, lazy loading | |
2 | # |
|
2 | # | |
3 | # Copyright 2017 Google, Inc. |
|
3 | # Copyright 2017 Google, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | from mercurial import ( |
|
10 | from mercurial import ( | |
11 | bundlerepo, |
|
11 | bundlerepo, | |
|
12 | changegroup, | |||
12 | hg, |
|
13 | hg, | |
13 | localrepo, |
|
14 | localrepo, | |
14 | match as matchmod, |
|
15 | match as matchmod, | |
15 | narrowspec, |
|
16 | narrowspec, | |
16 | scmutil, |
|
17 | scmutil, | |
17 | ) |
|
18 | ) | |
18 |
|
19 | |||
19 | from . import ( |
|
20 | from . import ( | |
20 | narrowrevlog, |
|
21 | narrowrevlog, | |
21 | ) |
|
22 | ) | |
22 |
|
23 | |||
23 | # When narrowing is finalized and no longer subject to format changes, |
|
|||
24 | # we should move this to just "narrow" or similar. |
|
|||
25 | REQUIREMENT = 'narrowhg-experimental' |
|
|||
26 |
|
||||
27 | def wrappostshare(orig, sourcerepo, destrepo, **kwargs): |
|
24 | def wrappostshare(orig, sourcerepo, destrepo, **kwargs): | |
28 | orig(sourcerepo, destrepo, **kwargs) |
|
25 | orig(sourcerepo, destrepo, **kwargs) | |
29 | if REQUIREMENT in sourcerepo.requirements: |
|
26 | if changegroup.NARROW_REQUIREMENT in sourcerepo.requirements: | |
30 | with destrepo.wlock(): |
|
27 | with destrepo.wlock(): | |
31 | with destrepo.vfs('shared', 'a') as fp: |
|
28 | with destrepo.vfs('shared', 'a') as fp: | |
32 | fp.write(narrowspec.FILENAME + '\n') |
|
29 | fp.write(narrowspec.FILENAME + '\n') | |
33 |
|
30 | |||
34 | def unsharenarrowspec(orig, ui, repo, repopath): |
|
31 | def unsharenarrowspec(orig, ui, repo, repopath): | |
35 | if (REQUIREMENT in repo.requirements |
|
32 | if (changegroup.NARROW_REQUIREMENT in repo.requirements | |
36 | and repo.path == repopath and repo.shared()): |
|
33 | and repo.path == repopath and repo.shared()): | |
37 | srcrepo = hg.sharedreposource(repo) |
|
34 | srcrepo = hg.sharedreposource(repo) | |
38 | with srcrepo.vfs(narrowspec.FILENAME) as f: |
|
35 | with srcrepo.vfs(narrowspec.FILENAME) as f: | |
39 | spec = f.read() |
|
36 | spec = f.read() | |
40 | with repo.vfs(narrowspec.FILENAME, 'w') as f: |
|
37 | with repo.vfs(narrowspec.FILENAME, 'w') as f: | |
41 | f.write(spec) |
|
38 | f.write(spec) | |
42 | return orig(ui, repo, repopath) |
|
39 | return orig(ui, repo, repopath) | |
43 |
|
40 | |||
44 | def wraprepo(repo, opts_narrow): |
|
41 | def wraprepo(repo, opts_narrow): | |
45 | """Enables narrow clone functionality on a single local repository.""" |
|
42 | """Enables narrow clone functionality on a single local repository.""" | |
46 |
|
43 | |||
47 | cacheprop = localrepo.storecache |
|
44 | cacheprop = localrepo.storecache | |
48 | if isinstance(repo, bundlerepo.bundlerepository): |
|
45 | if isinstance(repo, bundlerepo.bundlerepository): | |
49 | # We have to use a different caching property decorator for |
|
46 | # We have to use a different caching property decorator for | |
50 | # bundlerepo because storecache blows up in strange ways on a |
|
47 | # bundlerepo because storecache blows up in strange ways on a | |
51 | # bundlerepo. Fortunately, there's no risk of data changing in |
|
48 | # bundlerepo. Fortunately, there's no risk of data changing in | |
52 | # a bundlerepo. |
|
49 | # a bundlerepo. | |
53 | cacheprop = lambda name: localrepo.unfilteredpropertycache |
|
50 | cacheprop = lambda name: localrepo.unfilteredpropertycache | |
54 |
|
51 | |||
55 | class narrowrepository(repo.__class__): |
|
52 | class narrowrepository(repo.__class__): | |
56 |
|
53 | |||
57 | def _constructmanifest(self): |
|
54 | def _constructmanifest(self): | |
58 | manifest = super(narrowrepository, self)._constructmanifest() |
|
55 | manifest = super(narrowrepository, self)._constructmanifest() | |
59 | narrowrevlog.makenarrowmanifestrevlog(manifest, repo) |
|
56 | narrowrevlog.makenarrowmanifestrevlog(manifest, repo) | |
60 | return manifest |
|
57 | return manifest | |
61 |
|
58 | |||
62 | @cacheprop('00manifest.i') |
|
59 | @cacheprop('00manifest.i') | |
63 | def manifestlog(self): |
|
60 | def manifestlog(self): | |
64 | mfl = super(narrowrepository, self).manifestlog |
|
61 | mfl = super(narrowrepository, self).manifestlog | |
65 | narrowrevlog.makenarrowmanifestlog(mfl, self) |
|
62 | narrowrevlog.makenarrowmanifestlog(mfl, self) | |
66 | return mfl |
|
63 | return mfl | |
67 |
|
64 | |||
68 | def file(self, f): |
|
65 | def file(self, f): | |
69 | fl = super(narrowrepository, self).file(f) |
|
66 | fl = super(narrowrepository, self).file(f) | |
70 | narrowrevlog.makenarrowfilelog(fl, self.narrowmatch()) |
|
67 | narrowrevlog.makenarrowfilelog(fl, self.narrowmatch()) | |
71 | return fl |
|
68 | return fl | |
72 |
|
69 | |||
73 | @localrepo.repofilecache(narrowspec.FILENAME) |
|
70 | @localrepo.repofilecache(narrowspec.FILENAME) | |
74 | def narrowpats(self): |
|
71 | def narrowpats(self): | |
75 | """matcher patterns for this repository's narrowspec |
|
72 | """matcher patterns for this repository's narrowspec | |
76 |
|
73 | |||
77 | A tuple of (includes, excludes). |
|
74 | A tuple of (includes, excludes). | |
78 | """ |
|
75 | """ | |
79 | return narrowspec.load(self) |
|
76 | return narrowspec.load(self) | |
80 |
|
77 | |||
81 | @localrepo.repofilecache(narrowspec.FILENAME) |
|
78 | @localrepo.repofilecache(narrowspec.FILENAME) | |
82 | def _narrowmatch(self): |
|
79 | def _narrowmatch(self): | |
83 | include, exclude = self.narrowpats |
|
80 | include, exclude = self.narrowpats | |
84 | if not opts_narrow and not include and not exclude: |
|
81 | if not opts_narrow and not include and not exclude: | |
85 | return matchmod.always(self.root, '') |
|
82 | return matchmod.always(self.root, '') | |
86 | return narrowspec.match(self.root, include=include, exclude=exclude) |
|
83 | return narrowspec.match(self.root, include=include, exclude=exclude) | |
87 |
|
84 | |||
88 | # TODO(martinvonz): make this property-like instead? |
|
85 | # TODO(martinvonz): make this property-like instead? | |
89 | def narrowmatch(self): |
|
86 | def narrowmatch(self): | |
90 | return self._narrowmatch |
|
87 | return self._narrowmatch | |
91 |
|
88 | |||
92 | def setnarrowpats(self, newincludes, newexcludes): |
|
89 | def setnarrowpats(self, newincludes, newexcludes): | |
93 | narrowspec.save(self, newincludes, newexcludes) |
|
90 | narrowspec.save(self, newincludes, newexcludes) | |
94 | self.invalidate(clearfilecache=True) |
|
91 | self.invalidate(clearfilecache=True) | |
95 |
|
92 | |||
96 | # I'm not sure this is the right place to do this filter. |
|
93 | # I'm not sure this is the right place to do this filter. | |
97 | # context._manifestmatches() would probably be better, or perhaps |
|
94 | # context._manifestmatches() would probably be better, or perhaps | |
98 | # move it to a later place, in case some of the callers do want to know |
|
95 | # move it to a later place, in case some of the callers do want to know | |
99 | # which directories changed. This seems to work for now, though. |
|
96 | # which directories changed. This seems to work for now, though. | |
100 | def status(self, *args, **kwargs): |
|
97 | def status(self, *args, **kwargs): | |
101 | s = super(narrowrepository, self).status(*args, **kwargs) |
|
98 | s = super(narrowrepository, self).status(*args, **kwargs) | |
102 | narrowmatch = self.narrowmatch() |
|
99 | narrowmatch = self.narrowmatch() | |
103 | modified = list(filter(narrowmatch, s.modified)) |
|
100 | modified = list(filter(narrowmatch, s.modified)) | |
104 | added = list(filter(narrowmatch, s.added)) |
|
101 | added = list(filter(narrowmatch, s.added)) | |
105 | removed = list(filter(narrowmatch, s.removed)) |
|
102 | removed = list(filter(narrowmatch, s.removed)) | |
106 | deleted = list(filter(narrowmatch, s.deleted)) |
|
103 | deleted = list(filter(narrowmatch, s.deleted)) | |
107 | unknown = list(filter(narrowmatch, s.unknown)) |
|
104 | unknown = list(filter(narrowmatch, s.unknown)) | |
108 | ignored = list(filter(narrowmatch, s.ignored)) |
|
105 | ignored = list(filter(narrowmatch, s.ignored)) | |
109 | clean = list(filter(narrowmatch, s.clean)) |
|
106 | clean = list(filter(narrowmatch, s.clean)) | |
110 | return scmutil.status(modified, added, removed, deleted, unknown, |
|
107 | return scmutil.status(modified, added, removed, deleted, unknown, | |
111 | ignored, clean) |
|
108 | ignored, clean) | |
112 |
|
109 | |||
113 | repo.__class__ = narrowrepository |
|
110 | repo.__class__ = narrowrepository |
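
wraprepo() relies on the usual Mercurial extension trick of replacing repo.__class__ with a dynamically created subclass, so the extra behaviour applies to that one repository object only. Below is a self-contained sketch of the same pattern, with the status() override reduced to filtering paths through a matcher; the toy repo and class names are invented for illustration and are not part of the patch.

# Toy stand-in for a repository object; only status() matters here.
class toyrepo(object):
    def status(self):
        return ['src/a.py', 'docs/b.txt']

def wrap(repo, narrowmatch):
    # Subclass whatever class the instance already has and swap it in,
    # mirroring 'repo.__class__ = narrowrepository' above.
    class narrowedrepo(repo.__class__):
        def status(self):
            s = super(narrowedrepo, self).status()
            return list(filter(narrowmatch, s))
    repo.__class__ = narrowedrepo

repo = toyrepo()
wrap(repo, lambda path: path.startswith('src/'))
print(repo.status())   # ['src/a.py']
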
@@ -1,996 +1,1000 b'' | |||||
1 | # changegroup.py - Mercurial changegroup manipulation functions |
|
1 | # changegroup.py - Mercurial changegroup manipulation functions | |
2 | # |
|
2 | # | |
3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import os |
|
10 | import os | |
11 | import struct |
|
11 | import struct | |
12 | import tempfile |
|
12 | import tempfile | |
13 | import weakref |
|
13 | import weakref | |
14 |
|
14 | |||
15 | from .i18n import _ |
|
15 | from .i18n import _ | |
16 | from .node import ( |
|
16 | from .node import ( | |
17 | hex, |
|
17 | hex, | |
18 | nullrev, |
|
18 | nullrev, | |
19 | short, |
|
19 | short, | |
20 | ) |
|
20 | ) | |
21 |
|
21 | |||
22 | from . import ( |
|
22 | from . import ( | |
23 | dagutil, |
|
23 | dagutil, | |
24 | error, |
|
24 | error, | |
25 | mdiff, |
|
25 | mdiff, | |
26 | phases, |
|
26 | phases, | |
27 | pycompat, |
|
27 | pycompat, | |
28 | util, |
|
28 | util, | |
29 | ) |
|
29 | ) | |
30 |
|
30 | |||
31 | _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s" |
|
31 | _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s" | |
32 | _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s" |
|
32 | _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s" | |
33 | _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH" |
|
33 | _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH" | |
34 |
|
34 | |||
|
35 | # When narrowing is finalized and no longer subject to format changes, | |||
|
36 | # we should move this to just "narrow" or similar. | |||
|
37 | NARROW_REQUIREMENT = 'narrowhg-experimental' | |||
|
38 | ||||
35 | readexactly = util.readexactly |
|
39 | readexactly = util.readexactly | |
36 |
|
40 | |||
37 | def getchunk(stream): |
|
41 | def getchunk(stream): | |
38 | """return the next chunk from stream as a string""" |
|
42 | """return the next chunk from stream as a string""" | |
39 | d = readexactly(stream, 4) |
|
43 | d = readexactly(stream, 4) | |
40 | l = struct.unpack(">l", d)[0] |
|
44 | l = struct.unpack(">l", d)[0] | |
41 | if l <= 4: |
|
45 | if l <= 4: | |
42 | if l: |
|
46 | if l: | |
43 | raise error.Abort(_("invalid chunk length %d") % l) |
|
47 | raise error.Abort(_("invalid chunk length %d") % l) | |
44 | return "" |
|
48 | return "" | |
45 | return readexactly(stream, l - 4) |
|
49 | return readexactly(stream, l - 4) | |
46 |
|
50 | |||
47 | def chunkheader(length): |
|
51 | def chunkheader(length): | |
48 | """return a changegroup chunk header (string)""" |
|
52 | """return a changegroup chunk header (string)""" | |
49 | return struct.pack(">l", length + 4) |
|
53 | return struct.pack(">l", length + 4) | |
50 |
|
54 | |||
51 | def closechunk(): |
|
55 | def closechunk(): | |
52 | """return a changegroup chunk header (string) for a zero-length chunk""" |
|
56 | """return a changegroup chunk header (string) for a zero-length chunk""" | |
53 | return struct.pack(">l", 0) |
|
57 | return struct.pack(">l", 0) | |
54 |
|
58 | |||
55 | def writechunks(ui, chunks, filename, vfs=None): |
|
59 | def writechunks(ui, chunks, filename, vfs=None): | |
56 | """Write chunks to a file and return its filename. |
|
60 | """Write chunks to a file and return its filename. | |
57 |
|
61 | |||
58 | The stream is assumed to be a bundle file. |
|
62 | The stream is assumed to be a bundle file. | |
59 | Existing files will not be overwritten. |
|
63 | Existing files will not be overwritten. | |
60 | If no filename is specified, a temporary file is created. |
|
64 | If no filename is specified, a temporary file is created. | |
61 | """ |
|
65 | """ | |
62 | fh = None |
|
66 | fh = None | |
63 | cleanup = None |
|
67 | cleanup = None | |
64 | try: |
|
68 | try: | |
65 | if filename: |
|
69 | if filename: | |
66 | if vfs: |
|
70 | if vfs: | |
67 | fh = vfs.open(filename, "wb") |
|
71 | fh = vfs.open(filename, "wb") | |
68 | else: |
|
72 | else: | |
69 | # Increase default buffer size because default is usually |
|
73 | # Increase default buffer size because default is usually | |
70 | # small (4k is common on Linux). |
|
74 | # small (4k is common on Linux). | |
71 | fh = open(filename, "wb", 131072) |
|
75 | fh = open(filename, "wb", 131072) | |
72 | else: |
|
76 | else: | |
73 | fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") |
|
77 | fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") | |
74 | fh = os.fdopen(fd, pycompat.sysstr("wb")) |
|
78 | fh = os.fdopen(fd, pycompat.sysstr("wb")) | |
75 | cleanup = filename |
|
79 | cleanup = filename | |
76 | for c in chunks: |
|
80 | for c in chunks: | |
77 | fh.write(c) |
|
81 | fh.write(c) | |
78 | cleanup = None |
|
82 | cleanup = None | |
79 | return filename |
|
83 | return filename | |
80 | finally: |
|
84 | finally: | |
81 | if fh is not None: |
|
85 | if fh is not None: | |
82 | fh.close() |
|
86 | fh.close() | |
83 | if cleanup is not None: |
|
87 | if cleanup is not None: | |
84 | if filename and vfs: |
|
88 | if filename and vfs: | |
85 | vfs.unlink(cleanup) |
|
89 | vfs.unlink(cleanup) | |
86 | else: |
|
90 | else: | |
87 | os.unlink(cleanup) |
|
91 | os.unlink(cleanup) | |
88 |
|
92 | |||
89 | class cg1unpacker(object): |
|
93 | class cg1unpacker(object): | |
90 | """Unpacker for cg1 changegroup streams. |
|
94 | """Unpacker for cg1 changegroup streams. | |
91 |
|
95 | |||
92 | A changegroup unpacker handles the framing of the revision data in |
|
96 | A changegroup unpacker handles the framing of the revision data in | |
93 | the wire format. Most consumers will want to use the apply() |
|
97 | the wire format. Most consumers will want to use the apply() | |
94 | method to add the changes from the changegroup to a repository. |
|
98 | method to add the changes from the changegroup to a repository. | |
95 |
|
99 | |||
96 | If you're forwarding a changegroup unmodified to another consumer, |
|
100 | If you're forwarding a changegroup unmodified to another consumer, | |
97 | use getchunks(), which returns an iterator of changegroup |
|
101 | use getchunks(), which returns an iterator of changegroup | |
98 | chunks. This is mostly useful for cases where you need to know the |
|
102 | chunks. This is mostly useful for cases where you need to know the | |
99 | data stream has ended by observing the end of the changegroup. |
|
103 | data stream has ended by observing the end of the changegroup. | |
100 |
|
104 | |||
101 | deltachunk() is useful only if you're applying delta data. Most |
|
105 | deltachunk() is useful only if you're applying delta data. Most | |
102 | consumers should prefer apply() instead. |
|
106 | consumers should prefer apply() instead. | |
103 |
|
107 | |||
104 | A few other public methods exist. Those are used only for |
|
108 | A few other public methods exist. Those are used only for | |
105 | bundlerepo and some debug commands - their use is discouraged. |
|
109 | bundlerepo and some debug commands - their use is discouraged. | |
106 | """ |
|
110 | """ | |
107 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
111 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER | |
108 | deltaheadersize = struct.calcsize(deltaheader) |
|
112 | deltaheadersize = struct.calcsize(deltaheader) | |
109 | version = '01' |
|
113 | version = '01' | |
110 | _grouplistcount = 1 # One list of files after the manifests |
|
114 | _grouplistcount = 1 # One list of files after the manifests | |
111 |
|
115 | |||
112 | def __init__(self, fh, alg, extras=None): |
|
116 | def __init__(self, fh, alg, extras=None): | |
113 | if alg is None: |
|
117 | if alg is None: | |
114 | alg = 'UN' |
|
118 | alg = 'UN' | |
115 | if alg not in util.compengines.supportedbundletypes: |
|
119 | if alg not in util.compengines.supportedbundletypes: | |
116 | raise error.Abort(_('unknown stream compression type: %s') |
|
120 | raise error.Abort(_('unknown stream compression type: %s') | |
117 | % alg) |
|
121 | % alg) | |
118 | if alg == 'BZ': |
|
122 | if alg == 'BZ': | |
119 | alg = '_truncatedBZ' |
|
123 | alg = '_truncatedBZ' | |
120 |
|
124 | |||
121 | compengine = util.compengines.forbundletype(alg) |
|
125 | compengine = util.compengines.forbundletype(alg) | |
122 | self._stream = compengine.decompressorreader(fh) |
|
126 | self._stream = compengine.decompressorreader(fh) | |
123 | self._type = alg |
|
127 | self._type = alg | |
124 | self.extras = extras or {} |
|
128 | self.extras = extras or {} | |
125 | self.callback = None |
|
129 | self.callback = None | |
126 |
|
130 | |||
127 | # These methods (compressed, read, seek, tell) all appear to only |
|
131 | # These methods (compressed, read, seek, tell) all appear to only | |
128 | # be used by bundlerepo, but it's a little hard to tell. |
|
132 | # be used by bundlerepo, but it's a little hard to tell. | |
129 | def compressed(self): |
|
133 | def compressed(self): | |
130 | return self._type is not None and self._type != 'UN' |
|
134 | return self._type is not None and self._type != 'UN' | |
131 | def read(self, l): |
|
135 | def read(self, l): | |
132 | return self._stream.read(l) |
|
136 | return self._stream.read(l) | |
133 | def seek(self, pos): |
|
137 | def seek(self, pos): | |
134 | return self._stream.seek(pos) |
|
138 | return self._stream.seek(pos) | |
135 | def tell(self): |
|
139 | def tell(self): | |
136 | return self._stream.tell() |
|
140 | return self._stream.tell() | |
137 | def close(self): |
|
141 | def close(self): | |
138 | return self._stream.close() |
|
142 | return self._stream.close() | |
139 |
|
143 | |||
140 | def _chunklength(self): |
|
144 | def _chunklength(self): | |
141 | d = readexactly(self._stream, 4) |
|
145 | d = readexactly(self._stream, 4) | |
142 | l = struct.unpack(">l", d)[0] |
|
146 | l = struct.unpack(">l", d)[0] | |
143 | if l <= 4: |
|
147 | if l <= 4: | |
144 | if l: |
|
148 | if l: | |
145 | raise error.Abort(_("invalid chunk length %d") % l) |
|
149 | raise error.Abort(_("invalid chunk length %d") % l) | |
146 | return 0 |
|
150 | return 0 | |
147 | if self.callback: |
|
151 | if self.callback: | |
148 | self.callback() |
|
152 | self.callback() | |
149 | return l - 4 |
|
153 | return l - 4 | |
150 |
|
154 | |||
151 | def changelogheader(self): |
|
155 | def changelogheader(self): | |
152 | """v10 does not have a changelog header chunk""" |
|
156 | """v10 does not have a changelog header chunk""" | |
153 | return {} |
|
157 | return {} | |
154 |
|
158 | |||
155 | def manifestheader(self): |
|
159 | def manifestheader(self): | |
156 | """v10 does not have a manifest header chunk""" |
|
160 | """v10 does not have a manifest header chunk""" | |
157 | return {} |
|
161 | return {} | |
158 |
|
162 | |||
159 | def filelogheader(self): |
|
163 | def filelogheader(self): | |
160 | """return the header of the filelogs chunk, v10 only has the filename""" |
|
164 | """return the header of the filelogs chunk, v10 only has the filename""" | |
161 | l = self._chunklength() |
|
165 | l = self._chunklength() | |
162 | if not l: |
|
166 | if not l: | |
163 | return {} |
|
167 | return {} | |
164 | fname = readexactly(self._stream, l) |
|
168 | fname = readexactly(self._stream, l) | |
165 | return {'filename': fname} |
|
169 | return {'filename': fname} | |
166 |
|
170 | |||
167 | def _deltaheader(self, headertuple, prevnode): |
|
171 | def _deltaheader(self, headertuple, prevnode): | |
168 | node, p1, p2, cs = headertuple |
|
172 | node, p1, p2, cs = headertuple | |
169 | if prevnode is None: |
|
173 | if prevnode is None: | |
170 | deltabase = p1 |
|
174 | deltabase = p1 | |
171 | else: |
|
175 | else: | |
172 | deltabase = prevnode |
|
176 | deltabase = prevnode | |
173 | flags = 0 |
|
177 | flags = 0 | |
174 | return node, p1, p2, deltabase, cs, flags |
|
178 | return node, p1, p2, deltabase, cs, flags | |
175 |
|
179 | |||
176 | def deltachunk(self, prevnode): |
|
180 | def deltachunk(self, prevnode): | |
177 | l = self._chunklength() |
|
181 | l = self._chunklength() | |
178 | if not l: |
|
182 | if not l: | |
179 | return {} |
|
183 | return {} | |
180 | headerdata = readexactly(self._stream, self.deltaheadersize) |
|
184 | headerdata = readexactly(self._stream, self.deltaheadersize) | |
181 | header = struct.unpack(self.deltaheader, headerdata) |
|
185 | header = struct.unpack(self.deltaheader, headerdata) | |
182 | delta = readexactly(self._stream, l - self.deltaheadersize) |
|
186 | delta = readexactly(self._stream, l - self.deltaheadersize) | |
183 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) |
|
187 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) | |
184 | return (node, p1, p2, cs, deltabase, delta, flags) |
|
188 | return (node, p1, p2, cs, deltabase, delta, flags) | |
185 |
|
189 | |||
186 | def getchunks(self): |
|
190 | def getchunks(self): | |
187 | """returns all the chunks contains in the bundle |
|
191 | """returns all the chunks contains in the bundle | |
188 |
|
192 | |||
189 | Used when you need to forward the binary stream to a file or another |
|
193 | Used when you need to forward the binary stream to a file or another | |
190 | network API. To do so, it parses the changegroup data; otherwise it will |
|
194 | network API. To do so, it parses the changegroup data; otherwise it will | |
191 | block in the case of sshrepo because it doesn't know the end of the stream. |
|
195 | block in the case of sshrepo because it doesn't know the end of the stream. | |
192 | """ |
|
196 | """ | |
193 | # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog, |
|
197 | # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog, | |
194 | # and a list of filelogs. For changegroup 3, we expect 4 parts: |
|
198 | # and a list of filelogs. For changegroup 3, we expect 4 parts: | |
195 | # changelog, manifestlog, a list of tree manifestlogs, and a list of |
|
199 | # changelog, manifestlog, a list of tree manifestlogs, and a list of | |
196 | # filelogs. |
|
200 | # filelogs. | |
197 | # |
|
201 | # | |
198 | # Changelog and manifestlog parts are terminated with empty chunks. The |
|
202 | # Changelog and manifestlog parts are terminated with empty chunks. The | |
199 | # tree and file parts are a list of entry sections. Each entry section |
|
203 | # tree and file parts are a list of entry sections. Each entry section | |
200 | # is a series of chunks terminating in an empty chunk. The list of these |
|
204 | # is a series of chunks terminating in an empty chunk. The list of these | |
201 | # entry sections is terminated in yet another empty chunk, so we know |
|
205 | # entry sections is terminated in yet another empty chunk, so we know | |
202 | # we've reached the end of the tree/file list when we reach an empty |
|
206 | # we've reached the end of the tree/file list when we reach an empty | |
203 | # chunk that was proceeded by no non-empty chunks. |
|
207 | # chunk that was proceeded by no non-empty chunks. | |
204 |
|
208 | |||
205 | parts = 0 |
|
209 | parts = 0 | |
206 | while parts < 2 + self._grouplistcount: |
|
210 | while parts < 2 + self._grouplistcount: | |
207 | noentries = True |
|
211 | noentries = True | |
208 | while True: |
|
212 | while True: | |
209 | chunk = getchunk(self) |
|
213 | chunk = getchunk(self) | |
210 | if not chunk: |
|
214 | if not chunk: | |
211 | # The first two empty chunks represent the end of the |
|
215 | # The first two empty chunks represent the end of the | |
212 | # changelog and the manifestlog portions. The remaining |
|
216 | # changelog and the manifestlog portions. The remaining | |
213 | # empty chunks represent either A) the end of individual |
|
217 | # empty chunks represent either A) the end of individual | |
214 | # tree or file entries in the file list, or B) the end of |
|
218 | # tree or file entries in the file list, or B) the end of | |
215 | # the entire list. It's the end of the entire list if there |
|
219 | # the entire list. It's the end of the entire list if there | |
216 | # were no entries (i.e. noentries is True). |
|
220 | # were no entries (i.e. noentries is True). | |
217 | if parts < 2: |
|
221 | if parts < 2: | |
218 | parts += 1 |
|
222 | parts += 1 | |
219 | elif noentries: |
|
223 | elif noentries: | |
220 | parts += 1 |
|
224 | parts += 1 | |
221 | break |
|
225 | break | |
222 | noentries = False |
|
226 | noentries = False | |
223 | yield chunkheader(len(chunk)) |
|
227 | yield chunkheader(len(chunk)) | |
224 | pos = 0 |
|
228 | pos = 0 | |
225 | while pos < len(chunk): |
|
229 | while pos < len(chunk): | |
226 | next = pos + 2**20 |
|
230 | next = pos + 2**20 | |
227 | yield chunk[pos:next] |
|
231 | yield chunk[pos:next] | |
228 | pos = next |
|
232 | pos = next | |
229 | yield closechunk() |
|
233 | yield closechunk() | |
230 |
|
234 | |||
231 | def _unpackmanifests(self, repo, revmap, trp, prog, numchanges): |
|
235 | def _unpackmanifests(self, repo, revmap, trp, prog, numchanges): | |
232 | # We know that we'll never have more manifests than we had |
|
236 | # We know that we'll never have more manifests than we had | |
233 | # changesets. |
|
237 | # changesets. | |
234 | self.callback = prog(_('manifests'), numchanges) |
|
238 | self.callback = prog(_('manifests'), numchanges) | |
235 | # no need to check for empty manifest group here: |
|
239 | # no need to check for empty manifest group here: | |
236 | # if the result of the merge of 1 and 2 is the same in 3 and 4, |
|
240 | # if the result of the merge of 1 and 2 is the same in 3 and 4, | |
237 | # no new manifest will be created and the manifest group will |
|
241 | # no new manifest will be created and the manifest group will | |
238 | # be empty during the pull |
|
242 | # be empty during the pull | |
239 | self.manifestheader() |
|
243 | self.manifestheader() | |
240 | deltas = self.deltaiter() |
|
244 | deltas = self.deltaiter() | |
241 | repo.manifestlog._revlog.addgroup(deltas, revmap, trp) |
|
245 | repo.manifestlog._revlog.addgroup(deltas, revmap, trp) | |
242 | repo.ui.progress(_('manifests'), None) |
|
246 | repo.ui.progress(_('manifests'), None) | |
243 | self.callback = None |
|
247 | self.callback = None | |
244 |
|
248 | |||
245 | def apply(self, repo, tr, srctype, url, targetphase=phases.draft, |
|
249 | def apply(self, repo, tr, srctype, url, targetphase=phases.draft, | |
246 | expectedtotal=None): |
|
250 | expectedtotal=None): | |
247 | """Add the changegroup returned by source.read() to this repo. |
|
251 | """Add the changegroup returned by source.read() to this repo. | |
248 | srctype is a string like 'push', 'pull', or 'unbundle'. url is |
|
252 | srctype is a string like 'push', 'pull', or 'unbundle'. url is | |
249 | the URL of the repo where this changegroup is coming from. |
|
253 | the URL of the repo where this changegroup is coming from. | |
250 |
|
254 | |||
251 | Return an integer summarizing the change to this repo: |
|
255 | Return an integer summarizing the change to this repo: | |
252 | - nothing changed or no source: 0 |
|
256 | - nothing changed or no source: 0 | |
253 | - more heads than before: 1+added heads (2..n) |
|
257 | - more heads than before: 1+added heads (2..n) | |
254 | - fewer heads than before: -1-removed heads (-2..-n) |
|
258 | - fewer heads than before: -1-removed heads (-2..-n) | |
255 | - number of heads stays the same: 1 |
|
259 | - number of heads stays the same: 1 | |
256 | """ |
|
260 | """ | |
257 | repo = repo.unfiltered() |
|
261 | repo = repo.unfiltered() | |
258 | def csmap(x): |
|
262 | def csmap(x): | |
259 | repo.ui.debug("add changeset %s\n" % short(x)) |
|
263 | repo.ui.debug("add changeset %s\n" % short(x)) | |
260 | return len(cl) |
|
264 | return len(cl) | |
261 |
|
265 | |||
262 | def revmap(x): |
|
266 | def revmap(x): | |
263 | return cl.rev(x) |
|
267 | return cl.rev(x) | |
264 |
|
268 | |||
265 | changesets = files = revisions = 0 |
|
269 | changesets = files = revisions = 0 | |
266 |
|
270 | |||
267 | try: |
|
271 | try: | |
268 | # The transaction may already carry source information. In this |
|
272 | # The transaction may already carry source information. In this | |
269 | # case we use the top level data. We overwrite the argument |
|
273 | # case we use the top level data. We overwrite the argument | |
270 | # because we need to use the top level value (if it exists) |
|
274 | # because we need to use the top level value (if it exists) | |
271 | # in this function. |
|
275 | # in this function. | |
272 | srctype = tr.hookargs.setdefault('source', srctype) |
|
276 | srctype = tr.hookargs.setdefault('source', srctype) | |
273 | url = tr.hookargs.setdefault('url', url) |
|
277 | url = tr.hookargs.setdefault('url', url) | |
274 | repo.hook('prechangegroup', |
|
278 | repo.hook('prechangegroup', | |
275 | throw=True, **pycompat.strkwargs(tr.hookargs)) |
|
279 | throw=True, **pycompat.strkwargs(tr.hookargs)) | |
276 |
|
280 | |||
277 | # write changelog data to temp files so concurrent readers |
|
281 | # write changelog data to temp files so concurrent readers | |
278 | # will not see an inconsistent view |
|
282 | # will not see an inconsistent view | |
279 | cl = repo.changelog |
|
283 | cl = repo.changelog | |
280 | cl.delayupdate(tr) |
|
284 | cl.delayupdate(tr) | |
281 | oldheads = set(cl.heads()) |
|
285 | oldheads = set(cl.heads()) | |
282 |
|
286 | |||
283 | trp = weakref.proxy(tr) |
|
287 | trp = weakref.proxy(tr) | |
284 | # pull off the changeset group |
|
288 | # pull off the changeset group | |
285 | repo.ui.status(_("adding changesets\n")) |
|
289 | repo.ui.status(_("adding changesets\n")) | |
286 | clstart = len(cl) |
|
290 | clstart = len(cl) | |
287 | class prog(object): |
|
291 | class prog(object): | |
288 | def __init__(self, step, total): |
|
292 | def __init__(self, step, total): | |
289 | self._step = step |
|
293 | self._step = step | |
290 | self._total = total |
|
294 | self._total = total | |
291 | self._count = 1 |
|
295 | self._count = 1 | |
292 | def __call__(self): |
|
296 | def __call__(self): | |
293 | repo.ui.progress(self._step, self._count, unit=_('chunks'), |
|
297 | repo.ui.progress(self._step, self._count, unit=_('chunks'), | |
294 | total=self._total) |
|
298 | total=self._total) | |
295 | self._count += 1 |
|
299 | self._count += 1 | |
296 | self.callback = prog(_('changesets'), expectedtotal) |
|
300 | self.callback = prog(_('changesets'), expectedtotal) | |
297 |
|
301 | |||
298 | efiles = set() |
|
302 | efiles = set() | |
299 | def onchangelog(cl, node): |
|
303 | def onchangelog(cl, node): | |
300 | efiles.update(cl.readfiles(node)) |
|
304 | efiles.update(cl.readfiles(node)) | |
301 |
|
305 | |||
302 | self.changelogheader() |
|
306 | self.changelogheader() | |
303 | deltas = self.deltaiter() |
|
307 | deltas = self.deltaiter() | |
304 | cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog) |
|
308 | cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog) | |
305 | efiles = len(efiles) |
|
309 | efiles = len(efiles) | |
306 |
|
310 | |||
307 | if not cgnodes: |
|
311 | if not cgnodes: | |
308 | repo.ui.develwarn('applied empty changegroup', |
|
312 | repo.ui.develwarn('applied empty changegroup', | |
309 | config='warn-empty-changegroup') |
|
313 | config='warn-empty-changegroup') | |
310 | clend = len(cl) |
|
314 | clend = len(cl) | |
311 | changesets = clend - clstart |
|
315 | changesets = clend - clstart | |
312 | repo.ui.progress(_('changesets'), None) |
|
316 | repo.ui.progress(_('changesets'), None) | |
313 | self.callback = None |
|
317 | self.callback = None | |
314 |
|
318 | |||
315 | # pull off the manifest group |
|
319 | # pull off the manifest group | |
316 | repo.ui.status(_("adding manifests\n")) |
|
320 | repo.ui.status(_("adding manifests\n")) | |
317 | self._unpackmanifests(repo, revmap, trp, prog, changesets) |
|
321 | self._unpackmanifests(repo, revmap, trp, prog, changesets) | |
318 |
|
322 | |||
319 | needfiles = {} |
|
323 | needfiles = {} | |
320 | if repo.ui.configbool('server', 'validate'): |
|
324 | if repo.ui.configbool('server', 'validate'): | |
321 | cl = repo.changelog |
|
325 | cl = repo.changelog | |
322 | ml = repo.manifestlog |
|
326 | ml = repo.manifestlog | |
323 | # validate incoming csets have their manifests |
|
327 | # validate incoming csets have their manifests | |
324 | for cset in xrange(clstart, clend): |
|
328 | for cset in xrange(clstart, clend): | |
325 | mfnode = cl.changelogrevision(cset).manifest |
|
329 | mfnode = cl.changelogrevision(cset).manifest | |
326 | mfest = ml[mfnode].readdelta() |
|
330 | mfest = ml[mfnode].readdelta() | |
327 | # store file cgnodes we must see |
|
331 | # store file cgnodes we must see | |
328 | for f, n in mfest.iteritems(): |
|
332 | for f, n in mfest.iteritems(): | |
329 | needfiles.setdefault(f, set()).add(n) |
|
333 | needfiles.setdefault(f, set()).add(n) | |
330 |
|
334 | |||
331 | # process the files |
|
335 | # process the files | |
332 | repo.ui.status(_("adding file changes\n")) |
|
336 | repo.ui.status(_("adding file changes\n")) | |
333 | newrevs, newfiles = _addchangegroupfiles( |
|
337 | newrevs, newfiles = _addchangegroupfiles( | |
334 | repo, self, revmap, trp, efiles, needfiles) |
|
338 | repo, self, revmap, trp, efiles, needfiles) | |
335 | revisions += newrevs |
|
339 | revisions += newrevs | |
336 | files += newfiles |
|
340 | files += newfiles | |
337 |
|
341 | |||
338 | deltaheads = 0 |
|
342 | deltaheads = 0 | |
339 | if oldheads: |
|
343 | if oldheads: | |
340 | heads = cl.heads() |
|
344 | heads = cl.heads() | |
341 | deltaheads = len(heads) - len(oldheads) |
|
345 | deltaheads = len(heads) - len(oldheads) | |
342 | for h in heads: |
|
346 | for h in heads: | |
343 | if h not in oldheads and repo[h].closesbranch(): |
|
347 | if h not in oldheads and repo[h].closesbranch(): | |
344 | deltaheads -= 1 |
|
348 | deltaheads -= 1 | |
345 | htext = "" |
|
349 | htext = "" | |
346 | if deltaheads: |
|
350 | if deltaheads: | |
347 | htext = _(" (%+d heads)") % deltaheads |
|
351 | htext = _(" (%+d heads)") % deltaheads | |
348 |
|
352 | |||
349 | repo.ui.status(_("added %d changesets" |
|
353 | repo.ui.status(_("added %d changesets" | |
350 | " with %d changes to %d files%s\n") |
|
354 | " with %d changes to %d files%s\n") | |
351 | % (changesets, revisions, files, htext)) |
|
355 | % (changesets, revisions, files, htext)) | |
352 | repo.invalidatevolatilesets() |
|
356 | repo.invalidatevolatilesets() | |
353 |
|
357 | |||
354 | if changesets > 0: |
|
358 | if changesets > 0: | |
355 | if 'node' not in tr.hookargs: |
|
359 | if 'node' not in tr.hookargs: | |
356 | tr.hookargs['node'] = hex(cl.node(clstart)) |
|
360 | tr.hookargs['node'] = hex(cl.node(clstart)) | |
357 | tr.hookargs['node_last'] = hex(cl.node(clend - 1)) |
|
361 | tr.hookargs['node_last'] = hex(cl.node(clend - 1)) | |
358 | hookargs = dict(tr.hookargs) |
|
362 | hookargs = dict(tr.hookargs) | |
359 | else: |
|
363 | else: | |
360 | hookargs = dict(tr.hookargs) |
|
364 | hookargs = dict(tr.hookargs) | |
361 | hookargs['node'] = hex(cl.node(clstart)) |
|
365 | hookargs['node'] = hex(cl.node(clstart)) | |
362 | hookargs['node_last'] = hex(cl.node(clend - 1)) |
|
366 | hookargs['node_last'] = hex(cl.node(clend - 1)) | |
363 | repo.hook('pretxnchangegroup', |
|
367 | repo.hook('pretxnchangegroup', | |
364 | throw=True, **pycompat.strkwargs(hookargs)) |
|
368 | throw=True, **pycompat.strkwargs(hookargs)) | |
365 |
|
369 | |||
366 | added = [cl.node(r) for r in xrange(clstart, clend)] |
|
370 | added = [cl.node(r) for r in xrange(clstart, clend)] | |
367 | phaseall = None |
|
371 | phaseall = None | |
368 | if srctype in ('push', 'serve'): |
|
372 | if srctype in ('push', 'serve'): | |
369 | # Old servers can not push the boundary themselves. |
|
373 | # Old servers can not push the boundary themselves. | |
370 | # New servers won't push the boundary if changeset already |
|
374 | # New servers won't push the boundary if changeset already | |
371 | # exists locally as secret |
|
375 | # exists locally as secret | |
372 | # |
|
376 | # | |
373 | # We should not use added here but the list of all changes in |
|
377 | # We should not use added here but the list of all changes in | |
374 | # the bundle |
|
378 | # the bundle | |
375 | if repo.publishing(): |
|
379 | if repo.publishing(): | |
376 | targetphase = phaseall = phases.public |
|
380 | targetphase = phaseall = phases.public | |
377 | else: |
|
381 | else: | |
378 | # closer target phase computation |
|
382 | # closer target phase computation | |
379 |
|
383 | |||
380 | # Those changesets have been pushed from the |
|
384 | # Those changesets have been pushed from the | |
381 | # outside, their phases are going to be pushed |
|
385 | # outside, their phases are going to be pushed | |
382 | # alongside. Therefore `targetphase` is |
|
386 | # alongside. Therefore `targetphase` is | |
383 | # ignored. |
|
387 | # ignored. | |
384 | targetphase = phaseall = phases.draft |
|
388 | targetphase = phaseall = phases.draft | |
385 | if added: |
|
389 | if added: | |
386 | phases.registernew(repo, tr, targetphase, added) |
|
390 | phases.registernew(repo, tr, targetphase, added) | |
387 | if phaseall is not None: |
|
391 | if phaseall is not None: | |
388 | phases.advanceboundary(repo, tr, phaseall, cgnodes) |
|
392 | phases.advanceboundary(repo, tr, phaseall, cgnodes) | |
389 |
|
393 | |||
390 | if changesets > 0: |
|
394 | if changesets > 0: | |
391 |
|
395 | |||
392 | def runhooks(): |
|
396 | def runhooks(): | |
393 | # These hooks run when the lock releases, not when the |
|
397 | # These hooks run when the lock releases, not when the | |
394 | # transaction closes. So it's possible for the changelog |
|
398 | # transaction closes. So it's possible for the changelog | |
395 | # to have changed since we last saw it. |
|
399 | # to have changed since we last saw it. | |
396 | if clstart >= len(repo): |
|
400 | if clstart >= len(repo): | |
397 | return |
|
401 | return | |
398 |
|
402 | |||
399 | repo.hook("changegroup", **pycompat.strkwargs(hookargs)) |
|
403 | repo.hook("changegroup", **pycompat.strkwargs(hookargs)) | |
400 |
|
404 | |||
401 | for n in added: |
|
405 | for n in added: | |
402 | args = hookargs.copy() |
|
406 | args = hookargs.copy() | |
403 | args['node'] = hex(n) |
|
407 | args['node'] = hex(n) | |
404 | del args['node_last'] |
|
408 | del args['node_last'] | |
405 | repo.hook("incoming", **pycompat.strkwargs(args)) |
|
409 | repo.hook("incoming", **pycompat.strkwargs(args)) | |
406 |
|
410 | |||
407 | newheads = [h for h in repo.heads() |
|
411 | newheads = [h for h in repo.heads() | |
408 | if h not in oldheads] |
|
412 | if h not in oldheads] | |
409 | repo.ui.log("incoming", |
|
413 | repo.ui.log("incoming", | |
410 | "%s incoming changes - new heads: %s\n", |
|
414 | "%s incoming changes - new heads: %s\n", | |
411 | len(added), |
|
415 | len(added), | |
412 | ', '.join([hex(c[:6]) for c in newheads])) |
|
416 | ', '.join([hex(c[:6]) for c in newheads])) | |
413 |
|
417 | |||
414 | tr.addpostclose('changegroup-runhooks-%020i' % clstart, |
|
418 | tr.addpostclose('changegroup-runhooks-%020i' % clstart, | |
415 | lambda tr: repo._afterlock(runhooks)) |
|
419 | lambda tr: repo._afterlock(runhooks)) | |
416 | finally: |
|
420 | finally: | |
417 | repo.ui.flush() |
|
421 | repo.ui.flush() | |
418 | # never return 0 here: |
|
422 | # never return 0 here: | |
419 | if deltaheads < 0: |
|
423 | if deltaheads < 0: | |
420 | ret = deltaheads - 1 |
|
424 | ret = deltaheads - 1 | |
421 | else: |
|
425 | else: | |
422 | ret = deltaheads + 1 |
|
426 | ret = deltaheads + 1 | |
423 | return ret |
|
427 | return ret | |
424 |
|
428 | |||
425 | def deltaiter(self): |
|
429 | def deltaiter(self): | |
426 | """ |
|
430 | """ | |
427 | returns an iterator of the deltas in this changegroup |
|
431 | returns an iterator of the deltas in this changegroup | |
428 |
|
432 | |||
429 | Useful for passing to the underlying storage system to be stored. |
|
433 | Useful for passing to the underlying storage system to be stored. | |
430 | """ |
|
434 | """ | |
431 | chain = None |
|
435 | chain = None | |
432 | for chunkdata in iter(lambda: self.deltachunk(chain), {}): |
|
436 | for chunkdata in iter(lambda: self.deltachunk(chain), {}): | |
433 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags) |
|
437 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags) | |
434 | yield chunkdata |
|
438 | yield chunkdata | |
435 | chain = chunkdata[0] |
|
439 | chain = chunkdata[0] | |
436 |
|
440 | |||
437 | class cg2unpacker(cg1unpacker): |
|
441 | class cg2unpacker(cg1unpacker): | |
438 | """Unpacker for cg2 streams. |
|
442 | """Unpacker for cg2 streams. | |
439 |
|
443 | |||
440 | cg2 streams add support for generaldelta, so the delta header |
|
444 | cg2 streams add support for generaldelta, so the delta header | |
441 | format is slightly different. All other features about the data |
|
445 | format is slightly different. All other features about the data | |
442 | remain the same. |
|
446 | remain the same. | |
443 | """ |
|
447 | """ | |
444 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER |
|
448 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER | |
445 | deltaheadersize = struct.calcsize(deltaheader) |
|
449 | deltaheadersize = struct.calcsize(deltaheader) | |
446 | version = '02' |
|
450 | version = '02' | |
447 |
|
451 | |||
448 | def _deltaheader(self, headertuple, prevnode): |
|
452 | def _deltaheader(self, headertuple, prevnode): | |
449 | node, p1, p2, deltabase, cs = headertuple |
|
453 | node, p1, p2, deltabase, cs = headertuple | |
450 | flags = 0 |
|
454 | flags = 0 | |
451 | return node, p1, p2, deltabase, cs, flags |
|
455 | return node, p1, p2, deltabase, cs, flags | |
452 |
|
456 | |||
453 | class cg3unpacker(cg2unpacker): |
|
457 | class cg3unpacker(cg2unpacker): | |
454 | """Unpacker for cg3 streams. |
|
458 | """Unpacker for cg3 streams. | |
455 |
|
459 | |||
456 | cg3 streams add support for exchanging treemanifests and revlog |
|
460 | cg3 streams add support for exchanging treemanifests and revlog | |
457 | flags. It adds the revlog flags to the delta header and an empty chunk |
|
461 | flags. It adds the revlog flags to the delta header and an empty chunk | |
458 | separating manifests and files. |
|
462 | separating manifests and files. | |
459 | """ |
|
463 | """ | |
460 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER |
|
464 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER | |
461 | deltaheadersize = struct.calcsize(deltaheader) |
|
465 | deltaheadersize = struct.calcsize(deltaheader) | |
462 | version = '03' |
|
466 | version = '03' | |
463 | _grouplistcount = 2 # One list of manifests and one list of files |
|
467 | _grouplistcount = 2 # One list of manifests and one list of files | |
464 |
|
468 | |||
465 | def _deltaheader(self, headertuple, prevnode): |
|
469 | def _deltaheader(self, headertuple, prevnode): | |
466 | node, p1, p2, deltabase, cs, flags = headertuple |
|
470 | node, p1, p2, deltabase, cs, flags = headertuple | |
467 | return node, p1, p2, deltabase, cs, flags |
|
471 | return node, p1, p2, deltabase, cs, flags | |
468 |
|
472 | |||
469 | def _unpackmanifests(self, repo, revmap, trp, prog, numchanges): |
|
473 | def _unpackmanifests(self, repo, revmap, trp, prog, numchanges): | |
470 | super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog, |
|
474 | super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog, | |
471 | numchanges) |
|
475 | numchanges) | |
472 | for chunkdata in iter(self.filelogheader, {}): |
|
476 | for chunkdata in iter(self.filelogheader, {}): | |
473 | # If we get here, there are directory manifests in the changegroup |
|
477 | # If we get here, there are directory manifests in the changegroup | |
474 | d = chunkdata["filename"] |
|
478 | d = chunkdata["filename"] | |
475 | repo.ui.debug("adding %s revisions\n" % d) |
|
479 | repo.ui.debug("adding %s revisions\n" % d) | |
476 | dirlog = repo.manifestlog._revlog.dirlog(d) |
|
480 | dirlog = repo.manifestlog._revlog.dirlog(d) | |
477 | deltas = self.deltaiter() |
|
481 | deltas = self.deltaiter() | |
478 | if not dirlog.addgroup(deltas, revmap, trp): |
|
482 | if not dirlog.addgroup(deltas, revmap, trp): | |
479 | raise error.Abort(_("received dir revlog group is empty")) |
|
483 | raise error.Abort(_("received dir revlog group is empty")) | |
480 |
|
484 | |||
481 | class headerlessfixup(object): |
|
485 | class headerlessfixup(object): | |
482 | def __init__(self, fh, h): |
|
486 | def __init__(self, fh, h): | |
483 | self._h = h |
|
487 | self._h = h | |
484 | self._fh = fh |
|
488 | self._fh = fh | |
485 | def read(self, n): |
|
489 | def read(self, n): | |
486 | if self._h: |
|
490 | if self._h: | |
487 | d, self._h = self._h[:n], self._h[n:] |
|
491 | d, self._h = self._h[:n], self._h[n:] | |
488 | if len(d) < n: |
|
492 | if len(d) < n: | |
489 | d += readexactly(self._fh, n - len(d)) |
|
493 | d += readexactly(self._fh, n - len(d)) | |
490 | return d |
|
494 | return d | |
491 | return readexactly(self._fh, n) |
|
495 | return readexactly(self._fh, n) | |
492 |
|
496 | |||
493 | class cg1packer(object): |
|
497 | class cg1packer(object): | |
494 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
498 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER | |
495 | version = '01' |
|
499 | version = '01' | |
496 | def __init__(self, repo, bundlecaps=None): |
|
500 | def __init__(self, repo, bundlecaps=None): | |
497 | """Given a source repo, construct a bundler. |
|
501 | """Given a source repo, construct a bundler. | |
498 |
|
502 | |||
499 | bundlecaps is optional and can be used to specify the set of |
|
503 | bundlecaps is optional and can be used to specify the set of | |
500 | capabilities which can be used to build the bundle. While bundlecaps is |
|
504 | capabilities which can be used to build the bundle. While bundlecaps is | |
501 | unused in core Mercurial, extensions rely on this feature to communicate |
|
505 | unused in core Mercurial, extensions rely on this feature to communicate | |
502 | capabilities to customize the changegroup packer. |
|
506 | capabilities to customize the changegroup packer. | |
503 | """ |
|
507 | """ | |
504 | # Set of capabilities we can use to build the bundle. |
|
508 | # Set of capabilities we can use to build the bundle. | |
505 | if bundlecaps is None: |
|
509 | if bundlecaps is None: | |
506 | bundlecaps = set() |
|
510 | bundlecaps = set() | |
507 | self._bundlecaps = bundlecaps |
|
511 | self._bundlecaps = bundlecaps | |
508 | # experimental config: bundle.reorder |
|
512 | # experimental config: bundle.reorder | |
509 | reorder = repo.ui.config('bundle', 'reorder') |
|
513 | reorder = repo.ui.config('bundle', 'reorder') | |
510 | if reorder == 'auto': |
|
514 | if reorder == 'auto': | |
511 | reorder = None |
|
515 | reorder = None | |
512 | else: |
|
516 | else: | |
513 | reorder = util.parsebool(reorder) |
|
517 | reorder = util.parsebool(reorder) | |
514 | self._repo = repo |
|
518 | self._repo = repo | |
515 | self._reorder = reorder |
|
519 | self._reorder = reorder | |
516 | self._progress = repo.ui.progress |
|
520 | self._progress = repo.ui.progress | |
517 | if self._repo.ui.verbose and not self._repo.ui.debugflag: |
|
521 | if self._repo.ui.verbose and not self._repo.ui.debugflag: | |
518 | self._verbosenote = self._repo.ui.note |
|
522 | self._verbosenote = self._repo.ui.note | |
519 | else: |
|
523 | else: | |
520 | self._verbosenote = lambda s: None |
|
524 | self._verbosenote = lambda s: None | |
521 |
|
525 | |||
522 | def close(self): |
|
526 | def close(self): | |
523 | return closechunk() |
|
527 | return closechunk() | |
524 |
|
528 | |||
525 | def fileheader(self, fname): |
|
529 | def fileheader(self, fname): | |
526 | return chunkheader(len(fname)) + fname |
|
530 | return chunkheader(len(fname)) + fname | |
527 |
|
531 | |||
528 | # Extracted both for clarity and for overriding in extensions. |
|
532 | # Extracted both for clarity and for overriding in extensions. | |
529 | def _sortgroup(self, revlog, nodelist, lookup): |
|
533 | def _sortgroup(self, revlog, nodelist, lookup): | |
530 | """Sort nodes for change group and turn them into revnums.""" |
|
534 | """Sort nodes for change group and turn them into revnums.""" | |
531 | # for generaldelta revlogs, we linearize the revs; this will both be |
|
535 | # for generaldelta revlogs, we linearize the revs; this will both be | |
532 | # much quicker and generate a much smaller bundle |
|
536 | # much quicker and generate a much smaller bundle | |
533 | if (revlog._generaldelta and self._reorder is None) or self._reorder: |
|
537 | if (revlog._generaldelta and self._reorder is None) or self._reorder: | |
534 | dag = dagutil.revlogdag(revlog) |
|
538 | dag = dagutil.revlogdag(revlog) | |
535 | return dag.linearize(set(revlog.rev(n) for n in nodelist)) |
|
539 | return dag.linearize(set(revlog.rev(n) for n in nodelist)) | |
536 | else: |
|
540 | else: | |
537 | return sorted([revlog.rev(n) for n in nodelist]) |
|
541 | return sorted([revlog.rev(n) for n in nodelist]) | |
538 |
|
542 | |||
539 | def group(self, nodelist, revlog, lookup, units=None): |
|
543 | def group(self, nodelist, revlog, lookup, units=None): | |
540 | """Calculate a delta group, yielding a sequence of changegroup chunks |
|
544 | """Calculate a delta group, yielding a sequence of changegroup chunks | |
541 | (strings). |
|
545 | (strings). | |
542 |
|
546 | |||
543 | Given a list of changeset revs, return a set of deltas and |
|
547 | Given a list of changeset revs, return a set of deltas and | |
544 | metadata corresponding to nodes. The first delta is |
|
548 | metadata corresponding to nodes. The first delta is | |
545 | first parent(nodelist[0]) -> nodelist[0], the receiver is |
|
549 | first parent(nodelist[0]) -> nodelist[0], the receiver is | |
546 | guaranteed to have this parent as it has all history before |
|
550 | guaranteed to have this parent as it has all history before | |
547 | these changesets. In the case firstparent is nullrev the |
|
551 | these changesets. In the case firstparent is nullrev the | |
548 | changegroup starts with a full revision. |
|
552 | changegroup starts with a full revision. | |
549 |
|
553 | |||
550 | If units is not None, progress detail will be generated; units specifies |
|
554 | If units is not None, progress detail will be generated, units specifies | |
551 | the type of revlog that is touched (changelog, manifest, etc.). |
|
555 | the type of revlog that is touched (changelog, manifest, etc.). | |
552 | """ |
|
556 | """ | |
553 | # if we don't have any revisions touched by these changesets, bail |
|
557 | # if we don't have any revisions touched by these changesets, bail | |
554 | if len(nodelist) == 0: |
|
558 | if len(nodelist) == 0: | |
555 | yield self.close() |
|
559 | yield self.close() | |
556 | return |
|
560 | return | |
557 |
|
561 | |||
558 | revs = self._sortgroup(revlog, nodelist, lookup) |
|
562 | revs = self._sortgroup(revlog, nodelist, lookup) | |
559 |
|
563 | |||
560 | # add the parent of the first rev |
|
564 | # add the parent of the first rev | |
561 | p = revlog.parentrevs(revs[0])[0] |
|
565 | p = revlog.parentrevs(revs[0])[0] | |
562 | revs.insert(0, p) |
|
566 | revs.insert(0, p) | |
563 |
|
567 | |||
564 | # build deltas |
|
568 | # build deltas | |
565 | total = len(revs) - 1 |
|
569 | total = len(revs) - 1 | |
566 | msgbundling = _('bundling') |
|
570 | msgbundling = _('bundling') | |
567 | for r in xrange(len(revs) - 1): |
|
571 | for r in xrange(len(revs) - 1): | |
568 | if units is not None: |
|
572 | if units is not None: | |
569 | self._progress(msgbundling, r + 1, unit=units, total=total) |
|
573 | self._progress(msgbundling, r + 1, unit=units, total=total) | |
570 | prev, curr = revs[r], revs[r + 1] |
|
574 | prev, curr = revs[r], revs[r + 1] | |
571 | linknode = lookup(revlog.node(curr)) |
|
575 | linknode = lookup(revlog.node(curr)) | |
572 | for c in self.revchunk(revlog, curr, prev, linknode): |
|
576 | for c in self.revchunk(revlog, curr, prev, linknode): | |
573 | yield c |
|
577 | yield c | |
574 |
|
578 | |||
575 | if units is not None: |
|
579 | if units is not None: | |
576 | self._progress(msgbundling, None) |
|
580 | self._progress(msgbundling, None) | |
577 | yield self.close() |
|
581 | yield self.close() | |
578 |
|
582 | |||
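The docstring above describes the chunk sequence on the wire; a caller normally just concatenates whatever group() yields, with the empty chunk from close() marking the end of the section. A minimal sketch, assuming packer, nodes, revlog and lookup are supplied by the caller (it is not part of changegroup.py):

def collectgroup(packer, nodes, revlog, lookup):
    # join the delta chunks and the terminating empty chunk into one string
    return ''.join(packer.group(nodes, revlog, lookup))
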
    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                fastpathlinkrev, mfs, fnodes, source):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes, source):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based on pulls vs pushes, etc.
        """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                  1) fclnodes gets populated with the list of relevant
                     file nodes if we're not using fastpathlinkrev
                  2) When treemanifests are in use, collects treemanifest nodes
                     to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

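builddeltaheader packs the fixed header that precedes each delta; the format string is the module-level _CHANGEGROUPV1_DELTA_HEADER constant defined earlier in the file. The sketch below shows how such a header could be read back, assuming the cg1 layout of four 20-byte nodes (node, p1, p2, linknode); it is an illustration, not part of changegroup.py.

import struct

def parsecg1deltaheader(data, fmt="20s20s20s20s"):
    # assumed cg1 layout: four 20-byte binary nodes, the delta text follows
    size = struct.calcsize(fmt)
    node, p1, p2, linknode = struct.unpack(fmt, data[:size])
    return node, p1, p2, linknode, data[size:]
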
class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            return prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

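The comments in deltaparent above spell out the decision table for choosing a delta base. Restated as a standalone pure function (an illustrative sketch only, not part of changegroup.py), it reads:

def choosedeltabase(dp, p1, p2, prev, storedeltachains, nullrev=-1):
    if dp == nullrev and storedeltachains:
        return prev      # prev is usually cached, so deltaing stays cheap
    if dp == nullrev:
        return nullrev   # the revlog wants a full snapshot, keep it
    if dp not in (p1, p2, prev):
        return prev      # the receiver may not have the stored base
    return dp            # the stored delta parent is safe to reuse
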
class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)

_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
}

def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    return versions

def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)

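Taken together, the helpers above yield '01' on a plain repository, '02' once generaldelta is in play, and '03' only when treemanifest or changegroup3 support is enabled. A small usage sketch (not part of changegroup.py; repo is assumed to be any localrepo object):

def reportversions(repo):
    repo.ui.write('outgoing: %s\n'
                  % ', '.join(sorted(supportedoutgoingversions(repo))))
    repo.ui.write('safe: %s, local: %s\n'
                  % (safeversion(repo), localversion(repo)))
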
def getbundler(version, repo, bundlecaps=None):
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing)})

def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

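As a rough usage sketch (not part of changegroup.py), a push path would compute an outgoing set and then hand it to makechangegroup; the outgoing object is assumed to come from discovery code elsewhere in Mercurial:

def examplepushbundle(repo, outgoing):
    # pick the smallest version every expected client can read
    version = safeversion(repo)
    # makechangegroup wraps the generated stream in the matching cg*unpacker
    return makechangegroup(repo, outgoing, version, 'push')
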
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files