##// END OF EJS Templates
narrow: mark requirement as a constant...
Augie Fackler -
r36105:8c31187b default
parent child Browse files
Show More
@@ -1,93 +1,93 b''
1 # __init__.py - narrowhg extension
1 # __init__.py - narrowhg extension
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 '''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
7 '''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
11 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
12 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
12 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
13 # be specifying the version(s) of Mercurial they are tested with, or
13 # be specifying the version(s) of Mercurial they are tested with, or
14 # leave the attribute unspecified.
14 # leave the attribute unspecified.
15 testedwith = 'ships-with-hg-core'
15 testedwith = 'ships-with-hg-core'
16
16
17 from mercurial import (
17 from mercurial import (
18 extensions,
18 extensions,
19 hg,
19 hg,
20 localrepo,
20 localrepo,
21 registrar,
21 registrar,
22 verify as verifymod,
22 verify as verifymod,
23 )
23 )
24
24
25 from . import (
25 from . import (
26 narrowbundle2,
26 narrowbundle2,
27 narrowchangegroup,
27 narrowchangegroup,
28 narrowcommands,
28 narrowcommands,
29 narrowcopies,
29 narrowcopies,
30 narrowdirstate,
30 narrowdirstate,
31 narrowmerge,
31 narrowmerge,
32 narrowpatch,
32 narrowpatch,
33 narrowrepo,
33 narrowrepo,
34 narrowrevlog,
34 narrowrevlog,
35 narrowtemplates,
35 narrowtemplates,
36 narrowwirepeer,
36 narrowwirepeer,
37 )
37 )
38
38
# Config registration boilerplate: configtable collects the items this
# extension declares, and configitem registers into it.
configtable = {}
configitem = registrar.configitem(configtable)
# Narrowhg *has* support for serving ellipsis nodes (which are used at
# least by Google's internal server), but that support is pretty
# fragile and has a lot of problems on real-world repositories that
# have complex graph topologies. This could probably be corrected, but
# absent someone needing the full support for ellipsis nodes in
# repositories with merges, it's unlikely this work will get done. As
# of this writing in late 2017, all repositories large enough for
# ellipsis nodes to be a hard requirement also enforce strictly linear
# history for other scaling reasons.
configitem('experimental', 'narrowservebrokenellipses',
           default=False,
           alias=[('narrow', 'serveellipses')],
)
54
54
# Export the commands table for Mercurial to see.
cmdtable = narrowcommands.table

# Register the narrow-clone requirement so repositories carrying it can
# be opened.  NOTE(review): presumably _basesupported is the set of
# requirements localrepository accepts -- confirm against localrepo.
localrepo.localrepository._basesupported.add(narrowrepo.REQUIREMENT)
59
59
def uisetup(ui):
    """Wraps user-facing mercurial commands with narrow-aware versions.

    Delegates to the per-module setup() hooks of the narrow submodules;
    runs once at extension load, before any repository is opened.
    """
    narrowrevlog.setup()
    narrowbundle2.setup()
    narrowmerge.setup()
    narrowtemplates.setup()
    narrowcommands.setup()
    narrowchangegroup.setup()
    narrowwirepeer.uisetup()
69
69
def reposetup(ui, repo):
    """Wraps local repositories with narrow repo support.

    No-op for non-local repos, and for local repos that do not carry the
    narrow requirement (i.e. ordinary full clones).
    """
    if not isinstance(repo, localrepo.localrepository):
        return

    if narrowrepo.REQUIREMENT in repo.requirements:
        narrowrepo.wraprepo(repo, True)
        narrowcopies.setup(repo)
        narrowdirstate.setup(repo)
        narrowpatch.setup(repo)
        narrowwirepeer.reposetup(repo)
81
81
82 def _verifierinit(orig, self, repo, matcher=None):
82 def _verifierinit(orig, self, repo, matcher=None):
83 # The verifier's matcher argument was desgined for narrowhg, so it should
83 # The verifier's matcher argument was desgined for narrowhg, so it should
84 # be None from core. If another extension passes a matcher (unlikely),
84 # be None from core. If another extension passes a matcher (unlikely),
85 # we'll have to fail until matchers can be composed more easily.
85 # we'll have to fail until matchers can be composed more easily.
86 assert matcher is None
86 assert matcher is None
87 matcher = getattr(repo, 'narrowmatch', lambda: None)()
87 matcher = getattr(repo, 'narrowmatch', lambda: None)()
88 orig(self, repo, matcher)
88 orig(self, repo, matcher)
89
89
def extsetup(ui):
    """Wraps core mercurial functions at extension-setup time.

    Hooks the verifier constructor (to inject the narrow matcher) and the
    share/unshare paths (to propagate the narrowspec).
    """
    extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
    extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
    extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
@@ -1,496 +1,496 b''
1 # narrowbundle2.py - bundle2 extensions for narrow repository support
1 # narrowbundle2.py - bundle2 extensions for narrow repository support
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import struct
12 import struct
13
13
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial.node import (
15 from mercurial.node import (
16 bin,
16 bin,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 )
19 )
20 from mercurial import (
20 from mercurial import (
21 bundle2,
21 bundle2,
22 changegroup,
22 changegroup,
23 dagutil,
23 dagutil,
24 error,
24 error,
25 exchange,
25 exchange,
26 extensions,
26 extensions,
27 repair,
27 repair,
28 util,
28 util,
29 wireproto,
29 wireproto,
30 )
30 )
31
31
32 from . import (
32 from . import (
33 narrowrepo,
33 narrowrepo,
34 narrowspec,
34 narrowspec,
35 )
35 )
36
36
37 NARROWCAP = 'narrow'
37 NARROWCAP = 'narrow'
38 _NARROWACL_SECTION = 'narrowhgacl'
38 _NARROWACL_SECTION = 'narrowhgacl'
39 _CHANGESPECPART = NARROWCAP + ':changespec'
39 _CHANGESPECPART = NARROWCAP + ':changespec'
40 _SPECPART = NARROWCAP + ':spec'
40 _SPECPART = NARROWCAP + ':spec'
41 _SPECPART_INCLUDE = 'include'
41 _SPECPART_INCLUDE = 'include'
42 _SPECPART_EXCLUDE = 'exclude'
42 _SPECPART_EXCLUDE = 'exclude'
43 _KILLNODESIGNAL = 'KILL'
43 _KILLNODESIGNAL = 'KILL'
44 _DONESIGNAL = 'DONE'
44 _DONESIGNAL = 'DONE'
45 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
45 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
46 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
46 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
47 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
47 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
48 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
48 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
49
49
50 # When advertising capabilities, always include narrow clone support.
50 # When advertising capabilities, always include narrow clone support.
51 def getrepocaps_narrow(orig, repo, **kwargs):
51 def getrepocaps_narrow(orig, repo, **kwargs):
52 caps = orig(repo, **kwargs)
52 caps = orig(repo, **kwargs)
53 caps[NARROWCAP] = ['v0']
53 caps[NARROWCAP] = ['v0']
54 return caps
54 return caps
55
55
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: Revs the client is assumed to already have; these are always
          treated as required (full) nodes.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    cldag = dagutil.revlogdag(cl)
    # dagutil does not like nullid/nullrev
    commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
    headsrevs = cldag.internalizeall(heads)
    if depth:
        # Per-rev distance from the nearest head; only tracked in depth mode.
        revdepth = {h: 0 for h in headsrevs}

    # rev -> set of ellipsis heads reachable from it / head -> its roots.
    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # Move `roots` off of `head` and onto the intermediate `child`.
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        # Pick a pair of head's three roots and find an elided merge commit
        # between them and head that can take that pair over.
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort('Failed to split up ellipsis node! head: %d, '
                          'roots: %d %d %d' % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    # Walk from heads toward roots (missing is ascending, so reverse it).
    visit = reversed(missing)
    relevant_nodes = set()
    # NOTE(review): relies on Python 2 eager map() returning a list (this
    # file also uses iterkeys below); under py3 this would be a lazy
    # iterator and need list().
    visitnodes = map(cl.node, missing)
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = cldag.parents(rev)
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = any(match(f) for f in curmf.diff(p1mf).iterkeys())
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = any(match(f) for f in curmf.diff(p2mf).iterkeys())
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(match(f) for f in curmf)

        if needed:
            # Full node: it becomes a root of any ellipsis heads reachable
            # from it, and its parents become required.
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                # Required but not narrow-relevant: emit as an ellipsis node.
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                # Skipped entirely: propagate its ellipsis heads to parents.
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
180
180
def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    """Generate a changegroup stream that may contain ellipsis nodes.

    Configures a changegroup packer with the precomputed ellipsis state
    (see _computeellipsis) and returns the generated stream.  Raises
    error.Abort if the negotiated changegroup version cannot carry
    ellipsis nodes (cg3 is the minimum).
    """
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cg1packer.revchunk, using a side channel to pass
    # relevant_nodes into that area.  If a linknode is not in the set, it
    # is an ellipsis node and its data is deferred; close() detects the
    # pending ellipsis nodes and flushes them.
    cgpacker = changegroup.getbundler(version, repo)
    # Hand the narrow matcher to the packer so it can omit filelogs and
    # dirlogs as needed.
    cgpacker._narrow_matcher = lambda: match
    # Nodes that must NOT become ellipsis nodes.  We store this set rather
    # than the set of nodes to elide because for very large histories we
    # expect it to be significantly smaller.
    cgpacker.full_nodes = relevant_nodes
    # Ellipsis revs mapped to their roots at the changelog level.
    cgpacker.precomputed_ellipsis = ellipsisroots
    # CL revs mapped to per-revlog revisions; cleared in close() at the
    # end of each group.
    cgpacker.clrev_to_localrev = {}
    cgpacker.next_clrev_to_localrev = {}
    # Changelog nodes mapped to changelog revs; filled in once during the
    # changelog stage and then left unmodified.
    cgpacker.clnode_to_rev = {}
    cgpacker.changelog_done = False
    # True when serving shallow content, in which case the packer might
    # need to pack file contents not introduced by the changes being packed.
    cgpacker.is_shallow = depth is not None

    return cgpacker.generate(common, visitnodes, False, source)
216
216
# Serve a changegroup for a client with a narrow clone.
def getbundlechangegrouppart_narrow(bundler, repo, source,
                                    bundlecaps=None, b2caps=None, heads=None,
                                    common=None, **kwargs):
    """Add narrow-aware changegroup part(s) to an outgoing bundle2 bundle.

    Negotiates a changegroup version from b2caps, then either serves a
    plain (non-ellipsis) narrowed changegroup plus a narrowspec part, or
    -- when experimental.narrowservebrokenellipses is enabled -- computes
    and serves ellipsis changegroups, including kill/re-send parts when
    the client's narrowspec has changed.

    Raises ValueError when no changegroup version can be negotiated, and
    error.Abort for an invalid depth argument.
    """
    cgversions = b2caps.get('changegroup')
    getcgkwargs = {}
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = getcgkwargs['version'] = max(cgversions)
    else:
        raise ValueError(_("server does not advertise changegroup version,"
                           " can't negotiate support for ellipsis nodes"))

    include = sorted(filter(bool, kwargs.get('includepats', [])))
    exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
    newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
    if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
        # Non-ellipsis path: ordinary changegroup, narrowed via the
        # matcher attached to the bundler.
        outgoing = exchange._computeoutgoing(repo, heads, common)
        if not outgoing.missing:
            return
        def wrappedgetbundler(orig, *args, **kwargs):
            bundler = orig(*args, **kwargs)
            bundler._narrow_matcher = lambda : newmatch
            return bundler
        with extensions.wrappedfunction(changegroup, 'getbundler',
                                        wrappedgetbundler):
            cg = changegroup.makestream(repo, outgoing, version, source)
        part = bundler.newpart('changegroup', data=cg)
        part.addparam('version', version)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

        if include or exclude:
            # Tell the client what narrowspec this bundle was built for.
            narrowspecpart = bundler.newpart(_SPECPART)
            if include:
                narrowspecpart.addparam(
                    _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
            if exclude:
                narrowspecpart.addparam(
                    _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)

        return

    depth = kwargs.get('depth', None)
    if depth is not None:
        depth = int(depth)
        if depth < 1:
            raise error.Abort(_('depth must be positive, got %d') % depth)

    heads = set(heads or repo.heads())
    common = set(common or [nullid])
    oldinclude = sorted(filter(bool, kwargs.get('oldincludepats', [])))
    oldexclude = sorted(filter(bool, kwargs.get('oldexcludepats', [])))
    known = {bin(n) for n in kwargs.get('known', [])}
    if known and (oldinclude != include or oldexclude != exclude):
        # The narrowspec changed since the client's last pull.
        # Steps:
        # 1. Send kill for "$known & ::common"
        #
        # 2. Send changegroup for ::common
        #
        # 3. Proceed.
        #
        # In the future, we can send kills for only the specific
        # nodes we know should go away or change shape, and then
        # send a data stream that tells the client something like this:
        #
        # a) apply this changegroup
        # b) apply nodes XXX, YYY, ZZZ that you already have
        # c) goto a
        #
        # until they've built up the full new state.
        # Convert to revnums and intersect with "common". The client should
        # have made it a subset of "common" already, but let's be safe.
        known = set(repo.revs("%ln & ::%ln", known, common))
        # TODO: we could send only roots() of this set, and the
        # list of nodes in common, and the client could work out
        # what to strip, instead of us explicitly sending every
        # single node.
        deadrevs = known
        def genkills():
            for r in deadrevs:
                yield _KILLNODESIGNAL
                yield repo.changelog.node(r)
            yield _DONESIGNAL
        bundler.newpart(_CHANGESPECPART, data=genkills())
        newvisit, newfull, newellipsis = _computeellipsis(
            repo, set(), common, known, newmatch)
        if newvisit:
            cg = _packellipsischangegroup(
                repo, common, newmatch, newfull, newellipsis,
                newvisit, depth, source, version)
            part = bundler.newpart('changegroup', data=cg)
            part.addparam('version', version)
            if 'treemanifest' in repo.requirements:
                part.addparam('treemanifest', '1')

    visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
        repo, common, heads, set(), newmatch, depth=depth)

    repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
    if visitnodes:
        cg = _packellipsischangegroup(
            repo, common, newmatch, relevant_nodes, ellipsisroots,
            visitnodes, depth, source, version)
        part = bundler.newpart('changegroup', data=cg)
        part.addparam('version', version)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
328
328
def applyacl_narrow(repo, kwargs):
    """Clamp the requested narrowspec to what the user's ACL permits.

    Reads per-user (falling back to default) include/exclude patterns
    from the narrowhgacl config section, restricts the include/exclude
    patterns found in kwargs to that set, and returns a copy of kwargs
    with the restricted patterns.  Raises error.Abort when the user has
    no ACL entry or requested includes outside the ACL.
    """
    ui = repo.ui
    username = ui.shortuser(ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    def _aspathpats(pats):
        # '*' grants everything; anything else is rooted at the pattern.
        return ['path:.' if p == '*' else 'path:' + p for p in pats]

    user_includes = _aspathpats(user_includes)
    user_excludes = _aspathpats(user_excludes)

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    invalid_includes = []
    req_includes, req_excludes = narrowspec.restrictpatterns(
        req_includes, req_excludes,
        user_includes, user_excludes, invalid_includes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = dict(kwargs)
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes
    return new_args
365
365
366 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
366 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
367 def _handlechangespec_2(op, inpart):
367 def _handlechangespec_2(op, inpart):
368 includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
368 includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
369 excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
369 excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
370 narrowspec.save(op.repo, includepats, excludepats)
370 narrowspec.save(op.repo, includepats, excludepats)
371 if not narrowrepo.requirement in op.repo.requirements:
371 if not narrowrepo.REQUIREMENT in op.repo.requirements:
372 op.repo.requirements.add(narrowrepo.requirement)
372 op.repo.requirements.add(narrowrepo.REQUIREMENT)
373 op.repo._writerequirements()
373 op.repo._writerequirements()
374 op.repo.invalidate(clearfilecache=True)
374 op.repo.invalidate(clearfilecache=True)
375
375
376 @bundle2.parthandler(_CHANGESPECPART)
376 @bundle2.parthandler(_CHANGESPECPART)
377 def _handlechangespec(op, inpart):
377 def _handlechangespec(op, inpart):
378 repo = op.repo
378 repo = op.repo
379 cl = repo.changelog
379 cl = repo.changelog
380
380
381 # changesets which need to be stripped entirely. either they're no longer
381 # changesets which need to be stripped entirely. either they're no longer
382 # needed in the new narrow spec, or the server is sending a replacement
382 # needed in the new narrow spec, or the server is sending a replacement
383 # in the changegroup part.
383 # in the changegroup part.
384 clkills = set()
384 clkills = set()
385
385
386 # A changespec part contains all the updates to ellipsis nodes
386 # A changespec part contains all the updates to ellipsis nodes
387 # that will happen as a result of widening or narrowing a
387 # that will happen as a result of widening or narrowing a
388 # repo. All the changes that this block encounters are ellipsis
388 # repo. All the changes that this block encounters are ellipsis
389 # nodes or flags to kill an existing ellipsis.
389 # nodes or flags to kill an existing ellipsis.
390 chunksignal = changegroup.readexactly(inpart, 4)
390 chunksignal = changegroup.readexactly(inpart, 4)
391 while chunksignal != _DONESIGNAL:
391 while chunksignal != _DONESIGNAL:
392 if chunksignal == _KILLNODESIGNAL:
392 if chunksignal == _KILLNODESIGNAL:
393 # a node used to be an ellipsis but isn't anymore
393 # a node used to be an ellipsis but isn't anymore
394 ck = changegroup.readexactly(inpart, 20)
394 ck = changegroup.readexactly(inpart, 20)
395 if cl.hasnode(ck):
395 if cl.hasnode(ck):
396 clkills.add(ck)
396 clkills.add(ck)
397 else:
397 else:
398 raise error.Abort(
398 raise error.Abort(
399 _('unexpected changespec node chunk type: %s') % chunksignal)
399 _('unexpected changespec node chunk type: %s') % chunksignal)
400 chunksignal = changegroup.readexactly(inpart, 4)
400 chunksignal = changegroup.readexactly(inpart, 4)
401
401
402 if clkills:
402 if clkills:
403 # preserve bookmarks that repair.strip() would otherwise strip
403 # preserve bookmarks that repair.strip() would otherwise strip
404 bmstore = repo._bookmarks
404 bmstore = repo._bookmarks
405 class dummybmstore(dict):
405 class dummybmstore(dict):
406 def applychanges(self, repo, tr, changes):
406 def applychanges(self, repo, tr, changes):
407 pass
407 pass
408 def recordchange(self, tr): # legacy version
408 def recordchange(self, tr): # legacy version
409 pass
409 pass
410 repo._bookmarks = dummybmstore()
410 repo._bookmarks = dummybmstore()
411 chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
411 chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
412 topic='widen')
412 topic='widen')
413 repo._bookmarks = bmstore
413 repo._bookmarks = bmstore
414 if chgrpfile:
414 if chgrpfile:
415 # presence of _widen_bundle attribute activates widen handler later
415 # presence of _widen_bundle attribute activates widen handler later
416 op._widen_bundle = chgrpfile
416 op._widen_bundle = chgrpfile
417 # Set the new narrowspec if we're widening. The setnewnarrowpats() method
417 # Set the new narrowspec if we're widening. The setnewnarrowpats() method
418 # will currently always be there when using the core+narrowhg server, but
418 # will currently always be there when using the core+narrowhg server, but
419 # other servers may include a changespec part even when not widening (e.g.
419 # other servers may include a changespec part even when not widening (e.g.
420 # because we're deepening a shallow repo).
420 # because we're deepening a shallow repo).
421 if util.safehasattr(repo, 'setnewnarrowpats'):
421 if util.safehasattr(repo, 'setnewnarrowpats'):
422 repo.setnewnarrowpats()
422 repo.setnewnarrowpats()
423
423
424 def handlechangegroup_widen(op, inpart):
424 def handlechangegroup_widen(op, inpart):
425 """Changegroup exchange handler which restores temporarily-stripped nodes"""
425 """Changegroup exchange handler which restores temporarily-stripped nodes"""
426 # We saved a bundle with stripped node data we must now restore.
426 # We saved a bundle with stripped node data we must now restore.
427 # This approach is based on mercurial/repair.py@6ee26a53c111.
427 # This approach is based on mercurial/repair.py@6ee26a53c111.
428 repo = op.repo
428 repo = op.repo
429 ui = op.ui
429 ui = op.ui
430
430
431 chgrpfile = op._widen_bundle
431 chgrpfile = op._widen_bundle
432 del op._widen_bundle
432 del op._widen_bundle
433 vfs = repo.vfs
433 vfs = repo.vfs
434
434
435 ui.note(_("adding branch\n"))
435 ui.note(_("adding branch\n"))
436 f = vfs.open(chgrpfile, "rb")
436 f = vfs.open(chgrpfile, "rb")
437 try:
437 try:
438 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
438 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
439 if not ui.verbose:
439 if not ui.verbose:
440 # silence internal shuffling chatter
440 # silence internal shuffling chatter
441 ui.pushbuffer()
441 ui.pushbuffer()
442 if isinstance(gen, bundle2.unbundle20):
442 if isinstance(gen, bundle2.unbundle20):
443 with repo.transaction('strip') as tr:
443 with repo.transaction('strip') as tr:
444 bundle2.processbundle(repo, gen, lambda: tr)
444 bundle2.processbundle(repo, gen, lambda: tr)
445 else:
445 else:
446 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
446 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
447 if not ui.verbose:
447 if not ui.verbose:
448 ui.popbuffer()
448 ui.popbuffer()
449 finally:
449 finally:
450 f.close()
450 f.close()
451
451
452 # remove undo files
452 # remove undo files
453 for undovfs, undofile in repo.undofiles():
453 for undovfs, undofile in repo.undofiles():
454 try:
454 try:
455 undovfs.unlink(undofile)
455 undovfs.unlink(undofile)
456 except OSError as e:
456 except OSError as e:
457 if e.errno != errno.ENOENT:
457 if e.errno != errno.ENOENT:
458 ui.warn(_('error removing %s: %s\n') %
458 ui.warn(_('error removing %s: %s\n') %
459 (undovfs.join(undofile), str(e)))
459 (undovfs.join(undofile), str(e)))
460
460
461 # Remove partial backup only if there were no exceptions
461 # Remove partial backup only if there were no exceptions
462 vfs.unlink(chgrpfile)
462 vfs.unlink(chgrpfile)
463
463
464 def setup():
464 def setup():
465 """Enable narrow repo support in bundle2-related extension points."""
465 """Enable narrow repo support in bundle2-related extension points."""
466 extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
466 extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
467
467
468 wireproto.gboptsmap['narrow'] = 'boolean'
468 wireproto.gboptsmap['narrow'] = 'boolean'
469 wireproto.gboptsmap['depth'] = 'plain'
469 wireproto.gboptsmap['depth'] = 'plain'
470 wireproto.gboptsmap['oldincludepats'] = 'csv'
470 wireproto.gboptsmap['oldincludepats'] = 'csv'
471 wireproto.gboptsmap['oldexcludepats'] = 'csv'
471 wireproto.gboptsmap['oldexcludepats'] = 'csv'
472 wireproto.gboptsmap['includepats'] = 'csv'
472 wireproto.gboptsmap['includepats'] = 'csv'
473 wireproto.gboptsmap['excludepats'] = 'csv'
473 wireproto.gboptsmap['excludepats'] = 'csv'
474 wireproto.gboptsmap['known'] = 'csv'
474 wireproto.gboptsmap['known'] = 'csv'
475
475
476 # Extend changegroup serving to handle requests from narrow clients.
476 # Extend changegroup serving to handle requests from narrow clients.
477 origcgfn = exchange.getbundle2partsmapping['changegroup']
477 origcgfn = exchange.getbundle2partsmapping['changegroup']
478 def wrappedcgfn(*args, **kwargs):
478 def wrappedcgfn(*args, **kwargs):
479 repo = args[1]
479 repo = args[1]
480 if repo.ui.has_section(_NARROWACL_SECTION):
480 if repo.ui.has_section(_NARROWACL_SECTION):
481 getbundlechangegrouppart_narrow(
481 getbundlechangegrouppart_narrow(
482 *args, **applyacl_narrow(repo, kwargs))
482 *args, **applyacl_narrow(repo, kwargs))
483 elif kwargs.get('narrow', False):
483 elif kwargs.get('narrow', False):
484 getbundlechangegrouppart_narrow(*args, **kwargs)
484 getbundlechangegrouppart_narrow(*args, **kwargs)
485 else:
485 else:
486 origcgfn(*args, **kwargs)
486 origcgfn(*args, **kwargs)
487 exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
487 exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
488
488
489 # Extend changegroup receiver so client can fixup after widen requests.
489 # Extend changegroup receiver so client can fixup after widen requests.
490 origcghandler = bundle2.parthandlermapping['changegroup']
490 origcghandler = bundle2.parthandlermapping['changegroup']
491 def wrappedcghandler(op, inpart):
491 def wrappedcghandler(op, inpart):
492 origcghandler(op, inpart)
492 origcghandler(op, inpart)
493 if util.safehasattr(op, '_widen_bundle'):
493 if util.safehasattr(op, '_widen_bundle'):
494 handlechangegroup_widen(op, inpart)
494 handlechangegroup_widen(op, inpart)
495 wrappedcghandler.params = origcghandler.params
495 wrappedcghandler.params = origcghandler.params
496 bundle2.parthandlermapping['changegroup'] = wrappedcghandler
496 bundle2.parthandlermapping['changegroup'] = wrappedcghandler
@@ -1,385 +1,385 b''
1 # narrowchangegroup.py - narrow clone changegroup creation and consumption
1 # narrowchangegroup.py - narrow clone changegroup creation and consumption
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from mercurial.i18n import _
10 from mercurial.i18n import _
11 from mercurial import (
11 from mercurial import (
12 changegroup,
12 changegroup,
13 error,
13 error,
14 extensions,
14 extensions,
15 manifest,
15 manifest,
16 mdiff,
16 mdiff,
17 node,
17 node,
18 util,
18 util,
19 )
19 )
20
20
21 from . import (
21 from . import (
22 narrowrepo,
22 narrowrepo,
23 narrowrevlog,
23 narrowrevlog,
24 )
24 )
25
25
26 def setup():
26 def setup():
27
27
28 def supportedoutgoingversions(orig, repo):
28 def supportedoutgoingversions(orig, repo):
29 versions = orig(repo)
29 versions = orig(repo)
30 if narrowrepo.requirement in repo.requirements:
30 if narrowrepo.REQUIREMENT in repo.requirements:
31 versions.discard('01')
31 versions.discard('01')
32 versions.discard('02')
32 versions.discard('02')
33 return versions
33 return versions
34
34
35 extensions.wrapfunction(changegroup, 'supportedoutgoingversions',
35 extensions.wrapfunction(changegroup, 'supportedoutgoingversions',
36 supportedoutgoingversions)
36 supportedoutgoingversions)
37
37
38 def prune(orig, self, revlog, missing, commonrevs):
38 def prune(orig, self, revlog, missing, commonrevs):
39 if isinstance(revlog, manifest.manifestrevlog):
39 if isinstance(revlog, manifest.manifestrevlog):
40 matcher = getattr(self._repo, 'narrowmatch',
40 matcher = getattr(self._repo, 'narrowmatch',
41 getattr(self, '_narrow_matcher', None))
41 getattr(self, '_narrow_matcher', None))
42 if (matcher is not None and
42 if (matcher is not None and
43 not matcher().visitdir(revlog._dir[:-1] or '.')):
43 not matcher().visitdir(revlog._dir[:-1] or '.')):
44 return []
44 return []
45 return orig(self, revlog, missing, commonrevs)
45 return orig(self, revlog, missing, commonrevs)
46
46
47 extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
47 extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
48
48
49 def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
49 def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
50 source):
50 source):
51 matcher = getattr(self._repo, 'narrowmatch',
51 matcher = getattr(self._repo, 'narrowmatch',
52 getattr(self, '_narrow_matcher', None))
52 getattr(self, '_narrow_matcher', None))
53 if matcher is not None:
53 if matcher is not None:
54 narrowmatch = matcher()
54 narrowmatch = matcher()
55 changedfiles = filter(narrowmatch, changedfiles)
55 changedfiles = filter(narrowmatch, changedfiles)
56 if getattr(self, 'is_shallow', False):
56 if getattr(self, 'is_shallow', False):
57 # See comment in generate() for why this sadness is a thing.
57 # See comment in generate() for why this sadness is a thing.
58 mfdicts = self._mfdicts
58 mfdicts = self._mfdicts
59 del self._mfdicts
59 del self._mfdicts
60 # In a shallow clone, the linknodes callback needs to also include
60 # In a shallow clone, the linknodes callback needs to also include
61 # those file nodes that are in the manifests we sent but weren't
61 # those file nodes that are in the manifests we sent but weren't
62 # introduced by those manifests.
62 # introduced by those manifests.
63 commonctxs = [self._repo[c] for c in commonrevs]
63 commonctxs = [self._repo[c] for c in commonrevs]
64 oldlinknodes = linknodes
64 oldlinknodes = linknodes
65 clrev = self._repo.changelog.rev
65 clrev = self._repo.changelog.rev
66 def linknodes(flog, fname):
66 def linknodes(flog, fname):
67 for c in commonctxs:
67 for c in commonctxs:
68 try:
68 try:
69 fnode = c.filenode(fname)
69 fnode = c.filenode(fname)
70 self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
70 self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
71 except error.ManifestLookupError:
71 except error.ManifestLookupError:
72 pass
72 pass
73 links = oldlinknodes(flog, fname)
73 links = oldlinknodes(flog, fname)
74 if len(links) != len(mfdicts):
74 if len(links) != len(mfdicts):
75 for mf, lr in mfdicts:
75 for mf, lr in mfdicts:
76 fnode = mf.get(fname, None)
76 fnode = mf.get(fname, None)
77 if fnode in links:
77 if fnode in links:
78 links[fnode] = min(links[fnode], lr, key=clrev)
78 links[fnode] = min(links[fnode], lr, key=clrev)
79 elif fnode:
79 elif fnode:
80 links[fnode] = lr
80 links[fnode] = lr
81 return links
81 return links
82 return orig(self, changedfiles, linknodes, commonrevs, source)
82 return orig(self, changedfiles, linknodes, commonrevs, source)
83 extensions.wrapfunction(
83 extensions.wrapfunction(
84 changegroup.cg1packer, 'generatefiles', generatefiles)
84 changegroup.cg1packer, 'generatefiles', generatefiles)
85
85
86 def ellipsisdata(packer, rev, revlog, p1, p2, data, linknode):
86 def ellipsisdata(packer, rev, revlog, p1, p2, data, linknode):
87 n = revlog.node(rev)
87 n = revlog.node(rev)
88 p1n, p2n = revlog.node(p1), revlog.node(p2)
88 p1n, p2n = revlog.node(p1), revlog.node(p2)
89 flags = revlog.flags(rev)
89 flags = revlog.flags(rev)
90 flags |= narrowrevlog.ELLIPSIS_NODE_FLAG
90 flags |= narrowrevlog.ELLIPSIS_NODE_FLAG
91 meta = packer.builddeltaheader(
91 meta = packer.builddeltaheader(
92 n, p1n, p2n, node.nullid, linknode, flags)
92 n, p1n, p2n, node.nullid, linknode, flags)
93 # TODO: try and actually send deltas for ellipsis data blocks
93 # TODO: try and actually send deltas for ellipsis data blocks
94 diffheader = mdiff.trivialdiffheader(len(data))
94 diffheader = mdiff.trivialdiffheader(len(data))
95 l = len(meta) + len(diffheader) + len(data)
95 l = len(meta) + len(diffheader) + len(data)
96 return ''.join((changegroup.chunkheader(l),
96 return ''.join((changegroup.chunkheader(l),
97 meta,
97 meta,
98 diffheader,
98 diffheader,
99 data))
99 data))
100
100
101 def close(orig, self):
101 def close(orig, self):
102 getattr(self, 'clrev_to_localrev', {}).clear()
102 getattr(self, 'clrev_to_localrev', {}).clear()
103 if getattr(self, 'next_clrev_to_localrev', {}):
103 if getattr(self, 'next_clrev_to_localrev', {}):
104 self.clrev_to_localrev = self.next_clrev_to_localrev
104 self.clrev_to_localrev = self.next_clrev_to_localrev
105 del self.next_clrev_to_localrev
105 del self.next_clrev_to_localrev
106 self.changelog_done = True
106 self.changelog_done = True
107 return orig(self)
107 return orig(self)
108 extensions.wrapfunction(changegroup.cg1packer, 'close', close)
108 extensions.wrapfunction(changegroup.cg1packer, 'close', close)
109
109
110 # In a perfect world, we'd generate better ellipsis-ified graphs
110 # In a perfect world, we'd generate better ellipsis-ified graphs
111 # for non-changelog revlogs. In practice, we haven't started doing
111 # for non-changelog revlogs. In practice, we haven't started doing
112 # that yet, so the resulting DAGs for the manifestlog and filelogs
112 # that yet, so the resulting DAGs for the manifestlog and filelogs
113 # are actually full of bogus parentage on all the ellipsis
113 # are actually full of bogus parentage on all the ellipsis
114 # nodes. This has the side effect that, while the contents are
114 # nodes. This has the side effect that, while the contents are
115 # correct, the individual DAGs might be completely out of whack in
115 # correct, the individual DAGs might be completely out of whack in
116 # a case like 882681bc3166 and its ancestors (back about 10
116 # a case like 882681bc3166 and its ancestors (back about 10
117 # revisions or so) in the main hg repo.
117 # revisions or so) in the main hg repo.
118 #
118 #
119 # The one invariant we *know* holds is that the new (potentially
119 # The one invariant we *know* holds is that the new (potentially
120 # bogus) DAG shape will be valid if we order the nodes in the
120 # bogus) DAG shape will be valid if we order the nodes in the
121 # order that they're introduced in dramatis personae by the
121 # order that they're introduced in dramatis personae by the
122 # changelog, so what we do is we sort the non-changelog histories
122 # changelog, so what we do is we sort the non-changelog histories
123 # by the order in which they are used by the changelog.
123 # by the order in which they are used by the changelog.
124 def _sortgroup(orig, self, revlog, nodelist, lookup):
124 def _sortgroup(orig, self, revlog, nodelist, lookup):
125 if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
125 if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
126 return orig(self, revlog, nodelist, lookup)
126 return orig(self, revlog, nodelist, lookup)
127 key = lambda n: self.clnode_to_rev[lookup(n)]
127 key = lambda n: self.clnode_to_rev[lookup(n)]
128 return [revlog.rev(n) for n in sorted(nodelist, key=key)]
128 return [revlog.rev(n) for n in sorted(nodelist, key=key)]
129
129
130 extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
130 extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
131
131
132 def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
132 def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
133 '''yield a sequence of changegroup chunks (strings)'''
133 '''yield a sequence of changegroup chunks (strings)'''
134 # Note: other than delegating to orig, the only deviation in
134 # Note: other than delegating to orig, the only deviation in
135 # logic from normal hg's generate is marked with BEGIN/END
135 # logic from normal hg's generate is marked with BEGIN/END
136 # NARROW HACK.
136 # NARROW HACK.
137 if not util.safehasattr(self, 'full_nodes'):
137 if not util.safehasattr(self, 'full_nodes'):
138 # not sending a narrow bundle
138 # not sending a narrow bundle
139 for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
139 for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
140 yield x
140 yield x
141 return
141 return
142
142
143 repo = self._repo
143 repo = self._repo
144 cl = repo.changelog
144 cl = repo.changelog
145 mfl = repo.manifestlog
145 mfl = repo.manifestlog
146 mfrevlog = mfl._revlog
146 mfrevlog = mfl._revlog
147
147
148 clrevorder = {}
148 clrevorder = {}
149 mfs = {} # needed manifests
149 mfs = {} # needed manifests
150 fnodes = {} # needed file nodes
150 fnodes = {} # needed file nodes
151 changedfiles = set()
151 changedfiles = set()
152
152
153 # Callback for the changelog, used to collect changed files and manifest
153 # Callback for the changelog, used to collect changed files and manifest
154 # nodes.
154 # nodes.
155 # Returns the linkrev node (identity in the changelog case).
155 # Returns the linkrev node (identity in the changelog case).
156 def lookupcl(x):
156 def lookupcl(x):
157 c = cl.read(x)
157 c = cl.read(x)
158 clrevorder[x] = len(clrevorder)
158 clrevorder[x] = len(clrevorder)
159 # BEGIN NARROW HACK
159 # BEGIN NARROW HACK
160 #
160 #
161 # Only update mfs if x is going to be sent. Otherwise we
161 # Only update mfs if x is going to be sent. Otherwise we
162 # end up with bogus linkrevs specified for manifests and
162 # end up with bogus linkrevs specified for manifests and
163 # we skip some manifest nodes that we should otherwise
163 # we skip some manifest nodes that we should otherwise
164 # have sent.
164 # have sent.
165 if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
165 if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
166 n = c[0]
166 n = c[0]
167 # record the first changeset introducing this manifest version
167 # record the first changeset introducing this manifest version
168 mfs.setdefault(n, x)
168 mfs.setdefault(n, x)
169 # Set this narrow-specific dict so we have the lowest manifest
169 # Set this narrow-specific dict so we have the lowest manifest
170 # revnum to look up for this cl revnum. (Part of mapping
170 # revnum to look up for this cl revnum. (Part of mapping
171 # changelog ellipsis parents to manifest ellipsis parents)
171 # changelog ellipsis parents to manifest ellipsis parents)
172 self.next_clrev_to_localrev.setdefault(cl.rev(x),
172 self.next_clrev_to_localrev.setdefault(cl.rev(x),
173 mfrevlog.rev(n))
173 mfrevlog.rev(n))
174 # We can't trust the changed files list in the changeset if the
174 # We can't trust the changed files list in the changeset if the
175 # client requested a shallow clone.
175 # client requested a shallow clone.
176 if self.is_shallow:
176 if self.is_shallow:
177 changedfiles.update(mfl[c[0]].read().keys())
177 changedfiles.update(mfl[c[0]].read().keys())
178 else:
178 else:
179 changedfiles.update(c[3])
179 changedfiles.update(c[3])
180 # END NARROW HACK
180 # END NARROW HACK
181 # Record a complete list of potentially-changed files in
181 # Record a complete list of potentially-changed files in
182 # this manifest.
182 # this manifest.
183 return x
183 return x
184
184
185 self._verbosenote(_('uncompressed size of bundle content:\n'))
185 self._verbosenote(_('uncompressed size of bundle content:\n'))
186 size = 0
186 size = 0
187 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
187 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
188 size += len(chunk)
188 size += len(chunk)
189 yield chunk
189 yield chunk
190 self._verbosenote(_('%8.i (changelog)\n') % size)
190 self._verbosenote(_('%8.i (changelog)\n') % size)
191
191
192 # We need to make sure that the linkrev in the changegroup refers to
192 # We need to make sure that the linkrev in the changegroup refers to
193 # the first changeset that introduced the manifest or file revision.
193 # the first changeset that introduced the manifest or file revision.
194 # The fastpath is usually safer than the slowpath, because the filelogs
194 # The fastpath is usually safer than the slowpath, because the filelogs
195 # are walked in revlog order.
195 # are walked in revlog order.
196 #
196 #
197 # When taking the slowpath with reorder=None and the manifest revlog
197 # When taking the slowpath with reorder=None and the manifest revlog
198 # uses generaldelta, the manifest may be walked in the "wrong" order.
198 # uses generaldelta, the manifest may be walked in the "wrong" order.
199 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
199 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
200 # cc0ff93d0c0c).
200 # cc0ff93d0c0c).
201 #
201 #
202 # When taking the fastpath, we are only vulnerable to reordering
202 # When taking the fastpath, we are only vulnerable to reordering
203 # of the changelog itself. The changelog never uses generaldelta, so
203 # of the changelog itself. The changelog never uses generaldelta, so
204 # it is only reordered when reorder=True. To handle this case, we
204 # it is only reordered when reorder=True. To handle this case, we
205 # simply take the slowpath, which already has the 'clrevorder' logic.
205 # simply take the slowpath, which already has the 'clrevorder' logic.
206 # This was also fixed in cc0ff93d0c0c.
206 # This was also fixed in cc0ff93d0c0c.
207 fastpathlinkrev = fastpathlinkrev and not self._reorder
207 fastpathlinkrev = fastpathlinkrev and not self._reorder
208 # Treemanifests don't work correctly with fastpathlinkrev
208 # Treemanifests don't work correctly with fastpathlinkrev
209 # either, because we don't discover which directory nodes to
209 # either, because we don't discover which directory nodes to
210 # send along with files. This could probably be fixed.
210 # send along with files. This could probably be fixed.
211 fastpathlinkrev = fastpathlinkrev and (
211 fastpathlinkrev = fastpathlinkrev and (
212 'treemanifest' not in repo.requirements)
212 'treemanifest' not in repo.requirements)
213 # Shallow clones also don't work correctly with fastpathlinkrev
213 # Shallow clones also don't work correctly with fastpathlinkrev
214 # because file nodes may need to be sent for a manifest even if they
214 # because file nodes may need to be sent for a manifest even if they
215 # weren't introduced by that manifest.
215 # weren't introduced by that manifest.
216 fastpathlinkrev = fastpathlinkrev and not self.is_shallow
216 fastpathlinkrev = fastpathlinkrev and not self.is_shallow
217
217
218 moreargs = []
218 moreargs = []
219 if self.generatemanifests.func_code.co_argcount == 7:
219 if self.generatemanifests.func_code.co_argcount == 7:
220 # The source argument was added to generatemanifests in hg in
220 # The source argument was added to generatemanifests in hg in
221 # 75cc1f1e11f2 (2017/09/11).
221 # 75cc1f1e11f2 (2017/09/11).
222 moreargs.append(source)
222 moreargs.append(source)
223 for chunk in self.generatemanifests(commonrevs, clrevorder,
223 for chunk in self.generatemanifests(commonrevs, clrevorder,
224 fastpathlinkrev, mfs, fnodes, *moreargs):
224 fastpathlinkrev, mfs, fnodes, *moreargs):
225 yield chunk
225 yield chunk
226 # BEGIN NARROW HACK
226 # BEGIN NARROW HACK
227 mfdicts = None
227 mfdicts = None
228 if self.is_shallow:
228 if self.is_shallow:
229 mfdicts = [(self._repo.manifestlog[n].read(), lr)
229 mfdicts = [(self._repo.manifestlog[n].read(), lr)
230 for (n, lr) in mfs.iteritems()]
230 for (n, lr) in mfs.iteritems()]
231 # END NARROW HACK
231 # END NARROW HACK
232 mfs.clear()
232 mfs.clear()
233 clrevs = set(cl.rev(x) for x in clnodes)
233 clrevs = set(cl.rev(x) for x in clnodes)
234
234
235 if not fastpathlinkrev:
235 if not fastpathlinkrev:
236 def linknodes(unused, fname):
236 def linknodes(unused, fname):
237 return fnodes.get(fname, {})
237 return fnodes.get(fname, {})
238 else:
238 else:
239 cln = cl.node
239 cln = cl.node
240 def linknodes(filerevlog, fname):
240 def linknodes(filerevlog, fname):
241 llr = filerevlog.linkrev
241 llr = filerevlog.linkrev
242 fln = filerevlog.node
242 fln = filerevlog.node
243 revs = ((r, llr(r)) for r in filerevlog)
243 revs = ((r, llr(r)) for r in filerevlog)
244 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
244 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
245
245
246 # BEGIN NARROW HACK
246 # BEGIN NARROW HACK
247 #
247 #
248 # We need to pass the mfdicts variable down into
248 # We need to pass the mfdicts variable down into
249 # generatefiles(), but more than one command might have
249 # generatefiles(), but more than one command might have
250 # wrapped generatefiles so we can't modify the function
250 # wrapped generatefiles so we can't modify the function
251 # signature. Instead, we pass the data to ourselves using an
251 # signature. Instead, we pass the data to ourselves using an
252 # instance attribute. I'm sorry.
252 # instance attribute. I'm sorry.
253 self._mfdicts = mfdicts
253 self._mfdicts = mfdicts
254 # END NARROW HACK
254 # END NARROW HACK
255 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
255 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
256 source):
256 source):
257 yield chunk
257 yield chunk
258
258
259 yield self.close()
259 yield self.close()
260
260
261 if clnodes:
261 if clnodes:
262 repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
262 repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
263 extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
263 extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
264
264
def revchunk(orig, self, revlog, rev, prev, linknode):
    """Wrap cg1packer.revchunk to emit ellipsis nodes for narrow bundles.

    If the packer has no ``full_nodes`` attribute we are not producing a
    narrow changegroup and everything is delegated to ``orig``.  Otherwise
    a revision is either sent in full (its changeset is a full node),
    skipped entirely, or emitted as a single ellipsisdata() chunk whose
    parents are remapped into the current revlog by the nested local()
    helper.
    """
    if not util.safehasattr(self, 'full_nodes'):
        # not sending a narrow changegroup
        for x in orig(self, revlog, rev, prev, linknode):
            yield x
        return
    # build up some mapping information that's useful later. See
    # the local() nested function below.
    if not self.changelog_done:
        # While walking the changelog itself, the linknode IS this rev.
        self.clnode_to_rev[linknode] = rev
        linkrev = rev
        self.clrev_to_localrev[linkrev] = rev
    else:
        linkrev = self.clnode_to_rev[linknode]
        self.clrev_to_localrev[linkrev] = rev
    # This is a node to send in full, because the changeset it
    # corresponds to was a full changeset.
    if linknode in self.full_nodes:
        for x in orig(self, revlog, rev, prev, linknode):
            yield x
        return
    # At this point, a node can either be one we should skip or an
    # ellipsis. If it's not an ellipsis, bail immediately.
    if linkrev not in self.precomputed_ellipsis:
        return
    linkparents = self.precomputed_ellipsis[linkrev]
    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == node.nullrev:
            return node.nullrev
        if not self.changelog_done:
            # If we're doing the changelog, it's possible that we
            # have a parent that is already on the client, and we
            # need to store some extra mapping information so that
            # our contained ellipsis nodes will be able to resolve
            # their parents.
            if clrev not in self.clrev_to_localrev:
                clnode = revlog.node(clrev)
                self.clnode_to_rev[clnode] = clrev
            return clrev
        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in self.clrev_to_localrev:
                return self.clrev_to_localrev[p]
            elif p in self.full_nodes:
                walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                             if pp != node.nullrev])
            elif p in self.precomputed_ellipsis:
                walk.extend([pp for pp in self.precomputed_ellipsis[p]
                             if pp != node.nullrev])
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in xrange(rev, 0, -1):
                    if revlog.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    'unable to resolve parent while packing %r %r'
                    ' for changeset %r' % (revlog.indexfile, rev, clrev))
        # NOTE(review): only reached if the BFS queue empties without a
        # match; falls back to the null revision.
        return node.nullrev

    if not linkparents or (
        revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
        p1, p2 = node.nullrev, node.nullrev
    elif len(linkparents) == 1:
        p1, = sorted(local(p) for p in linkparents)
        p2 = node.nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)
    yield ellipsisdata(
        self, rev, revlog, p1, p2, revlog.revision(rev), linknode)
extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
369
369
def deltaparent(orig, self, revlog, rev, p1, p2, prev):
    """Wrap cg2packer.deltaparent to choose a safer delta base in narrow mode.

    Outside narrow mode (no ``full_nodes`` on the packer) the stock
    implementation is used unchanged; in narrow mode ``p1`` is always
    chosen as the delta base.
    """
    if not util.safehasattr(self, 'full_nodes'):
        # Not building a narrow changegroup: keep stock behavior.
        return orig(self, revlog, rev, p1, p2, prev)
    # TODO: send better deltas when in narrow mode.
    #
    # changegroup.group() loops over revisions to send,
    # including revisions we'll skip. What this means is that
    # `prev` will be a potentially useless delta base for all
    # ellipsis nodes, as the client likely won't have it. In
    # the future we should do bookkeeping about which nodes
    # have been sent to the client, and try to be
    # significantly smarter about delta bases. This is
    # slightly tricky because this same code has to work for
    # all revlogs, and we don't have the linkrev/linknode here.
    return p1
extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
@@ -1,402 +1,402 b''
1 # narrowcommands.py - command modifications for narrowhg extension
1 # narrowcommands.py - command modifications for narrowhg extension
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import itertools
9 import itertools
10
10
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 from mercurial import (
12 from mercurial import (
13 cmdutil,
13 cmdutil,
14 commands,
14 commands,
15 discovery,
15 discovery,
16 error,
16 error,
17 exchange,
17 exchange,
18 extensions,
18 extensions,
19 hg,
19 hg,
20 merge,
20 merge,
21 node,
21 node,
22 registrar,
22 registrar,
23 repair,
23 repair,
24 repoview,
24 repoview,
25 util,
25 util,
26 )
26 )
27
27
28 from . import (
28 from . import (
29 narrowbundle2,
29 narrowbundle2,
30 narrowrepo,
30 narrowrepo,
31 narrowspec,
31 narrowspec,
32 )
32 )
33
33
34 table = {}
34 table = {}
35 command = registrar.command(table)
35 command = registrar.command(table)
36
36
def setup():
    """Wraps user-facing mercurial commands with narrow-aware versions."""

    cloneentry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
    cloneflags = cloneentry[1]
    cloneflags.append(('', 'narrow', None,
                       _("create a narrow clone of select files")))
    cloneflags.append(('', 'depth', '',
                       _("limit the history fetched by distance from heads")))
    # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
    if 'sparse' not in extensions.enabled():
        # sparse registers its own --include/--exclude flags; avoid a clash.
        cloneflags.extend([
            ('', 'include', [],
             _("specifically fetch this file/directory")),
            ('', 'exclude', [],
             _("do not fetch this file/directory, even if included")),
        ])

    pullentry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
    pullentry[1].append(('', 'depth', '',
                         _("limit the history fetched by distance from heads")))

    extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
58
def expandpull(pullop, includepats, excludepats):
    """Ask the remote to expand the narrowspec patterns when required.

    Returns the (possibly expanded) include/exclude pattern collections;
    when no expansion is needed the inputs are returned untouched,
    otherwise the expanded patterns come back as sets.
    """
    if not narrowspec.needsexpansion(includepats):
        return includepats, excludepats

    heads = pullop.heads or pullop.rheads
    newincludes, newexcludes = pullop.remote.expandnarrow(
        includepats, excludepats, heads)
    pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n'
                         % (newincludes, newexcludes))
    return set(newincludes), set(newexcludes)
69
69
def clonenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps clone command, so 'hg clone' first wraps localrepo.clone().

    With --narrow, also installs a temporary wrapper around
    exchange._pullbundle2extraprepare that derives a narrowspec from the
    clone flags, saves it on the new repo, and forwards --depth.
    """
    wrappedextraprepare = util.nullcontextmanager()
    opts_narrow = opts['narrow']
    if opts_narrow:
        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
            # Create narrow spec patterns from clone flags
            includepats = narrowspec.parsepatterns(opts['include'])
            excludepats = narrowspec.parsepatterns(opts['exclude'])

            # If necessary, ask the server to expand the narrowspec.
            includepats, excludepats = expandpull(
                pullop, includepats, excludepats)

            if not includepats and excludepats:
                # If nothing was included, we assume the user meant to include
                # everything, except what they asked to exclude.
                includepats = {'path:.'}

            narrowspec.save(pullop.repo, includepats, excludepats)

            # This will populate 'includepats' etc with the values from the
            # narrowspec we just saved.
            orig(pullop, kwargs)

            if opts.get('depth'):
                kwargs['depth'] = opts['depth']
        wrappedextraprepare = extensions.wrappedfunction(exchange,
            '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

    def pullnarrow(orig, repo, *args, **kwargs):
        # Wrap the repo class (on the unfiltered repo) with the narrow
        # variant before the actual pull runs.
        narrowrepo.wraprepo(repo.unfiltered(), opts_narrow)
        if isinstance(repo, repoview.repoview):
            # NOTE(review): rewires the filtered view's base classes so it
            # picks up the wrapped unfiltered class — intentional surgery.
            repo.__class__.__bases__ = (repo.__class__.__bases__[0],
                                        repo.unfiltered().__class__)
        if opts_narrow:
            # Record the narrow requirement so other clients refuse to
            # operate on this repo without narrow support.
            repo.requirements.add(narrowrepo.REQUIREMENT)
            repo._writerequirements()

        return orig(repo, *args, **kwargs)

    wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)

    with wrappedextraprepare, wrappedpull:
        return orig(ui, repo, *args, **opts)
115
115
def pullnarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps pull command to allow modifying narrow spec."""
    if narrowrepo.REQUIREMENT not in repo.requirements:
        # Not a narrow repo: plain pull, nothing extra to prepare.
        with util.nullcontextmanager():
            return orig(ui, repo, *args, **opts)

    def pullbundle2extraprepare_widen(orig, pullop, kwargs):
        orig(pullop, kwargs)
        depth = opts.get('depth')
        if depth:
            # Forward --depth to the bundle2 pull arguments.
            kwargs['depth'] = depth

    wrappedextraprepare = extensions.wrappedfunction(
        exchange, '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
    with wrappedextraprepare:
        return orig(ui, repo, *args, **opts)
130
130
def archivenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps archive command to narrow the default includes."""
    if narrowrepo.REQUIREMENT not in repo.requirements:
        return orig(ui, repo, *args, **opts)
    # Intersect any user-supplied patterns with the repo's narrowspec so
    # the archive never reaches outside the narrow clone.
    repoincludes, repoexcludes = repo.narrowpats
    userincludes = set(opts.get('include', []))
    userexcludes = set(opts.get('exclude', []))
    userincludes, userexcludes = narrowspec.restrictpatterns(
        userincludes, userexcludes, repoincludes, repoexcludes)
    if userincludes:
        opts['include'] = userincludes
    if userexcludes:
        opts['exclude'] = userexcludes
    return orig(ui, repo, *args, **opts)
144
144
def pullbundle2extraprepare(orig, pullop, kwargs):
    """Add the narrow bundle2 arguments to a pull from a narrow repo.

    Aborts when the server lacks the narrow capability; otherwise fills
    in the narrowspec patterns and the set of locally-known changesets.
    """
    repo = pullop.repo
    if narrowrepo.REQUIREMENT not in repo.requirements:
        return orig(pullop, kwargs)

    if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
        raise error.Abort(_("server doesn't support narrow clones"))
    orig(pullop, kwargs)
    include, exclude = repo.narrowpats
    kwargs.update({
        'narrow': True,
        'oldincludepats': include,
        'oldexcludepats': exclude,
        'includepats': include,
        'excludepats': exclude,
    })
    known = [node.hex(ctx.node())
             for ctx in repo.set('::%ln', pullop.common)
             if ctx.node() != node.nullid]
    # Mercurial serialized an empty list as '' and deserializes it as
    # [''], so only send 'known' when it is non-empty to avoid handling
    # the empty string on the server.
    if known:
        kwargs['known'] = known

extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
                        pullbundle2extraprepare)
170
170
def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
            newincludes, newexcludes, force):
    """Shrink the repository to a smaller narrowspec.

    Finds local-only commits touching newly-excluded paths and strips
    their descendants (aborting, unless ``force``, when any are visible),
    deletes store files that fall outside the new spec, and drops
    excluded files from the dirstate and working copy.
    """
    oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    # This is essentially doing "hg outgoing" to find all local-only
    # commits. We will then check that the local-only commits don't
    # have any changes to files that will be untracked.
    unfi = repo.unfiltered()
    outgoing = discovery.findcommonoutgoing(unfi, remote,
                                            commoninc=commoninc)
    ui.status(_('looking for local changes to affected paths\n'))
    localnodes = []
    for n in itertools.chain(outgoing.missing, outgoing.excluded):
        # A commit must be stripped if it changes a file that was
        # tracked under the old spec but is excluded by the new one.
        if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
            localnodes.append(n)
    revstostrip = unfi.revs('descendants(%ln)', localnodes)
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    visibletostrip = list(repo.changelog.node(r)
                          for r in (revstostrip - hiddenrevs))
    if visibletostrip:
        ui.status(_('The following changeset(s) or their ancestors have '
                    'local changes not on the remote:\n'))
        maxnodes = 10
        if ui.verbose or len(visibletostrip) <= maxnodes:
            for n in visibletostrip:
                ui.status('%s\n' % node.short(n))
        else:
            # Truncate the listing unless --verbose was given.
            for n in visibletostrip[:maxnodes]:
                ui.status('%s\n' % node.short(n))
            ui.status(_('...and %d more, use --verbose to list all\n') %
                      (len(visibletostrip) - maxnodes))
        if not force:
            raise error.Abort(_('local changes found'),
                              hint=_('use --force-delete-local-changes to '
                                     'ignore'))

    if revstostrip:
        tostrip = [unfi.changelog.node(r) for r in revstostrip]
        if repo['.'].node() in tostrip:
            # stripping working copy, so move to a different commit first
            urev = max(repo.revs('(::%n) - %ln + null',
                                 repo['.'].node(), visibletostrip))
            hg.clean(repo, urev)
        repair.strip(ui, unfi, tostrip, topic='narrow')

    # Collect store files (filelogs and tree-manifest dirlogs) that fall
    # outside the new narrowspec.
    todelete = []
    for f, f2, size in repo.store.datafiles():
        if f.startswith('data/'):
            # Strip the 'data/' prefix and the 2-char revlog suffix
            # (presumably '.i' — confirm against store layout).
            file = f[5:-2]
            if not newmatch(file):
                todelete.append(f)
        elif f.startswith('meta/'):
            # Strip 'meta/' and the trailing 13 chars (presumably
            # '/00manifest.i' — confirm against store layout).
            dir = f[5:-13]
            dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
            include = True
            for d in dirs:
                visit = newmatch.visitdir(d)
                if not visit:
                    include = False
                    break
                if visit == 'all':
                    break
            if not include:
                todelete.append(f)

    repo.destroying()

    with repo.transaction("narrowing"):
        for f in todelete:
            ui.status(_('deleting %s\n') % f)
            util.unlinkpath(repo.svfs.join(f))
            repo.store.markremoved(f)

        for f in repo.dirstate:
            if not newmatch(f):
                # Untrack and remove files excluded by the new spec.
                repo.dirstate.drop(f)
                repo.wvfs.unlinkpath(f)
        repo.setnarrowpats(newincludes, newexcludes)

    repo.destroyed()
252
252
def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
    """Grow the repository to a larger narrowspec.

    Pulls the common heads again with the widened patterns installed via
    a temporary _pullbundle2extraprepare wrapper, then materializes the
    newly-included files into the working copy.
    """
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    # TODO(martinvonz): Get expansion working with widening/narrowing.
    if narrowspec.needsexpansion(newincludes):
        raise error.Abort('Expansion not yet supported on pull')

    def pullbundle2extraprepare_widen(orig, pullop, kwargs):
        orig(pullop, kwargs)
        # The old{in,ex}cludepats have already been set by orig()
        kwargs['includepats'] = newincludes
        kwargs['excludepats'] = newexcludes
    wrappedextraprepare = extensions.wrappedfunction(exchange,
        '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

    # define a function that narrowbundle2 can call after creating the
    # backup bundle, but before applying the bundle from the server
    def setnewnarrowpats():
        repo.setnarrowpats(newincludes, newexcludes)
    repo.setnewnarrowpats = setnewnarrowpats

    # NOTE(review): dirstate parents are zeroed for the duration of the
    # pull and restored afterwards — presumably so the pull does not
    # interact with the working-copy parent; confirm before changing.
    ds = repo.dirstate
    p1, p2 = ds.p1(), ds.p2()
    with ds.parentchange():
        ds.setparents(node.nullid, node.nullid)
    common = commoninc[0]
    with wrappedextraprepare:
        exchange.pull(repo, remote, heads=common)
    with ds.parentchange():
        ds.setparents(p1, p2)

    # Build a merge 'get' action for every newly-included file that is
    # not yet tracked, then apply it to populate the working copy.
    actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
    addgaction = actions['g'].append

    mf = repo['.'].manifest().matches(newmatch)
    for f, fn in mf.iteritems():
        if f not in repo.dirstate:
            addgaction((f, (mf.flags(f), False),
                        "add from widened narrow clone"))

    merge.applyupdates(repo, actions, wctx=repo[None],
                       mctx=repo['.'], overwrite=False)
    merge.recordupdates(repo, actions, branchmerge=False)
296
297 # TODO(rdamazio): Make new matcher format and update description
297 # TODO(rdamazio): Make new matcher format and update description
298 @command('tracked',
298 @command('tracked',
299 [('', 'addinclude', [], _('new paths to include')),
299 [('', 'addinclude', [], _('new paths to include')),
300 ('', 'removeinclude', [], _('old paths to no longer include')),
300 ('', 'removeinclude', [], _('old paths to no longer include')),
301 ('', 'addexclude', [], _('new paths to exclude')),
301 ('', 'addexclude', [], _('new paths to exclude')),
302 ('', 'removeexclude', [], _('old paths to no longer exclude')),
302 ('', 'removeexclude', [], _('old paths to no longer exclude')),
303 ('', 'clear', False, _('whether to replace the existing narrowspec')),
303 ('', 'clear', False, _('whether to replace the existing narrowspec')),
304 ('', 'force-delete-local-changes', False,
304 ('', 'force-delete-local-changes', False,
305 _('forces deletion of local changes when narrowing')),
305 _('forces deletion of local changes when narrowing')),
306 ] + commands.remoteopts,
306 ] + commands.remoteopts,
307 _('[OPTIONS]... [REMOTE]'),
307 _('[OPTIONS]... [REMOTE]'),
308 inferrepo=True)
308 inferrepo=True)
309 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
309 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
310 """show or change the current narrowspec
310 """show or change the current narrowspec
311
311
312 With no argument, shows the current narrowspec entries, one per line. Each
312 With no argument, shows the current narrowspec entries, one per line. Each
313 line will be prefixed with 'I' or 'X' for included or excluded patterns,
313 line will be prefixed with 'I' or 'X' for included or excluded patterns,
314 respectively.
314 respectively.
315
315
316 The narrowspec is comprised of expressions to match remote files and/or
316 The narrowspec is comprised of expressions to match remote files and/or
317 directories that should be pulled into your client.
317 directories that should be pulled into your client.
318 The narrowspec has *include* and *exclude* expressions, with excludes always
318 The narrowspec has *include* and *exclude* expressions, with excludes always
319 trumping includes: that is, if a file matches an exclude expression, it will
319 trumping includes: that is, if a file matches an exclude expression, it will
320 be excluded even if it also matches an include expression.
320 be excluded even if it also matches an include expression.
321 Excluding files that were never included has no effect.
321 Excluding files that were never included has no effect.
322
322
323 Each included or excluded entry is in the format described by
323 Each included or excluded entry is in the format described by
324 'hg help patterns'.
324 'hg help patterns'.
325
325
326 The options allow you to add or remove included and excluded expressions.
326 The options allow you to add or remove included and excluded expressions.
327
327
328 If --clear is specified, then all previous includes and excludes are DROPPED
328 If --clear is specified, then all previous includes and excludes are DROPPED
329 and replaced by the new ones specified to --addinclude and --addexclude.
329 and replaced by the new ones specified to --addinclude and --addexclude.
330 If --clear is specified without any further options, the narrowspec will be
330 If --clear is specified without any further options, the narrowspec will be
331 empty and will not match any files.
331 empty and will not match any files.
332 """
332 """
333 if narrowrepo.requirement not in repo.requirements:
333 if narrowrepo.REQUIREMENT not in repo.requirements:
334 ui.warn(_('The narrow command is only supported on respositories cloned'
334 ui.warn(_('The narrow command is only supported on respositories cloned'
335 ' with --narrow.\n'))
335 ' with --narrow.\n'))
336 return 1
336 return 1
337
337
338 # Before supporting, decide whether it "hg tracked --clear" should mean
338 # Before supporting, decide whether it "hg tracked --clear" should mean
339 # tracking no paths or all paths.
339 # tracking no paths or all paths.
340 if opts['clear']:
340 if opts['clear']:
341 ui.warn(_('The --clear option is not yet supported.\n'))
341 ui.warn(_('The --clear option is not yet supported.\n'))
342 return 1
342 return 1
343
343
344 if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']):
344 if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']):
345 raise error.Abort('Expansion not yet supported on widen/narrow')
345 raise error.Abort('Expansion not yet supported on widen/narrow')
346
346
347 addedincludes = narrowspec.parsepatterns(opts['addinclude'])
347 addedincludes = narrowspec.parsepatterns(opts['addinclude'])
348 removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
348 removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
349 addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
349 addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
350 removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
350 removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
351 widening = addedincludes or removedexcludes
351 widening = addedincludes or removedexcludes
352 narrowing = removedincludes or addedexcludes
352 narrowing = removedincludes or addedexcludes
353 only_show = not widening and not narrowing
353 only_show = not widening and not narrowing
354
354
355 # Only print the current narrowspec.
355 # Only print the current narrowspec.
356 if only_show:
356 if only_show:
357 include, exclude = repo.narrowpats
357 include, exclude = repo.narrowpats
358
358
359 ui.pager('tracked')
359 ui.pager('tracked')
360 fm = ui.formatter('narrow', opts)
360 fm = ui.formatter('narrow', opts)
361 for i in sorted(include):
361 for i in sorted(include):
362 fm.startitem()
362 fm.startitem()
363 fm.write('status', '%s ', 'I', label='narrow.included')
363 fm.write('status', '%s ', 'I', label='narrow.included')
364 fm.write('pat', '%s\n', i, label='narrow.included')
364 fm.write('pat', '%s\n', i, label='narrow.included')
365 for i in sorted(exclude):
365 for i in sorted(exclude):
366 fm.startitem()
366 fm.startitem()
367 fm.write('status', '%s ', 'X', label='narrow.excluded')
367 fm.write('status', '%s ', 'X', label='narrow.excluded')
368 fm.write('pat', '%s\n', i, label='narrow.excluded')
368 fm.write('pat', '%s\n', i, label='narrow.excluded')
369 fm.end()
369 fm.end()
370 return 0
370 return 0
371
371
372 with repo.wlock(), repo.lock():
372 with repo.wlock(), repo.lock():
373 cmdutil.bailifchanged(repo)
373 cmdutil.bailifchanged(repo)
374
374
375 # Find the revisions we have in common with the remote. These will
375 # Find the revisions we have in common with the remote. These will
376 # be used for finding local-only changes for narrowing. They will
376 # be used for finding local-only changes for narrowing. They will
377 # also define the set of revisions to update for widening.
377 # also define the set of revisions to update for widening.
378 remotepath = ui.expandpath(remotepath or 'default')
378 remotepath = ui.expandpath(remotepath or 'default')
379 url, branches = hg.parseurl(remotepath)
379 url, branches = hg.parseurl(remotepath)
380 ui.status(_('comparing with %s\n') % util.hidepassword(url))
380 ui.status(_('comparing with %s\n') % util.hidepassword(url))
381 remote = hg.peer(repo, opts, url)
381 remote = hg.peer(repo, opts, url)
382 commoninc = discovery.findcommonincoming(repo, remote)
382 commoninc = discovery.findcommonincoming(repo, remote)
383
383
384 oldincludes, oldexcludes = repo.narrowpats
384 oldincludes, oldexcludes = repo.narrowpats
385 if narrowing:
385 if narrowing:
386 newincludes = oldincludes - removedincludes
386 newincludes = oldincludes - removedincludes
387 newexcludes = oldexcludes | addedexcludes
387 newexcludes = oldexcludes | addedexcludes
388 _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
388 _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
389 newincludes, newexcludes,
389 newincludes, newexcludes,
390 opts['force_delete_local_changes'])
390 opts['force_delete_local_changes'])
391 # _narrow() updated the narrowspec and _widen() below needs to
391 # _narrow() updated the narrowspec and _widen() below needs to
392 # use the updated values as its base (otherwise removed includes
392 # use the updated values as its base (otherwise removed includes
393 # and addedexcludes will be lost in the resulting narrowspec)
393 # and addedexcludes will be lost in the resulting narrowspec)
394 oldincludes = newincludes
394 oldincludes = newincludes
395 oldexcludes = newexcludes
395 oldexcludes = newexcludes
396
396
397 if widening:
397 if widening:
398 newincludes = oldincludes | addedincludes
398 newincludes = oldincludes | addedincludes
399 newexcludes = oldexcludes - removedexcludes
399 newexcludes = oldexcludes - removedexcludes
400 _widen(ui, repo, remote, commoninc, newincludes, newexcludes)
400 _widen(ui, repo, remote, commoninc, newincludes, newexcludes)
401
401
402 return 0
402 return 0
@@ -1,110 +1,110 b''
1 # narrowrepo.py - repository which supports narrow revlogs, lazy loading
1 # narrowrepo.py - repository which supports narrow revlogs, lazy loading
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from mercurial import (
10 from mercurial import (
11 bundlerepo,
11 bundlerepo,
12 localrepo,
12 localrepo,
13 match as matchmod,
13 match as matchmod,
14 scmutil,
14 scmutil,
15 )
15 )
16
16
17 from .. import (
17 from .. import (
18 share,
18 share,
19 )
19 )
20
20
21 from . import (
21 from . import (
22 narrowrevlog,
22 narrowrevlog,
23 narrowspec,
23 narrowspec,
24 )
24 )
25
25
26 requirement = 'narrowhg'
26 REQUIREMENT = 'narrowhg'
27
27
28 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
28 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
29 orig(sourcerepo, destrepo, **kwargs)
29 orig(sourcerepo, destrepo, **kwargs)
30 if requirement in sourcerepo.requirements:
30 if REQUIREMENT in sourcerepo.requirements:
31 with destrepo.wlock():
31 with destrepo.wlock():
32 with destrepo.vfs('shared', 'a') as fp:
32 with destrepo.vfs('shared', 'a') as fp:
33 fp.write(narrowspec.FILENAME + '\n')
33 fp.write(narrowspec.FILENAME + '\n')
34
34
35 def unsharenarrowspec(orig, ui, repo, repopath):
35 def unsharenarrowspec(orig, ui, repo, repopath):
36 if (requirement in repo.requirements
36 if (REQUIREMENT in repo.requirements
37 and repo.path == repopath and repo.shared()):
37 and repo.path == repopath and repo.shared()):
38 srcrepo = share._getsrcrepo(repo)
38 srcrepo = share._getsrcrepo(repo)
39 with srcrepo.vfs(narrowspec.FILENAME) as f:
39 with srcrepo.vfs(narrowspec.FILENAME) as f:
40 spec = f.read()
40 spec = f.read()
41 with repo.vfs(narrowspec.FILENAME, 'w') as f:
41 with repo.vfs(narrowspec.FILENAME, 'w') as f:
42 f.write(spec)
42 f.write(spec)
43 return orig(ui, repo, repopath)
43 return orig(ui, repo, repopath)
44
44
45 def wraprepo(repo, opts_narrow):
45 def wraprepo(repo, opts_narrow):
46 """Enables narrow clone functionality on a single local repository."""
46 """Enables narrow clone functionality on a single local repository."""
47
47
48 cacheprop = localrepo.storecache
48 cacheprop = localrepo.storecache
49 if isinstance(repo, bundlerepo.bundlerepository):
49 if isinstance(repo, bundlerepo.bundlerepository):
50 # We have to use a different caching property decorator for
50 # We have to use a different caching property decorator for
51 # bundlerepo because storecache blows up in strange ways on a
51 # bundlerepo because storecache blows up in strange ways on a
52 # bundlerepo. Fortunately, there's no risk of data changing in
52 # bundlerepo. Fortunately, there's no risk of data changing in
53 # a bundlerepo.
53 # a bundlerepo.
54 cacheprop = lambda name: localrepo.unfilteredpropertycache
54 cacheprop = lambda name: localrepo.unfilteredpropertycache
55
55
56 class narrowrepository(repo.__class__):
56 class narrowrepository(repo.__class__):
57
57
58 def _constructmanifest(self):
58 def _constructmanifest(self):
59 manifest = super(narrowrepository, self)._constructmanifest()
59 manifest = super(narrowrepository, self)._constructmanifest()
60 narrowrevlog.makenarrowmanifestrevlog(manifest, repo)
60 narrowrevlog.makenarrowmanifestrevlog(manifest, repo)
61 return manifest
61 return manifest
62
62
63 @cacheprop('00manifest.i')
63 @cacheprop('00manifest.i')
64 def manifestlog(self):
64 def manifestlog(self):
65 mfl = super(narrowrepository, self).manifestlog
65 mfl = super(narrowrepository, self).manifestlog
66 narrowrevlog.makenarrowmanifestlog(mfl, self)
66 narrowrevlog.makenarrowmanifestlog(mfl, self)
67 return mfl
67 return mfl
68
68
69 def file(self, f):
69 def file(self, f):
70 fl = super(narrowrepository, self).file(f)
70 fl = super(narrowrepository, self).file(f)
71 narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
71 narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
72 return fl
72 return fl
73
73
74 @localrepo.repofilecache(narrowspec.FILENAME)
74 @localrepo.repofilecache(narrowspec.FILENAME)
75 def narrowpats(self):
75 def narrowpats(self):
76 return narrowspec.load(self)
76 return narrowspec.load(self)
77
77
78 @localrepo.repofilecache(narrowspec.FILENAME)
78 @localrepo.repofilecache(narrowspec.FILENAME)
79 def _narrowmatch(self):
79 def _narrowmatch(self):
80 include, exclude = self.narrowpats
80 include, exclude = self.narrowpats
81 if not opts_narrow and not include and not exclude:
81 if not opts_narrow and not include and not exclude:
82 return matchmod.always(self.root, '')
82 return matchmod.always(self.root, '')
83 return narrowspec.match(self.root, include=include, exclude=exclude)
83 return narrowspec.match(self.root, include=include, exclude=exclude)
84
84
85 # TODO(martinvonz): make this property-like instead?
85 # TODO(martinvonz): make this property-like instead?
86 def narrowmatch(self):
86 def narrowmatch(self):
87 return self._narrowmatch
87 return self._narrowmatch
88
88
89 def setnarrowpats(self, newincludes, newexcludes):
89 def setnarrowpats(self, newincludes, newexcludes):
90 narrowspec.save(self, newincludes, newexcludes)
90 narrowspec.save(self, newincludes, newexcludes)
91 self.invalidate(clearfilecache=True)
91 self.invalidate(clearfilecache=True)
92
92
93 # I'm not sure this is the right place to do this filter.
93 # I'm not sure this is the right place to do this filter.
94 # context._manifestmatches() would probably be better, or perhaps
94 # context._manifestmatches() would probably be better, or perhaps
95 # move it to a later place, in case some of the callers do want to know
95 # move it to a later place, in case some of the callers do want to know
96 # which directories changed. This seems to work for now, though.
96 # which directories changed. This seems to work for now, though.
97 def status(self, *args, **kwargs):
97 def status(self, *args, **kwargs):
98 s = super(narrowrepository, self).status(*args, **kwargs)
98 s = super(narrowrepository, self).status(*args, **kwargs)
99 narrowmatch = self.narrowmatch()
99 narrowmatch = self.narrowmatch()
100 modified = filter(narrowmatch, s.modified)
100 modified = filter(narrowmatch, s.modified)
101 added = filter(narrowmatch, s.added)
101 added = filter(narrowmatch, s.added)
102 removed = filter(narrowmatch, s.removed)
102 removed = filter(narrowmatch, s.removed)
103 deleted = filter(narrowmatch, s.deleted)
103 deleted = filter(narrowmatch, s.deleted)
104 unknown = filter(narrowmatch, s.unknown)
104 unknown = filter(narrowmatch, s.unknown)
105 ignored = filter(narrowmatch, s.ignored)
105 ignored = filter(narrowmatch, s.ignored)
106 clean = filter(narrowmatch, s.clean)
106 clean = filter(narrowmatch, s.clean)
107 return scmutil.status(modified, added, removed, deleted, unknown,
107 return scmutil.status(modified, added, removed, deleted, unknown,
108 ignored, clean)
108 ignored, clean)
109
109
110 repo.__class__ = narrowrepository
110 repo.__class__ = narrowrepository
General Comments 0
You need to be logged in to leave comments. Login now