requirements: introduce new requirements related module...
Pulkit Goyal
r45932:77b8588d default

The requested changes are too big and content was truncated.

NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated.
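The new module added by this commit is not shown because of the truncation above. Purely as an illustrative sketch (not the truncated file's verified contents), a requirements module of this kind would simply host the requirement constants that callers previously imported from mercurial.interfaces.repository; the constant names below are taken from the hunks that follow, while their values are assumptions based on how these requirements are spelled elsewhere in Mercurial:

# mercurial/requirements.py -- illustrative sketch only; the real file's
# contents are truncated above. Constant values are assumptions, not
# verified against the commit.
from __future__ import absolute_import

# repository was created with a narrow clone (used by hgext/narrow below)
NARROW_REQUIREMENT = b'narrowhg-experimental'

# manifests are stored per-directory (tree manifests)
TREEMANIFEST_REQUIREMENT = b'treemanifest'

With such a module in place, extension code switches from `repository.NARROW_REQUIREMENT` to `requirements.NARROW_REQUIREMENT`, which is the mechanical substitution visible in the diffs below.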
@@ -1,78 +1,78 b''
1 # __init__.py - narrowhg extension
1 # __init__.py - narrowhg extension
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 '''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
7 '''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 from mercurial import (
11 from mercurial import (
12 localrepo,
12 localrepo,
13 registrar,
13 registrar,
14 requirements,
14 )
15 )
15
16
16 from mercurial.interfaces import repository
17
17
18 from . import (
18 from . import (
19 narrowbundle2,
19 narrowbundle2,
20 narrowcommands,
20 narrowcommands,
21 narrowrepo,
21 narrowrepo,
22 narrowtemplates,
22 narrowtemplates,
23 narrowwirepeer,
23 narrowwirepeer,
24 )
24 )
25
25
26 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
26 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
27 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
27 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
28 # be specifying the version(s) of Mercurial they are tested with, or
28 # be specifying the version(s) of Mercurial they are tested with, or
29 # leave the attribute unspecified.
29 # leave the attribute unspecified.
30 testedwith = b'ships-with-hg-core'
30 testedwith = b'ships-with-hg-core'
31
31
32 configtable = {}
32 configtable = {}
33 configitem = registrar.configitem(configtable)
33 configitem = registrar.configitem(configtable)
34 # Narrowhg *has* support for serving ellipsis nodes (which are used at
34 # Narrowhg *has* support for serving ellipsis nodes (which are used at
35 # least by Google's internal server), but that support is pretty
35 # least by Google's internal server), but that support is pretty
36 # fragile and has a lot of problems on real-world repositories that
36 # fragile and has a lot of problems on real-world repositories that
37 # have complex graph topologies. This could probably be corrected, but
37 # have complex graph topologies. This could probably be corrected, but
38 # absent someone needing the full support for ellipsis nodes in
38 # absent someone needing the full support for ellipsis nodes in
39 # repositories with merges, it's unlikely this work will get done. As
39 # repositories with merges, it's unlikely this work will get done. As
40 # of this writing in late 2017, all repositories large enough for
40 # of this writing in late 2017, all repositories large enough for
41 # ellipsis nodes to be a hard requirement also enforce strictly linear
41 # ellipsis nodes to be a hard requirement also enforce strictly linear
42 # history for other scaling reasons.
42 # history for other scaling reasons.
43 configitem(
43 configitem(
44 b'experimental',
44 b'experimental',
45 b'narrowservebrokenellipses',
45 b'narrowservebrokenellipses',
46 default=False,
46 default=False,
47 alias=[(b'narrow', b'serveellipses')],
47 alias=[(b'narrow', b'serveellipses')],
48 )
48 )
49
49
50 # Export the commands table for Mercurial to see.
50 # Export the commands table for Mercurial to see.
51 cmdtable = narrowcommands.table
51 cmdtable = narrowcommands.table
52
52
53
53
54 def featuresetup(ui, features):
54 def featuresetup(ui, features):
55 features.add(repository.NARROW_REQUIREMENT)
55 features.add(requirements.NARROW_REQUIREMENT)
56
56
57
57
58 def uisetup(ui):
58 def uisetup(ui):
59 """Wraps user-facing mercurial commands with narrow-aware versions."""
59 """Wraps user-facing mercurial commands with narrow-aware versions."""
60 localrepo.featuresetupfuncs.add(featuresetup)
60 localrepo.featuresetupfuncs.add(featuresetup)
61 narrowbundle2.setup()
61 narrowbundle2.setup()
62 narrowcommands.setup()
62 narrowcommands.setup()
63 narrowwirepeer.uisetup()
63 narrowwirepeer.uisetup()
64
64
65
65
66 def reposetup(ui, repo):
66 def reposetup(ui, repo):
67 """Wraps local repositories with narrow repo support."""
67 """Wraps local repositories with narrow repo support."""
68 if not repo.local():
68 if not repo.local():
69 return
69 return
70
70
71 repo.ui.setconfig(b'experimental', b'narrow', True, b'narrow-ext')
71 repo.ui.setconfig(b'experimental', b'narrow', True, b'narrow-ext')
72 if repository.NARROW_REQUIREMENT in repo.requirements:
72 if requirements.NARROW_REQUIREMENT in repo.requirements:
73 narrowrepo.wraprepo(repo)
73 narrowrepo.wraprepo(repo)
74 narrowwirepeer.reposetup(repo)
74 narrowwirepeer.reposetup(repo)
75
75
76
76
77 templatekeyword = narrowtemplates.templatekeyword
77 templatekeyword = narrowtemplates.templatekeyword
78 revsetpredicate = narrowtemplates.revsetpredicate
78 revsetpredicate = narrowtemplates.revsetpredicate
@@ -1,343 +1,343 b''
1 # narrowbundle2.py - bundle2 extensions for narrow repository support
1 # narrowbundle2.py - bundle2 extensions for narrow repository support
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import struct
11 import struct
12
12
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.node import nullid
14 from mercurial.node import nullid
15 from mercurial import (
15 from mercurial import (
16 bundle2,
16 bundle2,
17 changegroup,
17 changegroup,
18 error,
18 error,
19 exchange,
19 exchange,
20 localrepo,
20 localrepo,
21 narrowspec,
21 narrowspec,
22 repair,
22 repair,
23 requirements,
23 scmutil,
24 scmutil,
24 util,
25 util,
25 wireprototypes,
26 wireprototypes,
26 )
27 )
27 from mercurial.interfaces import repository
28 from mercurial.utils import stringutil
28 from mercurial.utils import stringutil
29
29
30 _NARROWACL_SECTION = b'narrowacl'
30 _NARROWACL_SECTION = b'narrowacl'
31 _CHANGESPECPART = b'narrow:changespec'
31 _CHANGESPECPART = b'narrow:changespec'
32 _RESSPECS = b'narrow:responsespec'
32 _RESSPECS = b'narrow:responsespec'
33 _SPECPART = b'narrow:spec'
33 _SPECPART = b'narrow:spec'
34 _SPECPART_INCLUDE = b'include'
34 _SPECPART_INCLUDE = b'include'
35 _SPECPART_EXCLUDE = b'exclude'
35 _SPECPART_EXCLUDE = b'exclude'
36 _KILLNODESIGNAL = b'KILL'
36 _KILLNODESIGNAL = b'KILL'
37 _DONESIGNAL = b'DONE'
37 _DONESIGNAL = b'DONE'
38 _ELIDEDCSHEADER = b'>20s20s20sl' # cset id, p1, p2, len(text)
38 _ELIDEDCSHEADER = b'>20s20s20sl' # cset id, p1, p2, len(text)
39 _ELIDEDMFHEADER = b'>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
39 _ELIDEDMFHEADER = b'>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
40 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
40 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
41 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
41 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
42
42
43 # Serve a changegroup for a client with a narrow clone.
43 # Serve a changegroup for a client with a narrow clone.
44 def getbundlechangegrouppart_narrow(
44 def getbundlechangegrouppart_narrow(
45 bundler,
45 bundler,
46 repo,
46 repo,
47 source,
47 source,
48 bundlecaps=None,
48 bundlecaps=None,
49 b2caps=None,
49 b2caps=None,
50 heads=None,
50 heads=None,
51 common=None,
51 common=None,
52 **kwargs
52 **kwargs
53 ):
53 ):
54 assert repo.ui.configbool(b'experimental', b'narrowservebrokenellipses')
54 assert repo.ui.configbool(b'experimental', b'narrowservebrokenellipses')
55
55
56 cgversions = b2caps.get(b'changegroup')
56 cgversions = b2caps.get(b'changegroup')
57 cgversions = [
57 cgversions = [
58 v
58 v
59 for v in cgversions
59 for v in cgversions
60 if v in changegroup.supportedoutgoingversions(repo)
60 if v in changegroup.supportedoutgoingversions(repo)
61 ]
61 ]
62 if not cgversions:
62 if not cgversions:
63 raise ValueError(_(b'no common changegroup version'))
63 raise ValueError(_(b'no common changegroup version'))
64 version = max(cgversions)
64 version = max(cgversions)
65
65
66 include = sorted(filter(bool, kwargs.get('includepats', [])))
66 include = sorted(filter(bool, kwargs.get('includepats', [])))
67 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
67 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
68 generateellipsesbundle2(
68 generateellipsesbundle2(
69 bundler,
69 bundler,
70 repo,
70 repo,
71 include,
71 include,
72 exclude,
72 exclude,
73 version,
73 version,
74 common,
74 common,
75 heads,
75 heads,
76 kwargs.get('depth', None),
76 kwargs.get('depth', None),
77 )
77 )
78
78
79
79
80 def generateellipsesbundle2(
80 def generateellipsesbundle2(
81 bundler, repo, include, exclude, version, common, heads, depth,
81 bundler, repo, include, exclude, version, common, heads, depth,
82 ):
82 ):
83 match = narrowspec.match(repo.root, include=include, exclude=exclude)
83 match = narrowspec.match(repo.root, include=include, exclude=exclude)
84 if depth is not None:
84 if depth is not None:
85 depth = int(depth)
85 depth = int(depth)
86 if depth < 1:
86 if depth < 1:
87 raise error.Abort(_(b'depth must be positive, got %d') % depth)
87 raise error.Abort(_(b'depth must be positive, got %d') % depth)
88
88
89 heads = set(heads or repo.heads())
89 heads = set(heads or repo.heads())
90 common = set(common or [nullid])
90 common = set(common or [nullid])
91
91
92 visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
92 visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
93 repo, common, heads, set(), match, depth=depth
93 repo, common, heads, set(), match, depth=depth
94 )
94 )
95
95
96 repo.ui.debug(b'Found %d relevant revs\n' % len(relevant_nodes))
96 repo.ui.debug(b'Found %d relevant revs\n' % len(relevant_nodes))
97 if visitnodes:
97 if visitnodes:
98 packer = changegroup.getbundler(
98 packer = changegroup.getbundler(
99 version,
99 version,
100 repo,
100 repo,
101 matcher=match,
101 matcher=match,
102 ellipses=True,
102 ellipses=True,
103 shallow=depth is not None,
103 shallow=depth is not None,
104 ellipsisroots=ellipsisroots,
104 ellipsisroots=ellipsisroots,
105 fullnodes=relevant_nodes,
105 fullnodes=relevant_nodes,
106 )
106 )
107 cgdata = packer.generate(common, visitnodes, False, b'narrow_widen')
107 cgdata = packer.generate(common, visitnodes, False, b'narrow_widen')
108
108
109 part = bundler.newpart(b'changegroup', data=cgdata)
109 part = bundler.newpart(b'changegroup', data=cgdata)
110 part.addparam(b'version', version)
110 part.addparam(b'version', version)
111 if repository.TREEMANIFEST_REQUIREMENT in repo.requirements:
111 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
112 part.addparam(b'treemanifest', b'1')
112 part.addparam(b'treemanifest', b'1')
113
113
114
114
115 def generate_ellipses_bundle2_for_widening(
115 def generate_ellipses_bundle2_for_widening(
116 bundler, repo, oldmatch, newmatch, version, common, known,
116 bundler, repo, oldmatch, newmatch, version, common, known,
117 ):
117 ):
118 common = set(common or [nullid])
118 common = set(common or [nullid])
119 # Steps:
119 # Steps:
120 # 1. Send kill for "$known & ::common"
120 # 1. Send kill for "$known & ::common"
121 #
121 #
122 # 2. Send changegroup for ::common
122 # 2. Send changegroup for ::common
123 #
123 #
124 # 3. Proceed.
124 # 3. Proceed.
125 #
125 #
126 # In the future, we can send kills for only the specific
126 # In the future, we can send kills for only the specific
127 # nodes we know should go away or change shape, and then
127 # nodes we know should go away or change shape, and then
128 # send a data stream that tells the client something like this:
128 # send a data stream that tells the client something like this:
129 #
129 #
130 # a) apply this changegroup
130 # a) apply this changegroup
131 # b) apply nodes XXX, YYY, ZZZ that you already have
131 # b) apply nodes XXX, YYY, ZZZ that you already have
132 # c) goto a
132 # c) goto a
133 #
133 #
134 # until they've built up the full new state.
134 # until they've built up the full new state.
135 knownrevs = {repo.changelog.rev(n) for n in known}
135 knownrevs = {repo.changelog.rev(n) for n in known}
136 # TODO: we could send only roots() of this set, and the
136 # TODO: we could send only roots() of this set, and the
137 # list of nodes in common, and the client could work out
137 # list of nodes in common, and the client could work out
138 # what to strip, instead of us explicitly sending every
138 # what to strip, instead of us explicitly sending every
139 # single node.
139 # single node.
140 deadrevs = knownrevs
140 deadrevs = knownrevs
141
141
142 def genkills():
142 def genkills():
143 for r in deadrevs:
143 for r in deadrevs:
144 yield _KILLNODESIGNAL
144 yield _KILLNODESIGNAL
145 yield repo.changelog.node(r)
145 yield repo.changelog.node(r)
146 yield _DONESIGNAL
146 yield _DONESIGNAL
147
147
148 bundler.newpart(_CHANGESPECPART, data=genkills())
148 bundler.newpart(_CHANGESPECPART, data=genkills())
149 newvisit, newfull, newellipsis = exchange._computeellipsis(
149 newvisit, newfull, newellipsis = exchange._computeellipsis(
150 repo, set(), common, knownrevs, newmatch
150 repo, set(), common, knownrevs, newmatch
151 )
151 )
152 if newvisit:
152 if newvisit:
153 packer = changegroup.getbundler(
153 packer = changegroup.getbundler(
154 version,
154 version,
155 repo,
155 repo,
156 matcher=newmatch,
156 matcher=newmatch,
157 ellipses=True,
157 ellipses=True,
158 shallow=False,
158 shallow=False,
159 ellipsisroots=newellipsis,
159 ellipsisroots=newellipsis,
160 fullnodes=newfull,
160 fullnodes=newfull,
161 )
161 )
162 cgdata = packer.generate(common, newvisit, False, b'narrow_widen')
162 cgdata = packer.generate(common, newvisit, False, b'narrow_widen')
163
163
164 part = bundler.newpart(b'changegroup', data=cgdata)
164 part = bundler.newpart(b'changegroup', data=cgdata)
165 part.addparam(b'version', version)
165 part.addparam(b'version', version)
166 if repository.TREEMANIFEST_REQUIREMENT in repo.requirements:
166 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
167 part.addparam(b'treemanifest', b'1')
167 part.addparam(b'treemanifest', b'1')
168
168
169
169
170 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
170 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
171 def _handlechangespec_2(op, inpart):
171 def _handlechangespec_2(op, inpart):
172 # XXX: This bundle2 handling is buggy and should be removed after hg5.2 is
172 # XXX: This bundle2 handling is buggy and should be removed after hg5.2 is
173 # released. New servers will send a mandatory bundle2 part named
173 # released. New servers will send a mandatory bundle2 part named
174 # 'Narrowspec' and will send specs as data instead of params.
174 # 'Narrowspec' and will send specs as data instead of params.
175 # Refer to issue5952 and 6019
175 # Refer to issue5952 and 6019
176 includepats = set(inpart.params.get(_SPECPART_INCLUDE, b'').splitlines())
176 includepats = set(inpart.params.get(_SPECPART_INCLUDE, b'').splitlines())
177 excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, b'').splitlines())
177 excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, b'').splitlines())
178 narrowspec.validatepatterns(includepats)
178 narrowspec.validatepatterns(includepats)
179 narrowspec.validatepatterns(excludepats)
179 narrowspec.validatepatterns(excludepats)
180
180
181 if not repository.NARROW_REQUIREMENT in op.repo.requirements:
181 if not requirements.NARROW_REQUIREMENT in op.repo.requirements:
182 op.repo.requirements.add(repository.NARROW_REQUIREMENT)
182 op.repo.requirements.add(requirements.NARROW_REQUIREMENT)
183 scmutil.writereporequirements(op.repo)
183 scmutil.writereporequirements(op.repo)
184 op.repo.setnarrowpats(includepats, excludepats)
184 op.repo.setnarrowpats(includepats, excludepats)
185 narrowspec.copytoworkingcopy(op.repo)
185 narrowspec.copytoworkingcopy(op.repo)
186
186
187
187
188 @bundle2.parthandler(_RESSPECS)
188 @bundle2.parthandler(_RESSPECS)
189 def _handlenarrowspecs(op, inpart):
189 def _handlenarrowspecs(op, inpart):
190 data = inpart.read()
190 data = inpart.read()
191 inc, exc = data.split(b'\0')
191 inc, exc = data.split(b'\0')
192 includepats = set(inc.splitlines())
192 includepats = set(inc.splitlines())
193 excludepats = set(exc.splitlines())
193 excludepats = set(exc.splitlines())
194 narrowspec.validatepatterns(includepats)
194 narrowspec.validatepatterns(includepats)
195 narrowspec.validatepatterns(excludepats)
195 narrowspec.validatepatterns(excludepats)
196
196
197 if repository.NARROW_REQUIREMENT not in op.repo.requirements:
197 if requirements.NARROW_REQUIREMENT not in op.repo.requirements:
198 op.repo.requirements.add(repository.NARROW_REQUIREMENT)
198 op.repo.requirements.add(requirements.NARROW_REQUIREMENT)
199 scmutil.writereporequirements(op.repo)
199 scmutil.writereporequirements(op.repo)
200 op.repo.setnarrowpats(includepats, excludepats)
200 op.repo.setnarrowpats(includepats, excludepats)
201 narrowspec.copytoworkingcopy(op.repo)
201 narrowspec.copytoworkingcopy(op.repo)
202
202
203
203
204 @bundle2.parthandler(_CHANGESPECPART)
204 @bundle2.parthandler(_CHANGESPECPART)
205 def _handlechangespec(op, inpart):
205 def _handlechangespec(op, inpart):
206 repo = op.repo
206 repo = op.repo
207 cl = repo.changelog
207 cl = repo.changelog
208
208
209 # changesets which need to be stripped entirely. either they're no longer
209 # changesets which need to be stripped entirely. either they're no longer
210 # needed in the new narrow spec, or the server is sending a replacement
210 # needed in the new narrow spec, or the server is sending a replacement
211 # in the changegroup part.
211 # in the changegroup part.
212 clkills = set()
212 clkills = set()
213
213
214 # A changespec part contains all the updates to ellipsis nodes
214 # A changespec part contains all the updates to ellipsis nodes
215 # that will happen as a result of widening or narrowing a
215 # that will happen as a result of widening or narrowing a
216 # repo. All the changes that this block encounters are ellipsis
216 # repo. All the changes that this block encounters are ellipsis
217 # nodes or flags to kill an existing ellipsis.
217 # nodes or flags to kill an existing ellipsis.
218 chunksignal = changegroup.readexactly(inpart, 4)
218 chunksignal = changegroup.readexactly(inpart, 4)
219 while chunksignal != _DONESIGNAL:
219 while chunksignal != _DONESIGNAL:
220 if chunksignal == _KILLNODESIGNAL:
220 if chunksignal == _KILLNODESIGNAL:
221 # a node used to be an ellipsis but isn't anymore
221 # a node used to be an ellipsis but isn't anymore
222 ck = changegroup.readexactly(inpart, 20)
222 ck = changegroup.readexactly(inpart, 20)
223 if cl.hasnode(ck):
223 if cl.hasnode(ck):
224 clkills.add(ck)
224 clkills.add(ck)
225 else:
225 else:
226 raise error.Abort(
226 raise error.Abort(
227 _(b'unexpected changespec node chunk type: %s') % chunksignal
227 _(b'unexpected changespec node chunk type: %s') % chunksignal
228 )
228 )
229 chunksignal = changegroup.readexactly(inpart, 4)
229 chunksignal = changegroup.readexactly(inpart, 4)
230
230
231 if clkills:
231 if clkills:
232 # preserve bookmarks that repair.strip() would otherwise strip
232 # preserve bookmarks that repair.strip() would otherwise strip
233 op._bookmarksbackup = repo._bookmarks
233 op._bookmarksbackup = repo._bookmarks
234
234
235 class dummybmstore(dict):
235 class dummybmstore(dict):
236 def applychanges(self, repo, tr, changes):
236 def applychanges(self, repo, tr, changes):
237 pass
237 pass
238
238
239 localrepo.localrepository._bookmarks.set(repo, dummybmstore())
239 localrepo.localrepository._bookmarks.set(repo, dummybmstore())
240 chgrpfile = repair.strip(
240 chgrpfile = repair.strip(
241 op.ui, repo, list(clkills), backup=True, topic=b'widen'
241 op.ui, repo, list(clkills), backup=True, topic=b'widen'
242 )
242 )
243 if chgrpfile:
243 if chgrpfile:
244 op._widen_uninterr = repo.ui.uninterruptible()
244 op._widen_uninterr = repo.ui.uninterruptible()
245 op._widen_uninterr.__enter__()
245 op._widen_uninterr.__enter__()
246 # presence of _widen_bundle attribute activates widen handler later
246 # presence of _widen_bundle attribute activates widen handler later
247 op._widen_bundle = chgrpfile
247 op._widen_bundle = chgrpfile
248 # Set the new narrowspec if we're widening. The setnewnarrowpats() method
248 # Set the new narrowspec if we're widening. The setnewnarrowpats() method
249 # will currently always be there when using the core+narrowhg server, but
249 # will currently always be there when using the core+narrowhg server, but
250 # other servers may include a changespec part even when not widening (e.g.
250 # other servers may include a changespec part even when not widening (e.g.
251 # because we're deepening a shallow repo).
251 # because we're deepening a shallow repo).
252 if util.safehasattr(repo, 'setnewnarrowpats'):
252 if util.safehasattr(repo, 'setnewnarrowpats'):
253 repo.setnewnarrowpats()
253 repo.setnewnarrowpats()
254
254
255
255
256 def handlechangegroup_widen(op, inpart):
256 def handlechangegroup_widen(op, inpart):
257 """Changegroup exchange handler which restores temporarily-stripped nodes"""
257 """Changegroup exchange handler which restores temporarily-stripped nodes"""
258 # We saved a bundle with stripped node data we must now restore.
258 # We saved a bundle with stripped node data we must now restore.
259 # This approach is based on mercurial/repair.py@6ee26a53c111.
259 # This approach is based on mercurial/repair.py@6ee26a53c111.
260 repo = op.repo
260 repo = op.repo
261 ui = op.ui
261 ui = op.ui
262
262
263 chgrpfile = op._widen_bundle
263 chgrpfile = op._widen_bundle
264 del op._widen_bundle
264 del op._widen_bundle
265 vfs = repo.vfs
265 vfs = repo.vfs
266
266
267 ui.note(_(b"adding branch\n"))
267 ui.note(_(b"adding branch\n"))
268 f = vfs.open(chgrpfile, b"rb")
268 f = vfs.open(chgrpfile, b"rb")
269 try:
269 try:
270 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
270 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
271 # silence internal shuffling chatter
271 # silence internal shuffling chatter
272 override = {(b'ui', b'quiet'): True}
272 override = {(b'ui', b'quiet'): True}
273 if ui.verbose:
273 if ui.verbose:
274 override = {}
274 override = {}
275 with ui.configoverride(override):
275 with ui.configoverride(override):
276 if isinstance(gen, bundle2.unbundle20):
276 if isinstance(gen, bundle2.unbundle20):
277 with repo.transaction(b'strip') as tr:
277 with repo.transaction(b'strip') as tr:
278 bundle2.processbundle(repo, gen, lambda: tr)
278 bundle2.processbundle(repo, gen, lambda: tr)
279 else:
279 else:
280 gen.apply(
280 gen.apply(
281 repo, b'strip', b'bundle:' + vfs.join(chgrpfile), True
281 repo, b'strip', b'bundle:' + vfs.join(chgrpfile), True
282 )
282 )
283 finally:
283 finally:
284 f.close()
284 f.close()
285
285
286 # remove undo files
286 # remove undo files
287 for undovfs, undofile in repo.undofiles():
287 for undovfs, undofile in repo.undofiles():
288 try:
288 try:
289 undovfs.unlink(undofile)
289 undovfs.unlink(undofile)
290 except OSError as e:
290 except OSError as e:
291 if e.errno != errno.ENOENT:
291 if e.errno != errno.ENOENT:
292 ui.warn(
292 ui.warn(
293 _(b'error removing %s: %s\n')
293 _(b'error removing %s: %s\n')
294 % (undovfs.join(undofile), stringutil.forcebytestr(e))
294 % (undovfs.join(undofile), stringutil.forcebytestr(e))
295 )
295 )
296
296
297 # Remove partial backup only if there were no exceptions
297 # Remove partial backup only if there were no exceptions
298 op._widen_uninterr.__exit__(None, None, None)
298 op._widen_uninterr.__exit__(None, None, None)
299 vfs.unlink(chgrpfile)
299 vfs.unlink(chgrpfile)
300
300
301
301
302 def setup():
302 def setup():
303 """Enable narrow repo support in bundle2-related extension points."""
303 """Enable narrow repo support in bundle2-related extension points."""
304 getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
304 getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
305
305
306 getbundleargs[b'narrow'] = b'boolean'
306 getbundleargs[b'narrow'] = b'boolean'
307 getbundleargs[b'depth'] = b'plain'
307 getbundleargs[b'depth'] = b'plain'
308 getbundleargs[b'oldincludepats'] = b'csv'
308 getbundleargs[b'oldincludepats'] = b'csv'
309 getbundleargs[b'oldexcludepats'] = b'csv'
309 getbundleargs[b'oldexcludepats'] = b'csv'
310 getbundleargs[b'known'] = b'csv'
310 getbundleargs[b'known'] = b'csv'
311
311
312 # Extend changegroup serving to handle requests from narrow clients.
312 # Extend changegroup serving to handle requests from narrow clients.
313 origcgfn = exchange.getbundle2partsmapping[b'changegroup']
313 origcgfn = exchange.getbundle2partsmapping[b'changegroup']
314
314
315 def wrappedcgfn(*args, **kwargs):
315 def wrappedcgfn(*args, **kwargs):
316 repo = args[1]
316 repo = args[1]
317 if repo.ui.has_section(_NARROWACL_SECTION):
317 if repo.ui.has_section(_NARROWACL_SECTION):
318 kwargs = exchange.applynarrowacl(repo, kwargs)
318 kwargs = exchange.applynarrowacl(repo, kwargs)
319
319
320 if kwargs.get('narrow', False) and repo.ui.configbool(
320 if kwargs.get('narrow', False) and repo.ui.configbool(
321 b'experimental', b'narrowservebrokenellipses'
321 b'experimental', b'narrowservebrokenellipses'
322 ):
322 ):
323 getbundlechangegrouppart_narrow(*args, **kwargs)
323 getbundlechangegrouppart_narrow(*args, **kwargs)
324 else:
324 else:
325 origcgfn(*args, **kwargs)
325 origcgfn(*args, **kwargs)
326
326
327 exchange.getbundle2partsmapping[b'changegroup'] = wrappedcgfn
327 exchange.getbundle2partsmapping[b'changegroup'] = wrappedcgfn
328
328
329 # Extend changegroup receiver so client can fixup after widen requests.
329 # Extend changegroup receiver so client can fixup after widen requests.
330 origcghandler = bundle2.parthandlermapping[b'changegroup']
330 origcghandler = bundle2.parthandlermapping[b'changegroup']
331
331
332 def wrappedcghandler(op, inpart):
332 def wrappedcghandler(op, inpart):
333 origcghandler(op, inpart)
333 origcghandler(op, inpart)
334 if util.safehasattr(op, '_widen_bundle'):
334 if util.safehasattr(op, '_widen_bundle'):
335 handlechangegroup_widen(op, inpart)
335 handlechangegroup_widen(op, inpart)
336 if util.safehasattr(op, '_bookmarksbackup'):
336 if util.safehasattr(op, '_bookmarksbackup'):
337 localrepo.localrepository._bookmarks.set(
337 localrepo.localrepository._bookmarks.set(
338 op.repo, op._bookmarksbackup
338 op.repo, op._bookmarksbackup
339 )
339 )
340 del op._bookmarksbackup
340 del op._bookmarksbackup
341
341
342 wrappedcghandler.params = origcghandler.params
342 wrappedcghandler.params = origcghandler.params
343 bundle2.parthandlermapping[b'changegroup'] = wrappedcghandler
343 bundle2.parthandlermapping[b'changegroup'] = wrappedcghandler
@@ -1,659 +1,659 b''
1 # narrowcommands.py - command modifications for narrowhg extension
1 # narrowcommands.py - command modifications for narrowhg extension
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import itertools
9 import itertools
10 import os
10 import os
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import (
13 from mercurial import (
14 bundle2,
14 bundle2,
15 cmdutil,
15 cmdutil,
16 commands,
16 commands,
17 discovery,
17 discovery,
18 encoding,
18 encoding,
19 error,
19 error,
20 exchange,
20 exchange,
21 extensions,
21 extensions,
22 hg,
22 hg,
23 narrowspec,
23 narrowspec,
24 node,
24 node,
25 pathutil,
25 pathutil,
26 pycompat,
26 pycompat,
27 registrar,
27 registrar,
28 repair,
28 repair,
29 repoview,
29 repoview,
30 requirements,
30 sparse,
31 sparse,
31 util,
32 util,
32 wireprototypes,
33 wireprototypes,
33 )
34 )
34 from mercurial.interfaces import repository
35
35
36 table = {}
36 table = {}
37 command = registrar.command(table)
37 command = registrar.command(table)
38
38
39
39
40 def setup():
40 def setup():
41 """Wraps user-facing mercurial commands with narrow-aware versions."""
41 """Wraps user-facing mercurial commands with narrow-aware versions."""
42
42
43 entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd)
43 entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd)
44 entry[1].append(
44 entry[1].append(
45 (b'', b'narrow', None, _(b"create a narrow clone of select files"))
45 (b'', b'narrow', None, _(b"create a narrow clone of select files"))
46 )
46 )
47 entry[1].append(
47 entry[1].append(
48 (
48 (
49 b'',
49 b'',
50 b'depth',
50 b'depth',
51 b'',
51 b'',
52 _(b"limit the history fetched by distance from heads"),
52 _(b"limit the history fetched by distance from heads"),
53 )
53 )
54 )
54 )
55 entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file")))
55 entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file")))
56 # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
56 # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
57 if b'sparse' not in extensions.enabled():
57 if b'sparse' not in extensions.enabled():
58 entry[1].append(
58 entry[1].append(
59 (b'', b'include', [], _(b"specifically fetch this file/directory"))
59 (b'', b'include', [], _(b"specifically fetch this file/directory"))
60 )
60 )
61 entry[1].append(
61 entry[1].append(
62 (
62 (
63 b'',
63 b'',
64 b'exclude',
64 b'exclude',
65 [],
65 [],
66 _(b"do not fetch this file/directory, even if included"),
66 _(b"do not fetch this file/directory, even if included"),
67 )
67 )
68 )
68 )
69
69
70 entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd)
70 entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd)
71 entry[1].append(
71 entry[1].append(
72 (
72 (
73 b'',
73 b'',
74 b'depth',
74 b'depth',
75 b'',
75 b'',
76 _(b"limit the history fetched by distance from heads"),
76 _(b"limit the history fetched by distance from heads"),
77 )
77 )
78 )
78 )
79
79
80 extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd)
80 extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd)
81
81
82
82
83 def clonenarrowcmd(orig, ui, repo, *args, **opts):
83 def clonenarrowcmd(orig, ui, repo, *args, **opts):
84 """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
84 """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
85 opts = pycompat.byteskwargs(opts)
85 opts = pycompat.byteskwargs(opts)
86 wrappedextraprepare = util.nullcontextmanager()
86 wrappedextraprepare = util.nullcontextmanager()
87 narrowspecfile = opts[b'narrowspec']
87 narrowspecfile = opts[b'narrowspec']
88
88
89 if narrowspecfile:
89 if narrowspecfile:
90 filepath = os.path.join(encoding.getcwd(), narrowspecfile)
90 filepath = os.path.join(encoding.getcwd(), narrowspecfile)
91 ui.status(_(b"reading narrowspec from '%s'\n") % filepath)
91 ui.status(_(b"reading narrowspec from '%s'\n") % filepath)
92 try:
92 try:
93 fdata = util.readfile(filepath)
93 fdata = util.readfile(filepath)
94 except IOError as inst:
94 except IOError as inst:
95 raise error.Abort(
95 raise error.Abort(
96 _(b"cannot read narrowspecs from '%s': %s")
96 _(b"cannot read narrowspecs from '%s': %s")
97 % (filepath, encoding.strtolocal(inst.strerror))
97 % (filepath, encoding.strtolocal(inst.strerror))
98 )
98 )
99
99
100 includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow')
100 includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow')
101 if profiles:
101 if profiles:
102 raise error.Abort(
102 raise error.Abort(
103 _(
103 _(
104 b"cannot specify other files using '%include' in"
104 b"cannot specify other files using '%include' in"
105 b" narrowspec"
105 b" narrowspec"
106 )
106 )
107 )
107 )
108
108
109 narrowspec.validatepatterns(includes)
109 narrowspec.validatepatterns(includes)
110 narrowspec.validatepatterns(excludes)
110 narrowspec.validatepatterns(excludes)
111
111
112 # narrowspec is passed so we should assume that user wants narrow clone
112 # narrowspec is passed so we should assume that user wants narrow clone
113 opts[b'narrow'] = True
113 opts[b'narrow'] = True
114 opts[b'include'].extend(includes)
114 opts[b'include'].extend(includes)
115 opts[b'exclude'].extend(excludes)
115 opts[b'exclude'].extend(excludes)
116
116
117 if opts[b'narrow']:
117 if opts[b'narrow']:
118
118
119 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
119 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
120 orig(pullop, kwargs)
120 orig(pullop, kwargs)
121
121
122 if opts.get(b'depth'):
122 if opts.get(b'depth'):
123 kwargs[b'depth'] = opts[b'depth']
123 kwargs[b'depth'] = opts[b'depth']
124
124
125 wrappedextraprepare = extensions.wrappedfunction(
125 wrappedextraprepare = extensions.wrappedfunction(
126 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
126 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
127 )
127 )
128
128
129 with wrappedextraprepare:
129 with wrappedextraprepare:
130 return orig(ui, repo, *args, **pycompat.strkwargs(opts))
130 return orig(ui, repo, *args, **pycompat.strkwargs(opts))
131
131
132
132
133 def pullnarrowcmd(orig, ui, repo, *args, **opts):
133 def pullnarrowcmd(orig, ui, repo, *args, **opts):
134 """Wraps pull command to allow modifying narrow spec."""
134 """Wraps pull command to allow modifying narrow spec."""
135 wrappedextraprepare = util.nullcontextmanager()
135 wrappedextraprepare = util.nullcontextmanager()
136 if repository.NARROW_REQUIREMENT in repo.requirements:
136 if requirements.NARROW_REQUIREMENT in repo.requirements:
137
137
138 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
138 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
139 orig(pullop, kwargs)
139 orig(pullop, kwargs)
140 if opts.get('depth'):
140 if opts.get('depth'):
141 kwargs[b'depth'] = opts['depth']
141 kwargs[b'depth'] = opts['depth']
142
142
143 wrappedextraprepare = extensions.wrappedfunction(
143 wrappedextraprepare = extensions.wrappedfunction(
144 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
144 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
145 )
145 )
146
146
147 with wrappedextraprepare:
147 with wrappedextraprepare:
148 return orig(ui, repo, *args, **opts)
148 return orig(ui, repo, *args, **opts)
149
149
150
150
151 def archivenarrowcmd(orig, ui, repo, *args, **opts):
151 def archivenarrowcmd(orig, ui, repo, *args, **opts):
152 """Wraps archive command to narrow the default includes."""
152 """Wraps archive command to narrow the default includes."""
153 if repository.NARROW_REQUIREMENT in repo.requirements:
153 if requirements.NARROW_REQUIREMENT in repo.requirements:
154 repo_includes, repo_excludes = repo.narrowpats
154 repo_includes, repo_excludes = repo.narrowpats
155 includes = set(opts.get('include', []))
155 includes = set(opts.get('include', []))
156 excludes = set(opts.get('exclude', []))
156 excludes = set(opts.get('exclude', []))
157 includes, excludes, unused_invalid = narrowspec.restrictpatterns(
157 includes, excludes, unused_invalid = narrowspec.restrictpatterns(
158 includes, excludes, repo_includes, repo_excludes
158 includes, excludes, repo_includes, repo_excludes
159 )
159 )
160 if includes:
160 if includes:
161 opts['include'] = includes
161 opts['include'] = includes
162 if excludes:
162 if excludes:
163 opts['exclude'] = excludes
163 opts['exclude'] = excludes
164 return orig(ui, repo, *args, **opts)
164 return orig(ui, repo, *args, **opts)
165
165
166
166
167 def pullbundle2extraprepare(orig, pullop, kwargs):
167 def pullbundle2extraprepare(orig, pullop, kwargs):
168 repo = pullop.repo
168 repo = pullop.repo
169 if repository.NARROW_REQUIREMENT not in repo.requirements:
169 if requirements.NARROW_REQUIREMENT not in repo.requirements:
170 return orig(pullop, kwargs)
170 return orig(pullop, kwargs)
171
171
172 if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
172 if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
173 raise error.Abort(_(b"server does not support narrow clones"))
173 raise error.Abort(_(b"server does not support narrow clones"))
174 orig(pullop, kwargs)
174 orig(pullop, kwargs)
175 kwargs[b'narrow'] = True
175 kwargs[b'narrow'] = True
176 include, exclude = repo.narrowpats
176 include, exclude = repo.narrowpats
177 kwargs[b'oldincludepats'] = include
177 kwargs[b'oldincludepats'] = include
178 kwargs[b'oldexcludepats'] = exclude
178 kwargs[b'oldexcludepats'] = exclude
179 if include:
179 if include:
180 kwargs[b'includepats'] = include
180 kwargs[b'includepats'] = include
181 if exclude:
181 if exclude:
182 kwargs[b'excludepats'] = exclude
182 kwargs[b'excludepats'] = exclude
183 # calculate known nodes only in ellipses cases because in non-ellipses cases
183 # calculate known nodes only in ellipses cases because in non-ellipses cases
184 # we have all the nodes
184 # we have all the nodes
185 if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities():
185 if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities():
186 kwargs[b'known'] = [
186 kwargs[b'known'] = [
187 node.hex(ctx.node())
187 node.hex(ctx.node())
188 for ctx in repo.set(b'::%ln', pullop.common)
188 for ctx in repo.set(b'::%ln', pullop.common)
189 if ctx.node() != node.nullid
189 if ctx.node() != node.nullid
190 ]
190 ]
191 if not kwargs[b'known']:
191 if not kwargs[b'known']:
192 # Mercurial serializes an empty list as '' and deserializes it as
192 # Mercurial serializes an empty list as '' and deserializes it as
193 # [''], so delete it instead to avoid handling the empty string on
193 # [''], so delete it instead to avoid handling the empty string on
194 # the server.
194 # the server.
195 del kwargs[b'known']
195 del kwargs[b'known']
196
196
197
197
198 extensions.wrapfunction(
198 extensions.wrapfunction(
199 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare
199 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare
200 )
200 )
201
201
202
202
203 def _narrow(
203 def _narrow(
204 ui,
204 ui,
205 repo,
205 repo,
206 remote,
206 remote,
207 commoninc,
207 commoninc,
208 oldincludes,
208 oldincludes,
209 oldexcludes,
209 oldexcludes,
210 newincludes,
210 newincludes,
211 newexcludes,
211 newexcludes,
212 force,
212 force,
213 ):
213 ):
214 oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
214 oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
215 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
215 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
216
216
217 # This is essentially doing "hg outgoing" to find all local-only
217 # This is essentially doing "hg outgoing" to find all local-only
218 # commits. We will then check that the local-only commits don't
218 # commits. We will then check that the local-only commits don't
219 # have any changes to files that will be untracked.
219 # have any changes to files that will be untracked.
220 unfi = repo.unfiltered()
220 unfi = repo.unfiltered()
221 outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
221 outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
222 ui.status(_(b'looking for local changes to affected paths\n'))
222 ui.status(_(b'looking for local changes to affected paths\n'))
223 localnodes = []
223 localnodes = []
224 for n in itertools.chain(outgoing.missing, outgoing.excluded):
224 for n in itertools.chain(outgoing.missing, outgoing.excluded):
225 if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
225 if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
226 localnodes.append(n)
226 localnodes.append(n)
227 revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
227 revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
228 hiddenrevs = repoview.filterrevs(repo, b'visible')
228 hiddenrevs = repoview.filterrevs(repo, b'visible')
229 visibletostrip = list(
229 visibletostrip = list(
230 repo.changelog.node(r) for r in (revstostrip - hiddenrevs)
230 repo.changelog.node(r) for r in (revstostrip - hiddenrevs)
231 )
231 )
232 if visibletostrip:
232 if visibletostrip:
233 ui.status(
233 ui.status(
234 _(
234 _(
235 b'The following changeset(s) or their ancestors have '
235 b'The following changeset(s) or their ancestors have '
236 b'local changes not on the remote:\n'
236 b'local changes not on the remote:\n'
237 )
237 )
238 )
238 )
239 maxnodes = 10
239 maxnodes = 10
240 if ui.verbose or len(visibletostrip) <= maxnodes:
240 if ui.verbose or len(visibletostrip) <= maxnodes:
241 for n in visibletostrip:
241 for n in visibletostrip:
242 ui.status(b'%s\n' % node.short(n))
242 ui.status(b'%s\n' % node.short(n))
243 else:
243 else:
244 for n in visibletostrip[:maxnodes]:
244 for n in visibletostrip[:maxnodes]:
245 ui.status(b'%s\n' % node.short(n))
245 ui.status(b'%s\n' % node.short(n))
246 ui.status(
246 ui.status(
247 _(b'...and %d more, use --verbose to list all\n')
247 _(b'...and %d more, use --verbose to list all\n')
248 % (len(visibletostrip) - maxnodes)
248 % (len(visibletostrip) - maxnodes)
249 )
249 )
250 if not force:
250 if not force:
251 raise error.Abort(
251 raise error.Abort(
252 _(b'local changes found'),
252 _(b'local changes found'),
253 hint=_(b'use --force-delete-local-changes to ignore'),
253 hint=_(b'use --force-delete-local-changes to ignore'),
254 )
254 )
255
255
256 with ui.uninterruptible():
256 with ui.uninterruptible():
257 if revstostrip:
257 if revstostrip:
258 tostrip = [unfi.changelog.node(r) for r in revstostrip]
258 tostrip = [unfi.changelog.node(r) for r in revstostrip]
259 if repo[b'.'].node() in tostrip:
259 if repo[b'.'].node() in tostrip:
260 # stripping working copy, so move to a different commit first
260 # stripping working copy, so move to a different commit first
261 urev = max(
261 urev = max(
262 repo.revs(
262 repo.revs(
263 b'(::%n) - %ln + null',
263 b'(::%n) - %ln + null',
264 repo[b'.'].node(),
264 repo[b'.'].node(),
265 visibletostrip,
265 visibletostrip,
266 )
266 )
267 )
267 )
268 hg.clean(repo, urev)
268 hg.clean(repo, urev)
269 overrides = {(b'devel', b'strip-obsmarkers'): False}
269 overrides = {(b'devel', b'strip-obsmarkers'): False}
270 with ui.configoverride(overrides, b'narrow'):
270 with ui.configoverride(overrides, b'narrow'):
271 repair.strip(ui, unfi, tostrip, topic=b'narrow')
271 repair.strip(ui, unfi, tostrip, topic=b'narrow')
272
272
273 todelete = []
273 todelete = []
274 for f, f2, size in repo.store.datafiles():
274 for f, f2, size in repo.store.datafiles():
275 if f.startswith(b'data/'):
275 if f.startswith(b'data/'):
276 file = f[5:-2]
276 file = f[5:-2]
277 if not newmatch(file):
277 if not newmatch(file):
278 todelete.append(f)
278 todelete.append(f)
279 elif f.startswith(b'meta/'):
279 elif f.startswith(b'meta/'):
280 dir = f[5:-13]
280 dir = f[5:-13]
281 dirs = sorted(pathutil.dirs({dir})) + [dir]
281 dirs = sorted(pathutil.dirs({dir})) + [dir]
282 include = True
282 include = True
283 for d in dirs:
283 for d in dirs:
284 visit = newmatch.visitdir(d)
284 visit = newmatch.visitdir(d)
285 if not visit:
285 if not visit:
286 include = False
286 include = False
287 break
287 break
288 if visit == b'all':
288 if visit == b'all':
289 break
289 break
290 if not include:
290 if not include:
291 todelete.append(f)
291 todelete.append(f)
292
292
293 repo.destroying()
293 repo.destroying()
294
294
295 with repo.transaction(b'narrowing'):
295 with repo.transaction(b'narrowing'):
296 # Update narrowspec before removing revlogs, so repo won't be
296 # Update narrowspec before removing revlogs, so repo won't be
297 # corrupt in case of crash
297 # corrupt in case of crash
298 repo.setnarrowpats(newincludes, newexcludes)
298 repo.setnarrowpats(newincludes, newexcludes)
299
299
300 for f in todelete:
300 for f in todelete:
301 ui.status(_(b'deleting %s\n') % f)
301 ui.status(_(b'deleting %s\n') % f)
302 util.unlinkpath(repo.svfs.join(f))
302 util.unlinkpath(repo.svfs.join(f))
303 repo.store.markremoved(f)
303 repo.store.markremoved(f)
304
304
305 narrowspec.updateworkingcopy(repo, assumeclean=True)
305 narrowspec.updateworkingcopy(repo, assumeclean=True)
306 narrowspec.copytoworkingcopy(repo)
306 narrowspec.copytoworkingcopy(repo)
307
307
308 repo.destroyed()
308 repo.destroyed()
309
309
310
310
311 def _widen(
311 def _widen(
312 ui,
312 ui,
313 repo,
313 repo,
314 remote,
314 remote,
315 commoninc,
315 commoninc,
316 oldincludes,
316 oldincludes,
317 oldexcludes,
317 oldexcludes,
318 newincludes,
318 newincludes,
319 newexcludes,
319 newexcludes,
320 ):
320 ):
321 # for now we assume that if a server has ellipses enabled, we will be
321 # for now we assume that if a server has ellipses enabled, we will be
322 # exchanging ellipses nodes. In future we should add ellipses as a client
322 # exchanging ellipses nodes. In future we should add ellipses as a client
323 # side requirement (maybe) to distinguish a client is shallow or not and
323 # side requirement (maybe) to distinguish a client is shallow or not and
324 # then send that information to server whether we want ellipses or not.
324 # then send that information to server whether we want ellipses or not.
325 # Theoretically a non-ellipses repo should be able to use narrow
325 # Theoretically a non-ellipses repo should be able to use narrow
326 # functionality from an ellipses enabled server
326 # functionality from an ellipses enabled server
327 remotecap = remote.capabilities()
327 remotecap = remote.capabilities()
328 ellipsesremote = any(
328 ellipsesremote = any(
329 cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP
329 cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP
330 )
330 )
331
331
332 # check whether we are talking to a server which supports old version of
332 # check whether we are talking to a server which supports old version of
333 # ellipses capabilities
333 # ellipses capabilities
334 isoldellipses = (
334 isoldellipses = (
335 ellipsesremote
335 ellipsesremote
336 and wireprototypes.ELLIPSESCAP1 in remotecap
336 and wireprototypes.ELLIPSESCAP1 in remotecap
337 and wireprototypes.ELLIPSESCAP not in remotecap
337 and wireprototypes.ELLIPSESCAP not in remotecap
338 )
338 )
339
339
340 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
340 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
341 orig(pullop, kwargs)
341 orig(pullop, kwargs)
342 # The old{in,ex}cludepats have already been set by orig()
342 # The old{in,ex}cludepats have already been set by orig()
343 kwargs[b'includepats'] = newincludes
343 kwargs[b'includepats'] = newincludes
344 kwargs[b'excludepats'] = newexcludes
344 kwargs[b'excludepats'] = newexcludes
345
345
346 wrappedextraprepare = extensions.wrappedfunction(
346 wrappedextraprepare = extensions.wrappedfunction(
347 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
347 exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
348 )
348 )
349
349
350 # define a function that narrowbundle2 can call after creating the
350 # define a function that narrowbundle2 can call after creating the
351 # backup bundle, but before applying the bundle from the server
351 # backup bundle, but before applying the bundle from the server
352 def setnewnarrowpats():
352 def setnewnarrowpats():
353 repo.setnarrowpats(newincludes, newexcludes)
353 repo.setnarrowpats(newincludes, newexcludes)
354
354
355 repo.setnewnarrowpats = setnewnarrowpats
355 repo.setnewnarrowpats = setnewnarrowpats
356 # silence the devel-warning of applying an empty changegroup
356 # silence the devel-warning of applying an empty changegroup
357 overrides = {(b'devel', b'all-warnings'): False}
357 overrides = {(b'devel', b'all-warnings'): False}
358
358
359 common = commoninc[0]
359 common = commoninc[0]
360 with ui.uninterruptible():
360 with ui.uninterruptible():
361 if ellipsesremote:
361 if ellipsesremote:
362 ds = repo.dirstate
362 ds = repo.dirstate
363 p1, p2 = ds.p1(), ds.p2()
363 p1, p2 = ds.p1(), ds.p2()
364 with ds.parentchange():
364 with ds.parentchange():
365 ds.setparents(node.nullid, node.nullid)
365 ds.setparents(node.nullid, node.nullid)
366 if isoldellipses:
366 if isoldellipses:
367 with wrappedextraprepare:
367 with wrappedextraprepare:
368 exchange.pull(repo, remote, heads=common)
368 exchange.pull(repo, remote, heads=common)
369 else:
369 else:
370 known = []
370 known = []
371 if ellipsesremote:
371 if ellipsesremote:
372 known = [
372 known = [
373 ctx.node()
373 ctx.node()
374 for ctx in repo.set(b'::%ln', common)
374 for ctx in repo.set(b'::%ln', common)
375 if ctx.node() != node.nullid
375 if ctx.node() != node.nullid
376 ]
376 ]
377 with remote.commandexecutor() as e:
377 with remote.commandexecutor() as e:
378 bundle = e.callcommand(
378 bundle = e.callcommand(
379 b'narrow_widen',
379 b'narrow_widen',
380 {
380 {
381 b'oldincludes': oldincludes,
381 b'oldincludes': oldincludes,
382 b'oldexcludes': oldexcludes,
382 b'oldexcludes': oldexcludes,
383 b'newincludes': newincludes,
383 b'newincludes': newincludes,
384 b'newexcludes': newexcludes,
384 b'newexcludes': newexcludes,
385 b'cgversion': b'03',
385 b'cgversion': b'03',
386 b'commonheads': common,
386 b'commonheads': common,
387 b'known': known,
387 b'known': known,
388 b'ellipses': ellipsesremote,
388 b'ellipses': ellipsesremote,
389 },
389 },
390 ).result()
390 ).result()
391
391
392 trmanager = exchange.transactionmanager(
392 trmanager = exchange.transactionmanager(
393 repo, b'widen', remote.url()
393 repo, b'widen', remote.url()
394 )
394 )
395 with trmanager, repo.ui.configoverride(overrides, b'widen'):
395 with trmanager, repo.ui.configoverride(overrides, b'widen'):
396 op = bundle2.bundleoperation(
396 op = bundle2.bundleoperation(
397 repo, trmanager.transaction, source=b'widen'
397 repo, trmanager.transaction, source=b'widen'
398 )
398 )
399 # TODO: we should catch error.Abort here
399 # TODO: we should catch error.Abort here
400 bundle2.processbundle(repo, bundle, op=op)
400 bundle2.processbundle(repo, bundle, op=op)
401
401
402 if ellipsesremote:
402 if ellipsesremote:
403 with ds.parentchange():
403 with ds.parentchange():
404 ds.setparents(p1, p2)
404 ds.setparents(p1, p2)
405
405
406 with repo.transaction(b'widening'):
406 with repo.transaction(b'widening'):
407 repo.setnewnarrowpats()
407 repo.setnewnarrowpats()
408 narrowspec.updateworkingcopy(repo)
408 narrowspec.updateworkingcopy(repo)
409 narrowspec.copytoworkingcopy(repo)
409 narrowspec.copytoworkingcopy(repo)
410
410
411
411
412 # TODO(rdamazio): Make new matcher format and update description
412 # TODO(rdamazio): Make new matcher format and update description
413 @command(
413 @command(
414 b'tracked',
414 b'tracked',
415 [
415 [
416 (b'', b'addinclude', [], _(b'new paths to include')),
416 (b'', b'addinclude', [], _(b'new paths to include')),
417 (b'', b'removeinclude', [], _(b'old paths to no longer include')),
417 (b'', b'removeinclude', [], _(b'old paths to no longer include')),
418 (
418 (
419 b'',
419 b'',
420 b'auto-remove-includes',
420 b'auto-remove-includes',
421 False,
421 False,
422 _(b'automatically choose unused includes to remove'),
422 _(b'automatically choose unused includes to remove'),
423 ),
423 ),
424 (b'', b'addexclude', [], _(b'new paths to exclude')),
424 (b'', b'addexclude', [], _(b'new paths to exclude')),
425 (b'', b'import-rules', b'', _(b'import narrowspecs from a file')),
425 (b'', b'import-rules', b'', _(b'import narrowspecs from a file')),
426 (b'', b'removeexclude', [], _(b'old paths to no longer exclude')),
426 (b'', b'removeexclude', [], _(b'old paths to no longer exclude')),
427 (
427 (
428 b'',
428 b'',
429 b'clear',
429 b'clear',
430 False,
430 False,
431 _(b'whether to replace the existing narrowspec'),
431 _(b'whether to replace the existing narrowspec'),
432 ),
432 ),
433 (
433 (
434 b'',
434 b'',
435 b'force-delete-local-changes',
435 b'force-delete-local-changes',
436 False,
436 False,
437 _(b'forces deletion of local changes when narrowing'),
437 _(b'forces deletion of local changes when narrowing'),
438 ),
438 ),
439 (
439 (
440 b'',
440 b'',
441 b'update-working-copy',
441 b'update-working-copy',
442 False,
442 False,
443 _(b'update working copy when the store has changed'),
443 _(b'update working copy when the store has changed'),
444 ),
444 ),
445 ]
445 ]
446 + commands.remoteopts,
446 + commands.remoteopts,
447 _(b'[OPTIONS]... [REMOTE]'),
447 _(b'[OPTIONS]... [REMOTE]'),
448 inferrepo=True,
448 inferrepo=True,
449 helpcategory=command.CATEGORY_MAINTENANCE,
449 helpcategory=command.CATEGORY_MAINTENANCE,
450 )
450 )
451 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
451 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
452 """show or change the current narrowspec
452 """show or change the current narrowspec
453
453
454 With no argument, shows the current narrowspec entries, one per line. Each
454 With no argument, shows the current narrowspec entries, one per line. Each
455 line will be prefixed with 'I' or 'X' for included or excluded patterns,
455 line will be prefixed with 'I' or 'X' for included or excluded patterns,
456 respectively.
456 respectively.
457
457
458 The narrowspec is comprised of expressions to match remote files and/or
458 The narrowspec is comprised of expressions to match remote files and/or
459 directories that should be pulled into your client.
459 directories that should be pulled into your client.
460 The narrowspec has *include* and *exclude* expressions, with excludes always
460 The narrowspec has *include* and *exclude* expressions, with excludes always
461 trumping includes: that is, if a file matches an exclude expression, it will
461 trumping includes: that is, if a file matches an exclude expression, it will
462 be excluded even if it also matches an include expression.
462 be excluded even if it also matches an include expression.
463 Excluding files that were never included has no effect.
463 Excluding files that were never included has no effect.
464
464
465 Each included or excluded entry is in the format described by
465 Each included or excluded entry is in the format described by
466 'hg help patterns'.
466 'hg help patterns'.
467
467
468 The options allow you to add or remove included and excluded expressions.
468 The options allow you to add or remove included and excluded expressions.
469
469
470 If --clear is specified, then all previous includes and excludes are DROPPED
470 If --clear is specified, then all previous includes and excludes are DROPPED
471 and replaced by the new ones specified to --addinclude and --addexclude.
471 and replaced by the new ones specified to --addinclude and --addexclude.
472 If --clear is specified without any further options, the narrowspec will be
472 If --clear is specified without any further options, the narrowspec will be
473 empty and will not match any files.
473 empty and will not match any files.
474
474
475 If --auto-remove-includes is specified, then those includes that don't match
475 If --auto-remove-includes is specified, then those includes that don't match
476 any files modified by currently visible local commits (those not shared by
476 any files modified by currently visible local commits (those not shared by
477 the remote) will be added to the set of explicitly specified includes to
477 the remote) will be added to the set of explicitly specified includes to
478 remove.
478 remove.
479
479
480 --import-rules accepts a path to a file containing rules, allowing you to
480 --import-rules accepts a path to a file containing rules, allowing you to
481 add --addinclude, --addexclude rules in bulk. Like the other include and
481 add --addinclude, --addexclude rules in bulk. Like the other include and
482 exclude switches, the changes are applied immediately.
482 exclude switches, the changes are applied immediately.
483 """
483 """
484 opts = pycompat.byteskwargs(opts)
484 opts = pycompat.byteskwargs(opts)
485 if repository.NARROW_REQUIREMENT not in repo.requirements:
485 if requirements.NARROW_REQUIREMENT not in repo.requirements:
486 raise error.Abort(
486 raise error.Abort(
487 _(
487 _(
488 b'the tracked command is only supported on '
488 b'the tracked command is only supported on '
489 b'repositories cloned with --narrow'
489 b'repositories cloned with --narrow'
490 )
490 )
491 )
491 )
492
492
493 # Before supporting, decide whether "hg tracked --clear" should mean
493 # Before supporting, decide whether "hg tracked --clear" should mean
494 # tracking no paths or all paths.
494 # tracking no paths or all paths.
495 if opts[b'clear']:
495 if opts[b'clear']:
496 raise error.Abort(_(b'the --clear option is not yet supported'))
496 raise error.Abort(_(b'the --clear option is not yet supported'))
497
497
498 # import rules from a file
498 # import rules from a file
499 newrules = opts.get(b'import_rules')
499 newrules = opts.get(b'import_rules')
500 if newrules:
500 if newrules:
501 try:
501 try:
502 filepath = os.path.join(encoding.getcwd(), newrules)
502 filepath = os.path.join(encoding.getcwd(), newrules)
503 fdata = util.readfile(filepath)
503 fdata = util.readfile(filepath)
504 except IOError as inst:
504 except IOError as inst:
505 raise error.Abort(
505 raise error.Abort(
506 _(b"cannot read narrowspecs from '%s': %s")
506 _(b"cannot read narrowspecs from '%s': %s")
507 % (filepath, encoding.strtolocal(inst.strerror))
507 % (filepath, encoding.strtolocal(inst.strerror))
508 )
508 )
509 includepats, excludepats, profiles = sparse.parseconfig(
509 includepats, excludepats, profiles = sparse.parseconfig(
510 ui, fdata, b'narrow'
510 ui, fdata, b'narrow'
511 )
511 )
512 if profiles:
512 if profiles:
513 raise error.Abort(
513 raise error.Abort(
514 _(
514 _(
515 b"including other spec files using '%include' "
515 b"including other spec files using '%include' "
516 b"is not supported in narrowspec"
516 b"is not supported in narrowspec"
517 )
517 )
518 )
518 )
519 opts[b'addinclude'].extend(includepats)
519 opts[b'addinclude'].extend(includepats)
520 opts[b'addexclude'].extend(excludepats)
520 opts[b'addexclude'].extend(excludepats)
521
521
522 addedincludes = narrowspec.parsepatterns(opts[b'addinclude'])
522 addedincludes = narrowspec.parsepatterns(opts[b'addinclude'])
523 removedincludes = narrowspec.parsepatterns(opts[b'removeinclude'])
523 removedincludes = narrowspec.parsepatterns(opts[b'removeinclude'])
524 addedexcludes = narrowspec.parsepatterns(opts[b'addexclude'])
524 addedexcludes = narrowspec.parsepatterns(opts[b'addexclude'])
525 removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude'])
525 removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude'])
526 autoremoveincludes = opts[b'auto_remove_includes']
526 autoremoveincludes = opts[b'auto_remove_includes']
527
527
528 update_working_copy = opts[b'update_working_copy']
528 update_working_copy = opts[b'update_working_copy']
529 only_show = not (
529 only_show = not (
530 addedincludes
530 addedincludes
531 or removedincludes
531 or removedincludes
532 or addedexcludes
532 or addedexcludes
533 or removedexcludes
533 or removedexcludes
534 or newrules
534 or newrules
535 or autoremoveincludes
535 or autoremoveincludes
536 or update_working_copy
536 or update_working_copy
537 )
537 )
538
538
539 oldincludes, oldexcludes = repo.narrowpats
539 oldincludes, oldexcludes = repo.narrowpats
540
540
541 # filter the user passed additions and deletions into actual additions and
541 # filter the user passed additions and deletions into actual additions and
542 # deletions of excludes and includes
542 # deletions of excludes and includes
543 addedincludes -= oldincludes
543 addedincludes -= oldincludes
544 removedincludes &= oldincludes
544 removedincludes &= oldincludes
545 addedexcludes -= oldexcludes
545 addedexcludes -= oldexcludes
546 removedexcludes &= oldexcludes
546 removedexcludes &= oldexcludes
547
547
548 widening = addedincludes or removedexcludes
548 widening = addedincludes or removedexcludes
549 narrowing = removedincludes or addedexcludes
549 narrowing = removedincludes or addedexcludes
550
550
551 # Only print the current narrowspec.
551 # Only print the current narrowspec.
552 if only_show:
552 if only_show:
553 ui.pager(b'tracked')
553 ui.pager(b'tracked')
554 fm = ui.formatter(b'narrow', opts)
554 fm = ui.formatter(b'narrow', opts)
555 for i in sorted(oldincludes):
555 for i in sorted(oldincludes):
556 fm.startitem()
556 fm.startitem()
557 fm.write(b'status', b'%s ', b'I', label=b'narrow.included')
557 fm.write(b'status', b'%s ', b'I', label=b'narrow.included')
558 fm.write(b'pat', b'%s\n', i, label=b'narrow.included')
558 fm.write(b'pat', b'%s\n', i, label=b'narrow.included')
559 for i in sorted(oldexcludes):
559 for i in sorted(oldexcludes):
560 fm.startitem()
560 fm.startitem()
561 fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded')
561 fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded')
562 fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded')
562 fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded')
563 fm.end()
563 fm.end()
564 return 0
564 return 0
565
565
566 if update_working_copy:
566 if update_working_copy:
567 with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'):
567 with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'):
568 narrowspec.updateworkingcopy(repo)
568 narrowspec.updateworkingcopy(repo)
569 narrowspec.copytoworkingcopy(repo)
569 narrowspec.copytoworkingcopy(repo)
570 return 0
570 return 0
571
571
572 if not (widening or narrowing or autoremoveincludes):
572 if not (widening or narrowing or autoremoveincludes):
573 ui.status(_(b"nothing to widen or narrow\n"))
573 ui.status(_(b"nothing to widen or narrow\n"))
574 return 0
574 return 0
575
575
576 with repo.wlock(), repo.lock():
576 with repo.wlock(), repo.lock():
577 cmdutil.bailifchanged(repo)
577 cmdutil.bailifchanged(repo)
578
578
579 # Find the revisions we have in common with the remote. These will
579 # Find the revisions we have in common with the remote. These will
580 # be used for finding local-only changes for narrowing. They will
580 # be used for finding local-only changes for narrowing. They will
581 # also define the set of revisions to update for widening.
581 # also define the set of revisions to update for widening.
582 remotepath = ui.expandpath(remotepath or b'default')
582 remotepath = ui.expandpath(remotepath or b'default')
583 url, branches = hg.parseurl(remotepath)
583 url, branches = hg.parseurl(remotepath)
584 ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
584 ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
585 remote = hg.peer(repo, opts, url)
585 remote = hg.peer(repo, opts, url)
586
586
587 # check narrow support before doing anything if widening needs to be
587 # check narrow support before doing anything if widening needs to be
588 # performed. In future we should also abort if client is ellipses and
588 # performed. In future we should also abort if client is ellipses and
589 # server does not support ellipses
589 # server does not support ellipses
590 if widening and wireprototypes.NARROWCAP not in remote.capabilities():
590 if widening and wireprototypes.NARROWCAP not in remote.capabilities():
591 raise error.Abort(_(b"server does not support narrow clones"))
591 raise error.Abort(_(b"server does not support narrow clones"))
592
592
593 commoninc = discovery.findcommonincoming(repo, remote)
593 commoninc = discovery.findcommonincoming(repo, remote)
594
594
595 if autoremoveincludes:
595 if autoremoveincludes:
596 outgoing = discovery.findcommonoutgoing(
596 outgoing = discovery.findcommonoutgoing(
597 repo, remote, commoninc=commoninc
597 repo, remote, commoninc=commoninc
598 )
598 )
599 ui.status(_(b'looking for unused includes to remove\n'))
599 ui.status(_(b'looking for unused includes to remove\n'))
600 localfiles = set()
600 localfiles = set()
601 for n in itertools.chain(outgoing.missing, outgoing.excluded):
601 for n in itertools.chain(outgoing.missing, outgoing.excluded):
602 localfiles.update(repo[n].files())
602 localfiles.update(repo[n].files())
603 suggestedremovals = []
603 suggestedremovals = []
604 for include in sorted(oldincludes):
604 for include in sorted(oldincludes):
605 match = narrowspec.match(repo.root, [include], oldexcludes)
605 match = narrowspec.match(repo.root, [include], oldexcludes)
606 if not any(match(f) for f in localfiles):
606 if not any(match(f) for f in localfiles):
607 suggestedremovals.append(include)
607 suggestedremovals.append(include)
608 if suggestedremovals:
608 if suggestedremovals:
609 for s in suggestedremovals:
609 for s in suggestedremovals:
610 ui.status(b'%s\n' % s)
610 ui.status(b'%s\n' % s)
611 if (
611 if (
612 ui.promptchoice(
612 ui.promptchoice(
613 _(
613 _(
614 b'remove these unused includes (yn)?'
614 b'remove these unused includes (yn)?'
615 b'$$ &Yes $$ &No'
615 b'$$ &Yes $$ &No'
616 )
616 )
617 )
617 )
618 == 0
618 == 0
619 ):
619 ):
620 removedincludes.update(suggestedremovals)
620 removedincludes.update(suggestedremovals)
621 narrowing = True
621 narrowing = True
622 else:
622 else:
623 ui.status(_(b'found no unused includes\n'))
623 ui.status(_(b'found no unused includes\n'))
624
624
625 if narrowing:
625 if narrowing:
626 newincludes = oldincludes - removedincludes
626 newincludes = oldincludes - removedincludes
627 newexcludes = oldexcludes | addedexcludes
627 newexcludes = oldexcludes | addedexcludes
628 _narrow(
628 _narrow(
629 ui,
629 ui,
630 repo,
630 repo,
631 remote,
631 remote,
632 commoninc,
632 commoninc,
633 oldincludes,
633 oldincludes,
634 oldexcludes,
634 oldexcludes,
635 newincludes,
635 newincludes,
636 newexcludes,
636 newexcludes,
637 opts[b'force_delete_local_changes'],
637 opts[b'force_delete_local_changes'],
638 )
638 )
639 # _narrow() updated the narrowspec and _widen() below needs to
639 # _narrow() updated the narrowspec and _widen() below needs to
640 # use the updated values as its base (otherwise removed includes
640 # use the updated values as its base (otherwise removed includes
641 # and addedexcludes will be lost in the resulting narrowspec)
641 # and addedexcludes will be lost in the resulting narrowspec)
642 oldincludes = newincludes
642 oldincludes = newincludes
643 oldexcludes = newexcludes
643 oldexcludes = newexcludes
644
644
645 if widening:
645 if widening:
646 newincludes = oldincludes | addedincludes
646 newincludes = oldincludes | addedincludes
647 newexcludes = oldexcludes - removedexcludes
647 newexcludes = oldexcludes - removedexcludes
648 _widen(
648 _widen(
649 ui,
649 ui,
650 repo,
650 repo,
651 remote,
651 remote,
652 commoninc,
652 commoninc,
653 oldincludes,
653 oldincludes,
654 oldexcludes,
654 oldexcludes,
655 newincludes,
655 newincludes,
656 newexcludes,
656 newexcludes,
657 )
657 )
658
658
659 return 0
659 return 0
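
A minimal sketch of the include/exclude bookkeeping that trackedcmd performs above, using made-up patterns, to show when a request counts as widening (more data must be fetched) versus narrowing (data can be dropped):

    # Standalone illustration; the pattern strings are hypothetical.
    old_includes = {b'path:src', b'path:docs'}
    old_excludes = {b'path:src/tests'}

    added_includes = {b'path:tools', b'path:src'}   # b'path:src' is already tracked
    removed_includes = {b'path:docs'}
    added_excludes = {b'path:assets'}
    removed_excludes = set()

    # Mirror the filtering above: keep only genuinely new additions and
    # removals of entries that actually exist in the current narrowspec.
    added_includes -= old_includes        # {b'path:tools'}
    removed_includes &= old_includes      # {b'path:docs'}
    added_excludes -= old_excludes        # {b'path:assets'}
    removed_excludes &= old_excludes      # set()

    widening = bool(added_includes or removed_excludes)   # True
    narrowing = bool(removed_includes or added_excludes)  # True
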
@@ -1,443 +1,443 b''
1 # remotefilelogserver.py - server logic for a remotefilelog server
1 # remotefilelogserver.py - server logic for a remotefilelog server
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import errno
9 import errno
10 import os
10 import os
11 import stat
11 import stat
12 import time
12 import time
13 import zlib
13 import zlib
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.node import bin, hex, nullid
16 from mercurial.node import bin, hex, nullid
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18 from mercurial import (
18 from mercurial import (
19 changegroup,
19 changegroup,
20 changelog,
20 changelog,
21 context,
21 context,
22 error,
22 error,
23 extensions,
23 extensions,
24 match,
24 match,
25 pycompat,
25 pycompat,
26 requirements,
26 store,
27 store,
27 streamclone,
28 streamclone,
28 util,
29 util,
29 wireprotoserver,
30 wireprotoserver,
30 wireprototypes,
31 wireprototypes,
31 wireprotov1server,
32 wireprotov1server,
32 )
33 )
33 from mercurial.interfaces import repository
34 from . import (
34 from . import (
35 constants,
35 constants,
36 shallowutil,
36 shallowutil,
37 )
37 )
38
38
39 _sshv1server = wireprotoserver.sshv1protocolhandler
39 _sshv1server = wireprotoserver.sshv1protocolhandler
40
40
41
41
42 def setupserver(ui, repo):
42 def setupserver(ui, repo):
43 """Sets up a normal Mercurial repo so it can serve files to shallow repos.
43 """Sets up a normal Mercurial repo so it can serve files to shallow repos.
44 """
44 """
45 onetimesetup(ui)
45 onetimesetup(ui)
46
46
47 # don't send files to shallow clients during pulls
47 # don't send files to shallow clients during pulls
48 def generatefiles(
48 def generatefiles(
49 orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
49 orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
50 ):
50 ):
51 caps = self._bundlecaps or []
51 caps = self._bundlecaps or []
52 if constants.BUNDLE2_CAPABLITY in caps:
52 if constants.BUNDLE2_CAPABLITY in caps:
53 # only send files that don't match the specified patterns
53 # only send files that don't match the specified patterns
54 includepattern = None
54 includepattern = None
55 excludepattern = None
55 excludepattern = None
56 for cap in self._bundlecaps or []:
56 for cap in self._bundlecaps or []:
57 if cap.startswith(b"includepattern="):
57 if cap.startswith(b"includepattern="):
58 includepattern = cap[len(b"includepattern=") :].split(b'\0')
58 includepattern = cap[len(b"includepattern=") :].split(b'\0')
59 elif cap.startswith(b"excludepattern="):
59 elif cap.startswith(b"excludepattern="):
60 excludepattern = cap[len(b"excludepattern=") :].split(b'\0')
60 excludepattern = cap[len(b"excludepattern=") :].split(b'\0')
61
61
62 m = match.always()
62 m = match.always()
63 if includepattern or excludepattern:
63 if includepattern or excludepattern:
64 m = match.match(
64 m = match.match(
65 repo.root, b'', None, includepattern, excludepattern
65 repo.root, b'', None, includepattern, excludepattern
66 )
66 )
67
67
68 changedfiles = list([f for f in changedfiles if not m(f)])
68 changedfiles = list([f for f in changedfiles if not m(f)])
69 return orig(
69 return orig(
70 self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
70 self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
71 )
71 )
72
72
73 extensions.wrapfunction(
73 extensions.wrapfunction(
74 changegroup.cgpacker, b'generatefiles', generatefiles
74 changegroup.cgpacker, b'generatefiles', generatefiles
75 )
75 )
76
76
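
Since generatefiles above recovers narrow patterns from bundle capabilities, here is a small sketch of the corresponding encoding a shallow client would send (the values are illustrative, not taken from this change):

    includes = [b'path:src', b'path:docs']
    excludes = [b'path:src/tests']
    bundlecaps = [
        b'includepattern=' + b'\0'.join(includes),
        b'excludepattern=' + b'\0'.join(excludes),
    ]
    # generatefiles() strips the prefix and splits on NUL to get the lists back:
    assert bundlecaps[0][len(b'includepattern='):].split(b'\0') == includes
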
77
77
78 onetime = False
78 onetime = False
79
79
80
80
81 def onetimesetup(ui):
81 def onetimesetup(ui):
82 """Configures the wireprotocol for both clients and servers.
82 """Configures the wireprotocol for both clients and servers.
83 """
83 """
84 global onetime
84 global onetime
85 if onetime:
85 if onetime:
86 return
86 return
87 onetime = True
87 onetime = True
88
88
89 # support file content requests
89 # support file content requests
90 wireprotov1server.wireprotocommand(
90 wireprotov1server.wireprotocommand(
91 b'x_rfl_getflogheads', b'path', permission=b'pull'
91 b'x_rfl_getflogheads', b'path', permission=b'pull'
92 )(getflogheads)
92 )(getflogheads)
93 wireprotov1server.wireprotocommand(
93 wireprotov1server.wireprotocommand(
94 b'x_rfl_getfiles', b'', permission=b'pull'
94 b'x_rfl_getfiles', b'', permission=b'pull'
95 )(getfiles)
95 )(getfiles)
96 wireprotov1server.wireprotocommand(
96 wireprotov1server.wireprotocommand(
97 b'x_rfl_getfile', b'file node', permission=b'pull'
97 b'x_rfl_getfile', b'file node', permission=b'pull'
98 )(getfile)
98 )(getfile)
99
99
100 class streamstate(object):
100 class streamstate(object):
101 match = None
101 match = None
102 shallowremote = False
102 shallowremote = False
103 noflatmf = False
103 noflatmf = False
104
104
105 state = streamstate()
105 state = streamstate()
106
106
107 def stream_out_shallow(repo, proto, other):
107 def stream_out_shallow(repo, proto, other):
108 includepattern = None
108 includepattern = None
109 excludepattern = None
109 excludepattern = None
110 raw = other.get(b'includepattern')
110 raw = other.get(b'includepattern')
111 if raw:
111 if raw:
112 includepattern = raw.split(b'\0')
112 includepattern = raw.split(b'\0')
113 raw = other.get(b'excludepattern')
113 raw = other.get(b'excludepattern')
114 if raw:
114 if raw:
115 excludepattern = raw.split(b'\0')
115 excludepattern = raw.split(b'\0')
116
116
117 oldshallow = state.shallowremote
117 oldshallow = state.shallowremote
118 oldmatch = state.match
118 oldmatch = state.match
119 oldnoflatmf = state.noflatmf
119 oldnoflatmf = state.noflatmf
120 try:
120 try:
121 state.shallowremote = True
121 state.shallowremote = True
122 state.match = match.always()
122 state.match = match.always()
123 state.noflatmf = other.get(b'noflatmanifest') == b'True'
123 state.noflatmf = other.get(b'noflatmanifest') == b'True'
124 if includepattern or excludepattern:
124 if includepattern or excludepattern:
125 state.match = match.match(
125 state.match = match.match(
126 repo.root, b'', None, includepattern, excludepattern
126 repo.root, b'', None, includepattern, excludepattern
127 )
127 )
128 streamres = wireprotov1server.stream(repo, proto)
128 streamres = wireprotov1server.stream(repo, proto)
129
129
130 # Force the first value to execute, so the file list is computed
130 # Force the first value to execute, so the file list is computed
131 # within the try/finally scope
131 # within the try/finally scope
132 first = next(streamres.gen)
132 first = next(streamres.gen)
133 second = next(streamres.gen)
133 second = next(streamres.gen)
134
134
135 def gen():
135 def gen():
136 yield first
136 yield first
137 yield second
137 yield second
138 for value in streamres.gen:
138 for value in streamres.gen:
139 yield value
139 yield value
140
140
141 return wireprototypes.streamres(gen())
141 return wireprototypes.streamres(gen())
142 finally:
142 finally:
143 state.shallowremote = oldshallow
143 state.shallowremote = oldshallow
144 state.match = oldmatch
144 state.match = oldmatch
145 state.noflatmf = oldnoflatmf
145 state.noflatmf = oldnoflatmf
146
146
147 wireprotov1server.commands[b'stream_out_shallow'] = (
147 wireprotov1server.commands[b'stream_out_shallow'] = (
148 stream_out_shallow,
148 stream_out_shallow,
149 b'*',
149 b'*',
150 )
150 )
151
151
152 # don't clone filelogs to shallow clients
152 # don't clone filelogs to shallow clients
153 def _walkstreamfiles(orig, repo, matcher=None):
153 def _walkstreamfiles(orig, repo, matcher=None):
154 if state.shallowremote:
154 if state.shallowremote:
155 # if we are shallow ourselves, stream our local commits
155 # if we are shallow ourselves, stream our local commits
156 if shallowutil.isenabled(repo):
156 if shallowutil.isenabled(repo):
157 striplen = len(repo.store.path) + 1
157 striplen = len(repo.store.path) + 1
158 readdir = repo.store.rawvfs.readdir
158 readdir = repo.store.rawvfs.readdir
159 visit = [os.path.join(repo.store.path, b'data')]
159 visit = [os.path.join(repo.store.path, b'data')]
160 while visit:
160 while visit:
161 p = visit.pop()
161 p = visit.pop()
162 for f, kind, st in readdir(p, stat=True):
162 for f, kind, st in readdir(p, stat=True):
163 fp = p + b'/' + f
163 fp = p + b'/' + f
164 if kind == stat.S_IFREG:
164 if kind == stat.S_IFREG:
165 if not fp.endswith(b'.i') and not fp.endswith(
165 if not fp.endswith(b'.i') and not fp.endswith(
166 b'.d'
166 b'.d'
167 ):
167 ):
168 n = util.pconvert(fp[striplen:])
168 n = util.pconvert(fp[striplen:])
169 yield (store.decodedir(n), n, st.st_size)
169 yield (store.decodedir(n), n, st.st_size)
170 if kind == stat.S_IFDIR:
170 if kind == stat.S_IFDIR:
171 visit.append(fp)
171 visit.append(fp)
172
172
173 if repository.TREEMANIFEST_REQUIREMENT in repo.requirements:
173 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
174 for (u, e, s) in repo.store.datafiles():
174 for (u, e, s) in repo.store.datafiles():
175 if u.startswith(b'meta/') and (
175 if u.startswith(b'meta/') and (
176 u.endswith(b'.i') or u.endswith(b'.d')
176 u.endswith(b'.i') or u.endswith(b'.d')
177 ):
177 ):
178 yield (u, e, s)
178 yield (u, e, s)
179
179
180 # Return .d and .i files that do not match the shallow pattern
180 # Return .d and .i files that do not match the shallow pattern
181 match = state.match
181 match = state.match
182 if match and not match.always():
182 if match and not match.always():
183 for (u, e, s) in repo.store.datafiles():
183 for (u, e, s) in repo.store.datafiles():
184 f = u[5:-2] # trim data/... and .i/.d
184 f = u[5:-2] # trim data/... and .i/.d
185 if not state.match(f):
185 if not state.match(f):
186 yield (u, e, s)
186 yield (u, e, s)
187
187
188 for x in repo.store.topfiles():
188 for x in repo.store.topfiles():
189 if state.noflatmf and x[0][:11] == b'00manifest.':
189 if state.noflatmf and x[0][:11] == b'00manifest.':
190 continue
190 continue
191 yield x
191 yield x
192
192
193 elif shallowutil.isenabled(repo):
193 elif shallowutil.isenabled(repo):
194 # don't allow cloning from a shallow repo to a full repo
194 # don't allow cloning from a shallow repo to a full repo
195 # since it would require fetching every version of every
195 # since it would require fetching every version of every
196 # file in order to create the revlogs.
196 # file in order to create the revlogs.
197 raise error.Abort(
197 raise error.Abort(
198 _(b"Cannot clone from a shallow repo to a full repo.")
198 _(b"Cannot clone from a shallow repo to a full repo.")
199 )
199 )
200 else:
200 else:
201 for x in orig(repo, matcher):
201 for x in orig(repo, matcher):
202 yield x
202 yield x
203
203
204 extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles)
204 extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles)
205
205
206 # expose remotefilelog capabilities
206 # expose remotefilelog capabilities
207 def _capabilities(orig, repo, proto):
207 def _capabilities(orig, repo, proto):
208 caps = orig(repo, proto)
208 caps = orig(repo, proto)
209 if shallowutil.isenabled(repo) or ui.configbool(
209 if shallowutil.isenabled(repo) or ui.configbool(
210 b'remotefilelog', b'server'
210 b'remotefilelog', b'server'
211 ):
211 ):
212 if isinstance(proto, _sshv1server):
212 if isinstance(proto, _sshv1server):
213 # legacy getfiles method which only works over ssh
213 # legacy getfiles method which only works over ssh
214 caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
214 caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
215 caps.append(b'x_rfl_getflogheads')
215 caps.append(b'x_rfl_getflogheads')
216 caps.append(b'x_rfl_getfile')
216 caps.append(b'x_rfl_getfile')
217 return caps
217 return caps
218
218
219 extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
219 extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
220
220
221 def _adjustlinkrev(orig, self, *args, **kwargs):
221 def _adjustlinkrev(orig, self, *args, **kwargs):
222 # When generating file blobs, taking the real path is too slow on large
222 # When generating file blobs, taking the real path is too slow on large
223 # repos, so force it to just return the linkrev directly.
223 # repos, so force it to just return the linkrev directly.
224 repo = self._repo
224 repo = self._repo
225 if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev:
225 if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev:
226 return self._filelog.linkrev(self._filelog.rev(self._filenode))
226 return self._filelog.linkrev(self._filelog.rev(self._filenode))
227 return orig(self, *args, **kwargs)
227 return orig(self, *args, **kwargs)
228
228
229 extensions.wrapfunction(
229 extensions.wrapfunction(
230 context.basefilectx, b'_adjustlinkrev', _adjustlinkrev
230 context.basefilectx, b'_adjustlinkrev', _adjustlinkrev
231 )
231 )
232
232
233 def _iscmd(orig, cmd):
233 def _iscmd(orig, cmd):
234 if cmd == b'x_rfl_getfiles':
234 if cmd == b'x_rfl_getfiles':
235 return False
235 return False
236 return orig(cmd)
236 return orig(cmd)
237
237
238 extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd)
238 extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd)
239
239
240
240
241 def _loadfileblob(repo, cachepath, path, node):
241 def _loadfileblob(repo, cachepath, path, node):
242 filecachepath = os.path.join(cachepath, path, hex(node))
242 filecachepath = os.path.join(cachepath, path, hex(node))
243 if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
243 if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
244 filectx = repo.filectx(path, fileid=node)
244 filectx = repo.filectx(path, fileid=node)
245 if filectx.node() == nullid:
245 if filectx.node() == nullid:
246 repo.changelog = changelog.changelog(repo.svfs)
246 repo.changelog = changelog.changelog(repo.svfs)
247 filectx = repo.filectx(path, fileid=node)
247 filectx = repo.filectx(path, fileid=node)
248
248
249 text = createfileblob(filectx)
249 text = createfileblob(filectx)
250 # TODO configurable compression engines
250 # TODO configurable compression engines
251 text = zlib.compress(text)
251 text = zlib.compress(text)
252
252
253 # everything should be user & group read/writable
253 # everything should be user & group read/writable
254 oldumask = os.umask(0o002)
254 oldumask = os.umask(0o002)
255 try:
255 try:
256 dirname = os.path.dirname(filecachepath)
256 dirname = os.path.dirname(filecachepath)
257 if not os.path.exists(dirname):
257 if not os.path.exists(dirname):
258 try:
258 try:
259 os.makedirs(dirname)
259 os.makedirs(dirname)
260 except OSError as ex:
260 except OSError as ex:
261 if ex.errno != errno.EEXIST:
261 if ex.errno != errno.EEXIST:
262 raise
262 raise
263
263
264 f = None
264 f = None
265 try:
265 try:
266 f = util.atomictempfile(filecachepath, b"wb")
266 f = util.atomictempfile(filecachepath, b"wb")
267 f.write(text)
267 f.write(text)
268 except (IOError, OSError):
268 except (IOError, OSError):
269 # Don't abort if the user only has permission to read,
269 # Don't abort if the user only has permission to read,
270 # and not write.
270 # and not write.
271 pass
271 pass
272 finally:
272 finally:
273 if f:
273 if f:
274 f.close()
274 f.close()
275 finally:
275 finally:
276 os.umask(oldumask)
276 os.umask(oldumask)
277 else:
277 else:
278 with open(filecachepath, b"rb") as f:
278 with open(filecachepath, b"rb") as f:
279 text = f.read()
279 text = f.read()
280 return text
280 return text
281
281
282
282
283 def getflogheads(repo, proto, path):
283 def getflogheads(repo, proto, path):
284 """A server api for requesting a filelog's heads
284 """A server api for requesting a filelog's heads
285 """
285 """
286 flog = repo.file(path)
286 flog = repo.file(path)
287 heads = flog.heads()
287 heads = flog.heads()
288 return b'\n'.join((hex(head) for head in heads if head != nullid))
288 return b'\n'.join((hex(head) for head in heads if head != nullid))
289
289
290
290
291 def getfile(repo, proto, file, node):
291 def getfile(repo, proto, file, node):
292 """A server api for requesting a particular version of a file. Can be used
292 """A server api for requesting a particular version of a file. Can be used
293 in batches to request many files at once. The return protocol is:
293 in batches to request many files at once. The return protocol is:
294 <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
294 <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
295 non-zero for an error.
295 non-zero for an error.
296
296
297 data is a compressed blob with revlog flag and ancestors information. See
297 data is a compressed blob with revlog flag and ancestors information. See
298 createfileblob for its content.
298 createfileblob for its content.
299 """
299 """
300 if shallowutil.isenabled(repo):
300 if shallowutil.isenabled(repo):
301 return b'1\0' + _(b'cannot fetch remote files from shallow repo')
301 return b'1\0' + _(b'cannot fetch remote files from shallow repo')
302 cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
302 cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
303 if not cachepath:
303 if not cachepath:
304 cachepath = os.path.join(repo.path, b"remotefilelogcache")
304 cachepath = os.path.join(repo.path, b"remotefilelogcache")
305 node = bin(node.strip())
305 node = bin(node.strip())
306 if node == nullid:
306 if node == nullid:
307 return b'0\0'
307 return b'0\0'
308 return b'0\0' + _loadfileblob(repo, cachepath, file, node)
308 return b'0\0' + _loadfileblob(repo, cachepath, file, node)
309
309
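
The <errorcode>\0<data/errormsg> reply documented for getfile above splits on the first NUL; a client-side sketch only (the function name is made up, and the real parsing lives in the remotefilelog client code):

    def parse_getfile_reply(reply):
        # b'0\0' + blob on success, b'<nonzero>\0<error message>' otherwise
        code, _sep, payload = reply.partition(b'\0')
        if code != b'0':
            raise RuntimeError(payload.decode('utf-8', 'replace'))
        return payload  # zlib-compressed file blob, see createfileblob below
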
310
310
311 def getfiles(repo, proto):
311 def getfiles(repo, proto):
312 """A server api for requesting particular versions of particular files.
312 """A server api for requesting particular versions of particular files.
313 """
313 """
314 if shallowutil.isenabled(repo):
314 if shallowutil.isenabled(repo):
315 raise error.Abort(_(b'cannot fetch remote files from shallow repo'))
315 raise error.Abort(_(b'cannot fetch remote files from shallow repo'))
316 if not isinstance(proto, _sshv1server):
316 if not isinstance(proto, _sshv1server):
317 raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol'))
317 raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol'))
318
318
319 def streamer():
319 def streamer():
320 fin = proto._fin
320 fin = proto._fin
321
321
322 cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
322 cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
323 if not cachepath:
323 if not cachepath:
324 cachepath = os.path.join(repo.path, b"remotefilelogcache")
324 cachepath = os.path.join(repo.path, b"remotefilelogcache")
325
325
326 while True:
326 while True:
327 request = fin.readline()[:-1]
327 request = fin.readline()[:-1]
328 if not request:
328 if not request:
329 break
329 break
330
330
331 node = bin(request[:40])
331 node = bin(request[:40])
332 if node == nullid:
332 if node == nullid:
333 yield b'0\n'
333 yield b'0\n'
334 continue
334 continue
335
335
336 path = request[40:]
336 path = request[40:]
337
337
338 text = _loadfileblob(repo, cachepath, path, node)
338 text = _loadfileblob(repo, cachepath, path, node)
339
339
340 yield b'%d\n%s' % (len(text), text)
340 yield b'%d\n%s' % (len(text), text)
341
341
342 # it would be better to only flush after processing a whole batch
342 # it would be better to only flush after processing a whole batch
343 # but currently we don't know if there are more requests coming
343 # but currently we don't know if there are more requests coming
344 proto._fout.flush()
344 proto._fout.flush()
345
345
346 return wireprototypes.streamres(streamer())
346 return wireprototypes.streamres(streamer())
347
347
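
For reference, the line-oriented framing that streamer above consumes and produces, as a rough client-side sketch (the helper name and file objects are hypothetical):

    import binascii

    def request_files(fout, fin, wants):
        wants = list(wants)  # (path, 20-byte binary node) pairs
        for path, node in wants:
            # each request line is 40 hex characters followed by the path
            fout.write(binascii.hexlify(node) + path + b'\n')
        fout.write(b'\n')  # an empty line ends the batch
        fout.flush()
        blobs = []
        for _ in wants:
            size = int(fin.readline()[:-1])  # replies are b'<size>\n<blob>'
            blobs.append(fin.read(size))
        return blobs
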
348
348
349 def createfileblob(filectx):
349 def createfileblob(filectx):
350 """
350 """
351 format:
351 format:
352 v0:
352 v0:
353 str(len(rawtext)) + '\0' + rawtext + ancestortext
353 str(len(rawtext)) + '\0' + rawtext + ancestortext
354 v1:
354 v1:
355 'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
355 'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
356 metalist := metalist + '\n' + meta | meta
356 metalist := metalist + '\n' + meta | meta
357 meta := sizemeta | flagmeta
357 meta := sizemeta | flagmeta
358 sizemeta := METAKEYSIZE + str(len(rawtext))
358 sizemeta := METAKEYSIZE + str(len(rawtext))
359 flagmeta := METAKEYFLAG + str(flag)
359 flagmeta := METAKEYFLAG + str(flag)
360
360
361 note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
361 note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
362 length of 1.
362 length of 1.
363 """
363 """
364 flog = filectx.filelog()
364 flog = filectx.filelog()
365 frev = filectx.filerev()
365 frev = filectx.filerev()
366 revlogflags = flog._revlog.flags(frev)
366 revlogflags = flog._revlog.flags(frev)
367 if revlogflags == 0:
367 if revlogflags == 0:
368 # normal files
368 # normal files
369 text = filectx.data()
369 text = filectx.data()
370 else:
370 else:
371 # lfs, read raw revision data
371 # lfs, read raw revision data
372 text = flog.rawdata(frev)
372 text = flog.rawdata(frev)
373
373
374 repo = filectx._repo
374 repo = filectx._repo
375
375
376 ancestors = [filectx]
376 ancestors = [filectx]
377
377
378 try:
378 try:
379 repo.forcelinkrev = True
379 repo.forcelinkrev = True
380 ancestors.extend([f for f in filectx.ancestors()])
380 ancestors.extend([f for f in filectx.ancestors()])
381
381
382 ancestortext = b""
382 ancestortext = b""
383 for ancestorctx in ancestors:
383 for ancestorctx in ancestors:
384 parents = ancestorctx.parents()
384 parents = ancestorctx.parents()
385 p1 = nullid
385 p1 = nullid
386 p2 = nullid
386 p2 = nullid
387 if len(parents) > 0:
387 if len(parents) > 0:
388 p1 = parents[0].filenode()
388 p1 = parents[0].filenode()
389 if len(parents) > 1:
389 if len(parents) > 1:
390 p2 = parents[1].filenode()
390 p2 = parents[1].filenode()
391
391
392 copyname = b""
392 copyname = b""
393 rename = ancestorctx.renamed()
393 rename = ancestorctx.renamed()
394 if rename:
394 if rename:
395 copyname = rename[0]
395 copyname = rename[0]
396 linknode = ancestorctx.node()
396 linknode = ancestorctx.node()
397 ancestortext += b"%s%s%s%s%s\0" % (
397 ancestortext += b"%s%s%s%s%s\0" % (
398 ancestorctx.filenode(),
398 ancestorctx.filenode(),
399 p1,
399 p1,
400 p2,
400 p2,
401 linknode,
401 linknode,
402 copyname,
402 copyname,
403 )
403 )
404 finally:
404 finally:
405 repo.forcelinkrev = False
405 repo.forcelinkrev = False
406
406
407 header = shallowutil.buildfileblobheader(len(text), revlogflags)
407 header = shallowutil.buildfileblobheader(len(text), revlogflags)
408
408
409 return b"%s\0%s%s" % (header, text, ancestortext)
409 return b"%s\0%s%s" % (header, text, ancestortext)
410
410
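
A rough parser for the v1 layout described in the docstring above; this is a sketch rather than the extension's real reader, and the one-byte meta keys are taken as parameters because their actual values live in remotefilelog's constants module:

    def parse_v1_fileblob(blob, metakeysize=b's', metakeyflag=b'f'):
        # default key bytes are assumptions for this illustration
        header, _sep, rest = blob.partition(b'\0')
        lines = header.split(b'\n')
        if lines[0] != b'v1':
            raise ValueError('not a v1 fileblob')
        meta = {line[:1]: int(line[1:]) for line in lines[1:]}
        size = meta[metakeysize]          # len(rawtext)
        flags = meta.get(metakeyflag, 0)  # revlog flags, 0 when absent
        rawtext = rest[:size]
        ancestortext = rest[size:]
        return flags, rawtext, ancestortext
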
411
411
412 def gcserver(ui, repo):
412 def gcserver(ui, repo):
413 if not repo.ui.configbool(b"remotefilelog", b"server"):
413 if not repo.ui.configbool(b"remotefilelog", b"server"):
414 return
414 return
415
415
416 neededfiles = set()
416 neededfiles = set()
417 heads = repo.revs(b"heads(tip~25000:) - null")
417 heads = repo.revs(b"heads(tip~25000:) - null")
418
418
419 cachepath = repo.vfs.join(b"remotefilelogcache")
419 cachepath = repo.vfs.join(b"remotefilelogcache")
420 for head in heads:
420 for head in heads:
421 mf = repo[head].manifest()
421 mf = repo[head].manifest()
422 for filename, filenode in pycompat.iteritems(mf):
422 for filename, filenode in pycompat.iteritems(mf):
423 filecachepath = os.path.join(cachepath, filename, hex(filenode))
423 filecachepath = os.path.join(cachepath, filename, hex(filenode))
424 neededfiles.add(filecachepath)
424 neededfiles.add(filecachepath)
425
425
426 # delete unneeded older files
426 # delete unneeded older files
427 days = repo.ui.configint(b"remotefilelog", b"serverexpiration")
427 days = repo.ui.configint(b"remotefilelog", b"serverexpiration")
428 expiration = time.time() - (days * 24 * 60 * 60)
428 expiration = time.time() - (days * 24 * 60 * 60)
429
429
430 progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files")
430 progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files")
431 progress.update(0)
431 progress.update(0)
432 for root, dirs, files in os.walk(cachepath):
432 for root, dirs, files in os.walk(cachepath):
433 for file in files:
433 for file in files:
434 filepath = os.path.join(root, file)
434 filepath = os.path.join(root, file)
435 progress.increment()
435 progress.increment()
436 if filepath in neededfiles:
436 if filepath in neededfiles:
437 continue
437 continue
438
438
439 stat = os.stat(filepath)
439 stat = os.stat(filepath)
440 if stat.st_mtime < expiration:
440 if stat.st_mtime < expiration:
441 os.remove(filepath)
441 os.remove(filepath)
442
442
443 progress.complete()
443 progress.complete()
@@ -1,1295 +1,1296 b''
1 # sqlitestore.py - Storage backend that uses SQLite
1 # sqlitestore.py - Storage backend that uses SQLite
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """store repository data in SQLite (EXPERIMENTAL)
8 """store repository data in SQLite (EXPERIMENTAL)
9
9
10 The sqlitestore extension enables the storage of repository data in SQLite.
10 The sqlitestore extension enables the storage of repository data in SQLite.
11
11
12 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
12 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
13 GUARANTEES. This means that repositories created with this extension may
13 GUARANTEES. This means that repositories created with this extension may
14 only be usable with the exact version of this extension/Mercurial that was
14 only be usable with the exact version of this extension/Mercurial that was
15 used. The extension attempts to enforce this in order to prevent repository
15 used. The extension attempts to enforce this in order to prevent repository
16 corruption.
16 corruption.
17
17
18 In addition, several features are not yet supported or have known bugs:
18 In addition, several features are not yet supported or have known bugs:
19
19
20 * Only some data is stored in SQLite. Changeset, manifest, and other repository
20 * Only some data is stored in SQLite. Changeset, manifest, and other repository
21 data is not yet stored in SQLite.
21 data is not yet stored in SQLite.
22 * Transactions are not robust. If the process is aborted at the right time
22 * Transactions are not robust. If the process is aborted at the right time
23 during transaction close/rollback, the repository could be in an inconsistent
23 during transaction close/rollback, the repository could be in an inconsistent
24 state. This problem will diminish once all repository data is tracked by
24 state. This problem will diminish once all repository data is tracked by
25 SQLite.
25 SQLite.
26 * Bundle repositories do not work (the ability to use e.g.
26 * Bundle repositories do not work (the ability to use e.g.
27 `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
27 `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
28 existing repository).
28 existing repository).
29 * Various other features don't work.
29 * Various other features don't work.
30
30
31 This extension should work for basic clone/pull, update, and commit workflows.
31 This extension should work for basic clone/pull, update, and commit workflows.
32 Some history rewriting operations may fail due to lack of support for bundle
32 Some history rewriting operations may fail due to lack of support for bundle
33 repositories.
33 repositories.
34
34
35 To use, activate the extension and set the ``storage.new-repo-backend`` config
35 To use, activate the extension and set the ``storage.new-repo-backend`` config
36 option to ``sqlite`` to enable new repositories to use SQLite for storage.
36 option to ``sqlite`` to enable new repositories to use SQLite for storage.
37 """
37 """
38
38
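
For example, a host could opt in with configuration along these lines (exact file placement is left to the administrator):

    [extensions]
    sqlitestore =

    [storage]
    new-repo-backend = sqlite
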
39 # To run the test suite with repos using SQLite by default, execute the
39 # To run the test suite with repos using SQLite by default, execute the
40 # following:
40 # following:
41 #
41 #
42 # HGREPOFEATURES="sqlitestore" run-tests.py \
42 # HGREPOFEATURES="sqlitestore" run-tests.py \
43 # --extra-config-opt extensions.sqlitestore= \
43 # --extra-config-opt extensions.sqlitestore= \
44 # --extra-config-opt storage.new-repo-backend=sqlite
44 # --extra-config-opt storage.new-repo-backend=sqlite
45
45
46 from __future__ import absolute_import
46 from __future__ import absolute_import
47
47
48 import sqlite3
48 import sqlite3
49 import struct
49 import struct
50 import threading
50 import threading
51 import zlib
51 import zlib
52
52
53 from mercurial.i18n import _
53 from mercurial.i18n import _
54 from mercurial.node import (
54 from mercurial.node import (
55 nullid,
55 nullid,
56 nullrev,
56 nullrev,
57 short,
57 short,
58 )
58 )
59 from mercurial.thirdparty import attr
59 from mercurial.thirdparty import attr
60 from mercurial import (
60 from mercurial import (
61 ancestor,
61 ancestor,
62 dagop,
62 dagop,
63 encoding,
63 encoding,
64 error,
64 error,
65 extensions,
65 extensions,
66 localrepo,
66 localrepo,
67 mdiff,
67 mdiff,
68 pycompat,
68 pycompat,
69 registrar,
69 registrar,
70 requirements,
70 util,
71 util,
71 verify,
72 verify,
72 )
73 )
73 from mercurial.interfaces import (
74 from mercurial.interfaces import (
74 repository,
75 repository,
75 util as interfaceutil,
76 util as interfaceutil,
76 )
77 )
77 from mercurial.utils import (
78 from mercurial.utils import (
78 hashutil,
79 hashutil,
79 storageutil,
80 storageutil,
80 )
81 )
81
82
82 try:
83 try:
83 from mercurial import zstd
84 from mercurial import zstd
84
85
85 zstd.__version__
86 zstd.__version__
86 except ImportError:
87 except ImportError:
87 zstd = None
88 zstd = None
88
89
89 configtable = {}
90 configtable = {}
90 configitem = registrar.configitem(configtable)
91 configitem = registrar.configitem(configtable)
91
92
92 # experimental config: storage.sqlite.compression
93 # experimental config: storage.sqlite.compression
93 configitem(
94 configitem(
94 b'storage',
95 b'storage',
95 b'sqlite.compression',
96 b'sqlite.compression',
96 default=b'zstd' if zstd else b'zlib',
97 default=b'zstd' if zstd else b'zlib',
97 experimental=True,
98 experimental=True,
98 )
99 )
99
100
100 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
101 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
101 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
102 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
102 # be specifying the version(s) of Mercurial they are tested with, or
103 # be specifying the version(s) of Mercurial they are tested with, or
103 # leave the attribute unspecified.
104 # leave the attribute unspecified.
104 testedwith = b'ships-with-hg-core'
105 testedwith = b'ships-with-hg-core'
105
106
106 REQUIREMENT = b'exp-sqlite-001'
107 REQUIREMENT = b'exp-sqlite-001'
107 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
108 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
108 REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
109 REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
109 REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
110 REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
110 REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'
111 REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'
111
112
112 CURRENT_SCHEMA_VERSION = 1
113 CURRENT_SCHEMA_VERSION = 1
113
114
114 COMPRESSION_NONE = 1
115 COMPRESSION_NONE = 1
115 COMPRESSION_ZSTD = 2
116 COMPRESSION_ZSTD = 2
116 COMPRESSION_ZLIB = 3
117 COMPRESSION_ZLIB = 3
117
118
118 FLAG_CENSORED = 1
119 FLAG_CENSORED = 1
119 FLAG_MISSING_P1 = 2
120 FLAG_MISSING_P1 = 2
120 FLAG_MISSING_P2 = 4
121 FLAG_MISSING_P2 = 4
121
122
122 CREATE_SCHEMA = [
123 CREATE_SCHEMA = [
123 # Deltas are stored as content-indexed blobs.
124 # Deltas are stored as content-indexed blobs.
124 # compression column holds COMPRESSION_* constant for how the
125 # compression column holds COMPRESSION_* constant for how the
125 # delta is encoded.
126 # delta is encoded.
126 'CREATE TABLE delta ('
127 'CREATE TABLE delta ('
127 ' id INTEGER PRIMARY KEY, '
128 ' id INTEGER PRIMARY KEY, '
128 ' compression INTEGER NOT NULL, '
129 ' compression INTEGER NOT NULL, '
129 ' hash BLOB UNIQUE ON CONFLICT ABORT, '
130 ' hash BLOB UNIQUE ON CONFLICT ABORT, '
130 ' delta BLOB NOT NULL '
131 ' delta BLOB NOT NULL '
131 ')',
132 ')',
132 # Tracked paths are denormalized to integers to avoid redundant
133 # Tracked paths are denormalized to integers to avoid redundant
133 # storage of the path name.
134 # storage of the path name.
134 'CREATE TABLE filepath ('
135 'CREATE TABLE filepath ('
135 ' id INTEGER PRIMARY KEY, '
136 ' id INTEGER PRIMARY KEY, '
136 ' path BLOB NOT NULL '
137 ' path BLOB NOT NULL '
137 ')',
138 ')',
138 'CREATE UNIQUE INDEX filepath_path ON filepath (path)',
139 'CREATE UNIQUE INDEX filepath_path ON filepath (path)',
139 # We have a single table for all file revision data.
140 # We have a single table for all file revision data.
140 # Each file revision is uniquely described by a (path, rev) and
141 # Each file revision is uniquely described by a (path, rev) and
141 # (path, node).
142 # (path, node).
142 #
143 #
143 # Revision data is stored as a pointer to the delta producing this
144 # Revision data is stored as a pointer to the delta producing this
144 # revision and the file revision whose delta should be applied before
145 # revision and the file revision whose delta should be applied before
145 # that one. One can reconstruct the delta chain by recursively following
146 # that one. One can reconstruct the delta chain by recursively following
146 # the delta base revision pointers until one encounters NULL.
147 # the delta base revision pointers until one encounters NULL.
147 #
148 #
148 # flags column holds bitwise integer flags controlling storage options.
149 # flags column holds bitwise integer flags controlling storage options.
149 # These flags are defined by the FLAG_* constants.
150 # These flags are defined by the FLAG_* constants.
150 'CREATE TABLE fileindex ('
151 'CREATE TABLE fileindex ('
151 ' id INTEGER PRIMARY KEY, '
152 ' id INTEGER PRIMARY KEY, '
152 ' pathid INTEGER REFERENCES filepath(id), '
153 ' pathid INTEGER REFERENCES filepath(id), '
153 ' revnum INTEGER NOT NULL, '
154 ' revnum INTEGER NOT NULL, '
154 ' p1rev INTEGER NOT NULL, '
155 ' p1rev INTEGER NOT NULL, '
155 ' p2rev INTEGER NOT NULL, '
156 ' p2rev INTEGER NOT NULL, '
156 ' linkrev INTEGER NOT NULL, '
157 ' linkrev INTEGER NOT NULL, '
157 ' flags INTEGER NOT NULL, '
158 ' flags INTEGER NOT NULL, '
158 ' deltaid INTEGER REFERENCES delta(id), '
159 ' deltaid INTEGER REFERENCES delta(id), '
159 ' deltabaseid INTEGER REFERENCES fileindex(id), '
160 ' deltabaseid INTEGER REFERENCES fileindex(id), '
160 ' node BLOB NOT NULL '
161 ' node BLOB NOT NULL '
161 ')',
162 ')',
162 'CREATE UNIQUE INDEX fileindex_pathrevnum '
163 'CREATE UNIQUE INDEX fileindex_pathrevnum '
163 ' ON fileindex (pathid, revnum)',
164 ' ON fileindex (pathid, revnum)',
164 'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)',
165 'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)',
165 # Provide a view over all file data for convenience.
166 # Provide a view over all file data for convenience.
166 'CREATE VIEW filedata AS '
167 'CREATE VIEW filedata AS '
167 'SELECT '
168 'SELECT '
168 ' fileindex.id AS id, '
169 ' fileindex.id AS id, '
169 ' filepath.id AS pathid, '
170 ' filepath.id AS pathid, '
170 ' filepath.path AS path, '
171 ' filepath.path AS path, '
171 ' fileindex.revnum AS revnum, '
172 ' fileindex.revnum AS revnum, '
172 ' fileindex.node AS node, '
173 ' fileindex.node AS node, '
173 ' fileindex.p1rev AS p1rev, '
174 ' fileindex.p1rev AS p1rev, '
174 ' fileindex.p2rev AS p2rev, '
175 ' fileindex.p2rev AS p2rev, '
175 ' fileindex.linkrev AS linkrev, '
176 ' fileindex.linkrev AS linkrev, '
176 ' fileindex.flags AS flags, '
177 ' fileindex.flags AS flags, '
177 ' fileindex.deltaid AS deltaid, '
178 ' fileindex.deltaid AS deltaid, '
178 ' fileindex.deltabaseid AS deltabaseid '
179 ' fileindex.deltabaseid AS deltabaseid '
179 'FROM filepath, fileindex '
180 'FROM filepath, fileindex '
180 'WHERE fileindex.pathid=filepath.id',
181 'WHERE fileindex.pathid=filepath.id',
181 'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
182 'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
182 ]
183 ]
183
184
184
185
185 def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
186 def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
186 """Resolve a delta chain for a file node."""
187 """Resolve a delta chain for a file node."""
187
188
188 # TODO the "not in ({stops})" here is possibly slowing down the query
189 # TODO the "not in ({stops})" here is possibly slowing down the query
189 # because it needs to perform the lookup on every recursive invocation.
190 # because it needs to perform the lookup on every recursive invocation.
190 # This could possibly be faster if we created a temporary query with
191 # This could possibly be faster if we created a temporary query with
191 # baseid "poisoned" to null and limited the recursive filter to
192 # baseid "poisoned" to null and limited the recursive filter to
192 # "is not null".
193 # "is not null".
193 res = db.execute(
194 res = db.execute(
194 'WITH RECURSIVE '
195 'WITH RECURSIVE '
195 ' deltachain(deltaid, baseid) AS ('
196 ' deltachain(deltaid, baseid) AS ('
196 ' SELECT deltaid, deltabaseid FROM fileindex '
197 ' SELECT deltaid, deltabaseid FROM fileindex '
197 ' WHERE pathid=? AND node=? '
198 ' WHERE pathid=? AND node=? '
198 ' UNION ALL '
199 ' UNION ALL '
199 ' SELECT fileindex.deltaid, deltabaseid '
200 ' SELECT fileindex.deltaid, deltabaseid '
200 ' FROM fileindex, deltachain '
201 ' FROM fileindex, deltachain '
201 ' WHERE '
202 ' WHERE '
202 ' fileindex.id=deltachain.baseid '
203 ' fileindex.id=deltachain.baseid '
203 ' AND deltachain.baseid IS NOT NULL '
204 ' AND deltachain.baseid IS NOT NULL '
204 ' AND fileindex.id NOT IN ({stops}) '
205 ' AND fileindex.id NOT IN ({stops}) '
205 ' ) '
206 ' ) '
206 'SELECT deltachain.baseid, compression, delta '
207 'SELECT deltachain.baseid, compression, delta '
207 'FROM deltachain, delta '
208 'FROM deltachain, delta '
208 'WHERE delta.id=deltachain.deltaid'.format(
209 'WHERE delta.id=deltachain.deltaid'.format(
209 stops=','.join(['?'] * len(stoprids))
210 stops=','.join(['?'] * len(stoprids))
210 ),
211 ),
211 tuple([pathid, node] + list(stoprids.keys())),
212 tuple([pathid, node] + list(stoprids.keys())),
212 )
213 )
213
214
214 deltas = []
215 deltas = []
215 lastdeltabaseid = None
216 lastdeltabaseid = None
216
217
217 for deltabaseid, compression, delta in res:
218 for deltabaseid, compression, delta in res:
218 lastdeltabaseid = deltabaseid
219 lastdeltabaseid = deltabaseid
219
220
220 if compression == COMPRESSION_ZSTD:
221 if compression == COMPRESSION_ZSTD:
221 delta = zstddctx.decompress(delta)
222 delta = zstddctx.decompress(delta)
222 elif compression == COMPRESSION_NONE:
223 elif compression == COMPRESSION_NONE:
223 delta = delta
224 delta = delta
224 elif compression == COMPRESSION_ZLIB:
225 elif compression == COMPRESSION_ZLIB:
225 delta = zlib.decompress(delta)
226 delta = zlib.decompress(delta)
226 else:
227 else:
227 raise SQLiteStoreError(
228 raise SQLiteStoreError(
228 b'unhandled compression type: %d' % compression
229 b'unhandled compression type: %d' % compression
229 )
230 )
230
231
231 deltas.append(delta)
232 deltas.append(delta)
232
233
233 if lastdeltabaseid in stoprids:
234 if lastdeltabaseid in stoprids:
234 basetext = revisioncache[stoprids[lastdeltabaseid]]
235 basetext = revisioncache[stoprids[lastdeltabaseid]]
235 else:
236 else:
236 basetext = deltas.pop()
237 basetext = deltas.pop()
237
238
238 deltas.reverse()
239 deltas.reverse()
239 fulltext = mdiff.patches(basetext, deltas)
240 fulltext = mdiff.patches(basetext, deltas)
240
241
241 # SQLite returns buffer instances for blob columns on Python 2. This
242 # SQLite returns buffer instances for blob columns on Python 2. This
242 # type can propagate through the delta application layer. Because
243 # type can propagate through the delta application layer. Because
243 # downstream callers assume revisions are bytes, cast as needed.
244 # downstream callers assume revisions are bytes, cast as needed.
244 if not isinstance(fulltext, bytes):
245 if not isinstance(fulltext, bytes):
245 fulltext = bytes(fulltext)
246 fulltext = bytes(fulltext)
246
247
247 return fulltext
248 return fulltext
248
249
249
250
250 def insertdelta(db, compression, hash, delta):
251 def insertdelta(db, compression, hash, delta):
251 try:
252 try:
252 return db.execute(
253 return db.execute(
253 'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)',
254 'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)',
254 (compression, hash, delta),
255 (compression, hash, delta),
255 ).lastrowid
256 ).lastrowid
256 except sqlite3.IntegrityError:
257 except sqlite3.IntegrityError:
257 return db.execute(
258 return db.execute(
258 'SELECT id FROM delta WHERE hash=?', (hash,)
259 'SELECT id FROM delta WHERE hash=?', (hash,)
259 ).fetchone()[0]
260 ).fetchone()[0]
260
261
261
262
262 class SQLiteStoreError(error.StorageError):
263 class SQLiteStoreError(error.StorageError):
263 pass
264 pass
264
265
265
266
266 @attr.s
267 @attr.s
267 class revisionentry(object):
268 class revisionentry(object):
268 rid = attr.ib()
269 rid = attr.ib()
269 rev = attr.ib()
270 rev = attr.ib()
270 node = attr.ib()
271 node = attr.ib()
271 p1rev = attr.ib()
272 p1rev = attr.ib()
272 p2rev = attr.ib()
273 p2rev = attr.ib()
273 p1node = attr.ib()
274 p1node = attr.ib()
274 p2node = attr.ib()
275 p2node = attr.ib()
275 linkrev = attr.ib()
276 linkrev = attr.ib()
276 flags = attr.ib()
277 flags = attr.ib()
277
278
278
279
279 @interfaceutil.implementer(repository.irevisiondelta)
280 @interfaceutil.implementer(repository.irevisiondelta)
280 @attr.s(slots=True)
281 @attr.s(slots=True)
281 class sqliterevisiondelta(object):
282 class sqliterevisiondelta(object):
282 node = attr.ib()
283 node = attr.ib()
283 p1node = attr.ib()
284 p1node = attr.ib()
284 p2node = attr.ib()
285 p2node = attr.ib()
285 basenode = attr.ib()
286 basenode = attr.ib()
286 flags = attr.ib()
287 flags = attr.ib()
287 baserevisionsize = attr.ib()
288 baserevisionsize = attr.ib()
288 revision = attr.ib()
289 revision = attr.ib()
289 delta = attr.ib()
290 delta = attr.ib()
290 linknode = attr.ib(default=None)
291 linknode = attr.ib(default=None)
291
292
292
293
293 @interfaceutil.implementer(repository.iverifyproblem)
294 @interfaceutil.implementer(repository.iverifyproblem)
294 @attr.s(frozen=True)
295 @attr.s(frozen=True)
295 class sqliteproblem(object):
296 class sqliteproblem(object):
296 warning = attr.ib(default=None)
297 warning = attr.ib(default=None)
297 error = attr.ib(default=None)
298 error = attr.ib(default=None)
298 node = attr.ib(default=None)
299 node = attr.ib(default=None)
299
300
300
301
301 @interfaceutil.implementer(repository.ifilestorage)
302 @interfaceutil.implementer(repository.ifilestorage)
302 class sqlitefilestore(object):
303 class sqlitefilestore(object):
303 """Implements storage for an individual tracked path."""
304 """Implements storage for an individual tracked path."""
304
305
305 def __init__(self, db, path, compression):
306 def __init__(self, db, path, compression):
306 self._db = db
307 self._db = db
307 self._path = path
308 self._path = path
308
309
309 self._pathid = None
310 self._pathid = None
310
311
311 # revnum -> node
312 # revnum -> node
312 self._revtonode = {}
313 self._revtonode = {}
313 # node -> revnum
314 # node -> revnum
314 self._nodetorev = {}
315 self._nodetorev = {}
315 # node -> data structure
316 # node -> data structure
316 self._revisions = {}
317 self._revisions = {}
317
318
318 self._revisioncache = util.lrucachedict(10)
319 self._revisioncache = util.lrucachedict(10)
319
320
320 self._compengine = compression
321 self._compengine = compression
321
322
322 if compression == b'zstd':
323 if compression == b'zstd':
323 self._cctx = zstd.ZstdCompressor(level=3)
324 self._cctx = zstd.ZstdCompressor(level=3)
324 self._dctx = zstd.ZstdDecompressor()
325 self._dctx = zstd.ZstdDecompressor()
325 else:
326 else:
326 self._cctx = None
327 self._cctx = None
327 self._dctx = None
328 self._dctx = None
328
329
329 self._refreshindex()
330 self._refreshindex()
330
331
331 def _refreshindex(self):
332 def _refreshindex(self):
332 self._revtonode = {}
333 self._revtonode = {}
333 self._nodetorev = {}
334 self._nodetorev = {}
334 self._revisions = {}
335 self._revisions = {}
335
336
336 res = list(
337 res = list(
337 self._db.execute(
338 self._db.execute(
338 'SELECT id FROM filepath WHERE path=?', (self._path,)
339 'SELECT id FROM filepath WHERE path=?', (self._path,)
339 )
340 )
340 )
341 )
341
342
342 if not res:
343 if not res:
343 self._pathid = None
344 self._pathid = None
344 return
345 return
345
346
346 self._pathid = res[0][0]
347 self._pathid = res[0][0]
347
348
348 res = self._db.execute(
349 res = self._db.execute(
349 'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
350 'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
350 'FROM fileindex '
351 'FROM fileindex '
351 'WHERE pathid=? '
352 'WHERE pathid=? '
352 'ORDER BY revnum ASC',
353 'ORDER BY revnum ASC',
353 (self._pathid,),
354 (self._pathid,),
354 )
355 )
355
356
356 for i, row in enumerate(res):
357 for i, row in enumerate(res):
357 rid, rev, node, p1rev, p2rev, linkrev, flags = row
358 rid, rev, node, p1rev, p2rev, linkrev, flags = row
358
359
359 if i != rev:
360 if i != rev:
360 raise SQLiteStoreError(
361 raise SQLiteStoreError(
361 _(b'sqlite database has inconsistent revision numbers')
362 _(b'sqlite database has inconsistent revision numbers')
362 )
363 )
363
364
364 if p1rev == nullrev:
365 if p1rev == nullrev:
365 p1node = nullid
366 p1node = nullid
366 else:
367 else:
367 p1node = self._revtonode[p1rev]
368 p1node = self._revtonode[p1rev]
368
369
369 if p2rev == nullrev:
370 if p2rev == nullrev:
370 p2node = nullid
371 p2node = nullid
371 else:
372 else:
372 p2node = self._revtonode[p2rev]
373 p2node = self._revtonode[p2rev]
373
374
374 entry = revisionentry(
375 entry = revisionentry(
375 rid=rid,
376 rid=rid,
376 rev=rev,
377 rev=rev,
377 node=node,
378 node=node,
378 p1rev=p1rev,
379 p1rev=p1rev,
379 p2rev=p2rev,
380 p2rev=p2rev,
380 p1node=p1node,
381 p1node=p1node,
381 p2node=p2node,
382 p2node=p2node,
382 linkrev=linkrev,
383 linkrev=linkrev,
383 flags=flags,
384 flags=flags,
384 )
385 )
385
386
386 self._revtonode[rev] = node
387 self._revtonode[rev] = node
387 self._nodetorev[node] = rev
388 self._nodetorev[node] = rev
388 self._revisions[node] = entry
389 self._revisions[node] = entry
389
390
390 # Start of ifileindex interface.
391 # Start of ifileindex interface.
391
392
392 def __len__(self):
393 def __len__(self):
393 return len(self._revisions)
394 return len(self._revisions)
394
395
395 def __iter__(self):
396 def __iter__(self):
396 return iter(pycompat.xrange(len(self._revisions)))
397 return iter(pycompat.xrange(len(self._revisions)))
397
398
398 def hasnode(self, node):
399 def hasnode(self, node):
399 if node == nullid:
400 if node == nullid:
400 return False
401 return False
401
402
402 return node in self._nodetorev
403 return node in self._nodetorev
403
404
404 def revs(self, start=0, stop=None):
405 def revs(self, start=0, stop=None):
405 return storageutil.iterrevs(
406 return storageutil.iterrevs(
406 len(self._revisions), start=start, stop=stop
407 len(self._revisions), start=start, stop=stop
407 )
408 )
408
409
409 def parents(self, node):
410 def parents(self, node):
410 if node == nullid:
411 if node == nullid:
411 return nullid, nullid
412 return nullid, nullid
412
413
413 if node not in self._revisions:
414 if node not in self._revisions:
414 raise error.LookupError(node, self._path, _(b'no node'))
415 raise error.LookupError(node, self._path, _(b'no node'))
415
416
416 entry = self._revisions[node]
417 entry = self._revisions[node]
417 return entry.p1node, entry.p2node
418 return entry.p1node, entry.p2node
418
419
419 def parentrevs(self, rev):
420 def parentrevs(self, rev):
420 if rev == nullrev:
421 if rev == nullrev:
421 return nullrev, nullrev
422 return nullrev, nullrev
422
423
423 if rev not in self._revtonode:
424 if rev not in self._revtonode:
424 raise IndexError(rev)
425 raise IndexError(rev)
425
426
426 entry = self._revisions[self._revtonode[rev]]
427 entry = self._revisions[self._revtonode[rev]]
427 return entry.p1rev, entry.p2rev
428 return entry.p1rev, entry.p2rev
428
429
429 def rev(self, node):
430 def rev(self, node):
430 if node == nullid:
431 if node == nullid:
431 return nullrev
432 return nullrev
432
433
433 if node not in self._nodetorev:
434 if node not in self._nodetorev:
434 raise error.LookupError(node, self._path, _(b'no node'))
435 raise error.LookupError(node, self._path, _(b'no node'))
435
436
436 return self._nodetorev[node]
437 return self._nodetorev[node]
437
438
438 def node(self, rev):
439 def node(self, rev):
439 if rev == nullrev:
440 if rev == nullrev:
440 return nullid
441 return nullid
441
442
442 if rev not in self._revtonode:
443 if rev not in self._revtonode:
443 raise IndexError(rev)
444 raise IndexError(rev)
444
445
445 return self._revtonode[rev]
446 return self._revtonode[rev]
446
447
447 def lookup(self, node):
448 def lookup(self, node):
448 return storageutil.fileidlookup(self, node, self._path)
449 return storageutil.fileidlookup(self, node, self._path)
449
450
450 def linkrev(self, rev):
451 def linkrev(self, rev):
451 if rev == nullrev:
452 if rev == nullrev:
452 return nullrev
453 return nullrev
453
454
454 if rev not in self._revtonode:
455 if rev not in self._revtonode:
455 raise IndexError(rev)
456 raise IndexError(rev)
456
457
457 entry = self._revisions[self._revtonode[rev]]
458 entry = self._revisions[self._revtonode[rev]]
458 return entry.linkrev
459 return entry.linkrev
459
460
460 def iscensored(self, rev):
461 def iscensored(self, rev):
461 if rev == nullrev:
462 if rev == nullrev:
462 return False
463 return False
463
464
464 if rev not in self._revtonode:
465 if rev not in self._revtonode:
465 raise IndexError(rev)
466 raise IndexError(rev)
466
467
467 return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
468 return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
468
469
469 def commonancestorsheads(self, node1, node2):
470 def commonancestorsheads(self, node1, node2):
470 rev1 = self.rev(node1)
471 rev1 = self.rev(node1)
471 rev2 = self.rev(node2)
472 rev2 = self.rev(node2)
472
473
473 ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
474 ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
474 return pycompat.maplist(self.node, ancestors)
475 return pycompat.maplist(self.node, ancestors)
475
476
476 def descendants(self, revs):
477 def descendants(self, revs):
477 # TODO we could implement this using a recursive SQL query, which
478 # TODO we could implement this using a recursive SQL query, which
478 # might be faster.
479 # might be faster.
479 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
480 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
480
481
481 def heads(self, start=None, stop=None):
482 def heads(self, start=None, stop=None):
482 if start is None and stop is None:
483 if start is None and stop is None:
483 if not len(self):
484 if not len(self):
484 return [nullid]
485 return [nullid]
485
486
486 startrev = self.rev(start) if start is not None else nullrev
487 startrev = self.rev(start) if start is not None else nullrev
487 stoprevs = {self.rev(n) for n in stop or []}
488 stoprevs = {self.rev(n) for n in stop or []}
488
489
489 revs = dagop.headrevssubset(
490 revs = dagop.headrevssubset(
490 self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
491 self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
491 )
492 )
492
493
493 return [self.node(rev) for rev in revs]
494 return [self.node(rev) for rev in revs]
494
495
495 def children(self, node):
496 def children(self, node):
496 rev = self.rev(node)
497 rev = self.rev(node)
497
498
498 res = self._db.execute(
499 res = self._db.execute(
499 'SELECT'
500 'SELECT'
500 ' node '
501 ' node '
501 ' FROM filedata '
502 ' FROM filedata '
502 ' WHERE path=? AND (p1rev=? OR p2rev=?) '
503 ' WHERE path=? AND (p1rev=? OR p2rev=?) '
503 ' ORDER BY revnum ASC',
504 ' ORDER BY revnum ASC',
504 (self._path, rev, rev),
505 (self._path, rev, rev),
505 )
506 )
506
507
507 return [row[0] for row in res]
508 return [row[0] for row in res]
508
509
509 # End of ifileindex interface.
510 # End of ifileindex interface.
510
511
511 # Start of ifiledata interface.
512 # Start of ifiledata interface.
512
513
513 def size(self, rev):
514 def size(self, rev):
514 if rev == nullrev:
515 if rev == nullrev:
515 return 0
516 return 0
516
517
517 if rev not in self._revtonode:
518 if rev not in self._revtonode:
518 raise IndexError(rev)
519 raise IndexError(rev)
519
520
520 node = self._revtonode[rev]
521 node = self._revtonode[rev]
521
522
522 if self.renamed(node):
523 if self.renamed(node):
523 return len(self.read(node))
524 return len(self.read(node))
524
525
525 return len(self.revision(node))
526 return len(self.revision(node))
526
527
527 def revision(self, node, raw=False, _verifyhash=True):
528 def revision(self, node, raw=False, _verifyhash=True):
528 if node in (nullid, nullrev):
529 if node in (nullid, nullrev):
529 return b''
530 return b''
530
531
531 if isinstance(node, int):
532 if isinstance(node, int):
532 node = self.node(node)
533 node = self.node(node)
533
534
534 if node not in self._nodetorev:
535 if node not in self._nodetorev:
535 raise error.LookupError(node, self._path, _(b'no node'))
536 raise error.LookupError(node, self._path, _(b'no node'))
536
537
537 if node in self._revisioncache:
538 if node in self._revisioncache:
538 return self._revisioncache[node]
539 return self._revisioncache[node]
539
540
540 # Because we have a fulltext revision cache, we are able to
541 # Because we have a fulltext revision cache, we are able to
541 # short-circuit delta chain traversal and decompression as soon as
542 # short-circuit delta chain traversal and decompression as soon as
542 # we encounter a revision in the cache.
543 # we encounter a revision in the cache.
543
544
544 stoprids = {self._revisions[n].rid: n for n in self._revisioncache}
545 stoprids = {self._revisions[n].rid: n for n in self._revisioncache}
545
546
546 if not stoprids:
547 if not stoprids:
547 stoprids[-1] = None
548 stoprids[-1] = None
548
549
549 fulltext = resolvedeltachain(
550 fulltext = resolvedeltachain(
550 self._db,
551 self._db,
551 self._pathid,
552 self._pathid,
552 node,
553 node,
553 self._revisioncache,
554 self._revisioncache,
554 stoprids,
555 stoprids,
555 zstddctx=self._dctx,
556 zstddctx=self._dctx,
556 )
557 )
557
558
558 # Don't verify hashes if parent nodes were rewritten, as the hash
559 # Don't verify hashes if parent nodes were rewritten, as the hash
559 # wouldn't verify.
560 # wouldn't verify.
560 if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
561 if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
561 _verifyhash = False
562 _verifyhash = False
562
563
563 if _verifyhash:
564 if _verifyhash:
564 self._checkhash(fulltext, node)
565 self._checkhash(fulltext, node)
565 self._revisioncache[node] = fulltext
566 self._revisioncache[node] = fulltext
566
567
567 return fulltext
568 return fulltext
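resolvedeltachain() itself is defined in the truncated earlier part of this file. Conceptually it walks the stored delta-base pointers backwards until it reaches a full snapshot or one of the cached fulltexts passed in via `stoprids`, then applies the collected deltas in order. The sketch below is a rough, generic rendering of that idea under those assumptions, not the module's actual SQL-driven implementation; the callable parameters are stand-ins.

def resolvechain(node, getdelta, getbase, patch, cache):
    # getdelta(node) -> delta blob, getbase(node) -> base node or None,
    # patch(text, delta) -> new fulltext, cache maps nodes to known fulltexts.
    chain = []
    cur = node
    while cur is not None and cur not in cache:
        chain.append(getdelta(cur))
        cur = getbase(cur)
    # Start from a cached fulltext if we stopped at one, else from empty.
    text = cache[cur] if cur is not None else b''
    for delta in reversed(chain):
        text = patch(text, delta)
    return text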
568
569
569 def rawdata(self, *args, **kwargs):
570 def rawdata(self, *args, **kwargs):
570 return self.revision(*args, **kwargs)
571 return self.revision(*args, **kwargs)
571
572
572 def read(self, node):
573 def read(self, node):
573 return storageutil.filtermetadata(self.revision(node))
574 return storageutil.filtermetadata(self.revision(node))
574
575
575 def renamed(self, node):
576 def renamed(self, node):
576 return storageutil.filerevisioncopied(self, node)
577 return storageutil.filerevisioncopied(self, node)
577
578
578 def cmp(self, node, fulltext):
579 def cmp(self, node, fulltext):
579 return not storageutil.filedataequivalent(self, node, fulltext)
580 return not storageutil.filedataequivalent(self, node, fulltext)
580
581
581 def emitrevisions(
582 def emitrevisions(
582 self,
583 self,
583 nodes,
584 nodes,
584 nodesorder=None,
585 nodesorder=None,
585 revisiondata=False,
586 revisiondata=False,
586 assumehaveparentrevisions=False,
587 assumehaveparentrevisions=False,
587 deltamode=repository.CG_DELTAMODE_STD,
588 deltamode=repository.CG_DELTAMODE_STD,
588 ):
589 ):
589 if nodesorder not in (b'nodes', b'storage', b'linear', None):
590 if nodesorder not in (b'nodes', b'storage', b'linear', None):
590 raise error.ProgrammingError(
591 raise error.ProgrammingError(
591 b'unhandled value for nodesorder: %s' % nodesorder
592 b'unhandled value for nodesorder: %s' % nodesorder
592 )
593 )
593
594
594 nodes = [n for n in nodes if n != nullid]
595 nodes = [n for n in nodes if n != nullid]
595
596
596 if not nodes:
597 if not nodes:
597 return
598 return
598
599
599 # TODO perform in a single query.
600 # TODO perform in a single query.
600 res = self._db.execute(
601 res = self._db.execute(
601 'SELECT revnum, deltaid FROM fileindex '
602 'SELECT revnum, deltaid FROM fileindex '
602 'WHERE pathid=? '
603 'WHERE pathid=? '
603 ' AND node in (%s)' % (','.join(['?'] * len(nodes))),
604 ' AND node in (%s)' % (','.join(['?'] * len(nodes))),
604 tuple([self._pathid] + nodes),
605 tuple([self._pathid] + nodes),
605 )
606 )
606
607
607 deltabases = {}
608 deltabases = {}
608
609
609 for rev, deltaid in res:
610 for rev, deltaid in res:
610 res = self._db.execute(
611 res = self._db.execute(
611 'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
612 'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
612 (self._pathid, deltaid),
613 (self._pathid, deltaid),
613 )
614 )
614 deltabases[rev] = res.fetchone()[0]
615 deltabases[rev] = res.fetchone()[0]
615
616
616 # TODO define revdifffn so we can use delta from storage.
617 # TODO define revdifffn so we can use delta from storage.
617 for delta in storageutil.emitrevisions(
618 for delta in storageutil.emitrevisions(
618 self,
619 self,
619 nodes,
620 nodes,
620 nodesorder,
621 nodesorder,
621 sqliterevisiondelta,
622 sqliterevisiondelta,
622 deltaparentfn=deltabases.__getitem__,
623 deltaparentfn=deltabases.__getitem__,
623 revisiondata=revisiondata,
624 revisiondata=revisiondata,
624 assumehaveparentrevisions=assumehaveparentrevisions,
625 assumehaveparentrevisions=assumehaveparentrevisions,
625 deltamode=deltamode,
626 deltamode=deltamode,
626 ):
627 ):
627
628
628 yield delta
629 yield delta
629
630
630 # End of ifiledata interface.
631 # End of ifiledata interface.
631
632
632 # Start of ifilemutation interface.
633 # Start of ifilemutation interface.
633
634
634 def add(self, filedata, meta, transaction, linkrev, p1, p2):
635 def add(self, filedata, meta, transaction, linkrev, p1, p2):
635 if meta or filedata.startswith(b'\x01\n'):
636 if meta or filedata.startswith(b'\x01\n'):
636 filedata = storageutil.packmeta(meta, filedata)
637 filedata = storageutil.packmeta(meta, filedata)
637
638
638 return self.addrevision(filedata, transaction, linkrev, p1, p2)
639 return self.addrevision(filedata, transaction, linkrev, p1, p2)
639
640
640 def addrevision(
641 def addrevision(
641 self,
642 self,
642 revisiondata,
643 revisiondata,
643 transaction,
644 transaction,
644 linkrev,
645 linkrev,
645 p1,
646 p1,
646 p2,
647 p2,
647 node=None,
648 node=None,
648 flags=0,
649 flags=0,
649 cachedelta=None,
650 cachedelta=None,
650 ):
651 ):
651 if flags:
652 if flags:
652 raise SQLiteStoreError(_(b'flags not supported on revisions'))
653 raise SQLiteStoreError(_(b'flags not supported on revisions'))
653
654
654 validatehash = node is not None
655 validatehash = node is not None
655 node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
656 node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
656
657
657 if validatehash:
658 if validatehash:
658 self._checkhash(revisiondata, node, p1, p2)
659 self._checkhash(revisiondata, node, p1, p2)
659
660
660 if node in self._nodetorev:
661 if node in self._nodetorev:
661 return node
662 return node
662
663
663 node = self._addrawrevision(
664 node = self._addrawrevision(
664 node, revisiondata, transaction, linkrev, p1, p2
665 node, revisiondata, transaction, linkrev, p1, p2
665 )
666 )
666
667
667 self._revisioncache[node] = revisiondata
668 self._revisioncache[node] = revisiondata
668 return node
669 return node
669
670
670 def addgroup(
671 def addgroup(
671 self,
672 self,
672 deltas,
673 deltas,
673 linkmapper,
674 linkmapper,
674 transaction,
675 transaction,
675 addrevisioncb=None,
676 addrevisioncb=None,
676 maybemissingparents=False,
677 maybemissingparents=False,
677 ):
678 ):
678 nodes = []
679 nodes = []
679
680
680 for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
681 for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
681 storeflags = 0
682 storeflags = 0
682
683
683 if wireflags & repository.REVISION_FLAG_CENSORED:
684 if wireflags & repository.REVISION_FLAG_CENSORED:
684 storeflags |= FLAG_CENSORED
685 storeflags |= FLAG_CENSORED
685
686
686 if wireflags & ~repository.REVISION_FLAG_CENSORED:
687 if wireflags & ~repository.REVISION_FLAG_CENSORED:
687 raise SQLiteStoreError(b'unhandled revision flag')
688 raise SQLiteStoreError(b'unhandled revision flag')
688
689
689 if maybemissingparents:
690 if maybemissingparents:
690 if p1 != nullid and not self.hasnode(p1):
691 if p1 != nullid and not self.hasnode(p1):
691 p1 = nullid
692 p1 = nullid
692 storeflags |= FLAG_MISSING_P1
693 storeflags |= FLAG_MISSING_P1
693
694
694 if p2 != nullid and not self.hasnode(p2):
695 if p2 != nullid and not self.hasnode(p2):
695 p2 = nullid
696 p2 = nullid
696 storeflags |= FLAG_MISSING_P2
697 storeflags |= FLAG_MISSING_P2
697
698
698 baserev = self.rev(deltabase)
699 baserev = self.rev(deltabase)
699
700
700 # If base is censored, delta must be full replacement in a single
701 # If base is censored, delta must be full replacement in a single
701 # patch operation.
702 # patch operation.
702 if baserev != nullrev and self.iscensored(baserev):
703 if baserev != nullrev and self.iscensored(baserev):
703 hlen = struct.calcsize(b'>lll')
704 hlen = struct.calcsize(b'>lll')
704 oldlen = len(self.rawdata(deltabase, _verifyhash=False))
705 oldlen = len(self.rawdata(deltabase, _verifyhash=False))
705 newlen = len(delta) - hlen
706 newlen = len(delta) - hlen
706
707
707 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
708 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
708 raise error.CensoredBaseError(self._path, deltabase)
709 raise error.CensoredBaseError(self._path, deltabase)
709
710
710 if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
711 if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
711 delta, baserev, lambda x: len(self.rawdata(x))
712 delta, baserev, lambda x: len(self.rawdata(x))
712 ):
713 ):
713 storeflags |= FLAG_CENSORED
714 storeflags |= FLAG_CENSORED
714
715
715 linkrev = linkmapper(linknode)
716 linkrev = linkmapper(linknode)
716
717
717 nodes.append(node)
718 nodes.append(node)
718
719
719 if node in self._revisions:
720 if node in self._revisions:
720 # Possibly reset parents to make them proper.
721 # Possibly reset parents to make them proper.
721 entry = self._revisions[node]
722 entry = self._revisions[node]
722
723
723 if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
724 if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
724 entry.p1node = p1
725 entry.p1node = p1
725 entry.p1rev = self._nodetorev[p1]
726 entry.p1rev = self._nodetorev[p1]
726 entry.flags &= ~FLAG_MISSING_P1
727 entry.flags &= ~FLAG_MISSING_P1
727
728
728 self._db.execute(
729 self._db.execute(
729 'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?',
730 'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?',
730 (self._nodetorev[p1], entry.flags, entry.rid),
731 (self._nodetorev[p1], entry.flags, entry.rid),
731 )
732 )
732
733
733 if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
734 if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
734 entry.p2node = p2
735 entry.p2node = p2
735 entry.p2rev = self._nodetorev[p2]
736 entry.p2rev = self._nodetorev[p2]
736 entry.flags &= ~FLAG_MISSING_P2
737 entry.flags &= ~FLAG_MISSING_P2
737
738
738 self._db.execute(
739 self._db.execute(
739 'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?',
740 'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?',
740 (self._nodetorev[p2], entry.flags, entry.rid),
741 (self._nodetorev[p2], entry.flags, entry.rid),
741 )
742 )
742
743
743 continue
744 continue
744
745
745 if deltabase == nullid:
746 if deltabase == nullid:
746 text = mdiff.patch(b'', delta)
747 text = mdiff.patch(b'', delta)
747 storedelta = None
748 storedelta = None
748 else:
749 else:
749 text = None
750 text = None
750 storedelta = (deltabase, delta)
751 storedelta = (deltabase, delta)
751
752
752 self._addrawrevision(
753 self._addrawrevision(
753 node,
754 node,
754 text,
755 text,
755 transaction,
756 transaction,
756 linkrev,
757 linkrev,
757 p1,
758 p1,
758 p2,
759 p2,
759 storedelta=storedelta,
760 storedelta=storedelta,
760 flags=storeflags,
761 flags=storeflags,
761 )
762 )
762
763
763 if addrevisioncb:
764 if addrevisioncb:
764 addrevisioncb(self, node)
765 addrevisioncb(self, node)
765
766
766 return nodes
767 return nodes
767
768
768 def censorrevision(self, tr, censornode, tombstone=b''):
769 def censorrevision(self, tr, censornode, tombstone=b''):
769 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
770 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
770
771
771 # This restriction is cargo culted from revlogs and makes no sense for
772 # This restriction is cargo culted from revlogs and makes no sense for
772 # SQLite, since columns can be resized at will.
773 # SQLite, since columns can be resized at will.
773 if len(tombstone) > len(self.rawdata(censornode)):
774 if len(tombstone) > len(self.rawdata(censornode)):
774 raise error.Abort(
775 raise error.Abort(
775 _(b'censor tombstone must be no longer than censored data')
776 _(b'censor tombstone must be no longer than censored data')
776 )
777 )
777
778
778 # We need to replace the censored revision's data with the tombstone.
779 # We need to replace the censored revision's data with the tombstone.
779 # But replacing that data will have implications for delta chains that
780 # But replacing that data will have implications for delta chains that
780 # reference it.
781 # reference it.
781 #
782 #
782 # While "better," more complex strategies are possible, we do something
783 # While "better," more complex strategies are possible, we do something
783 # simple: we find delta chain children of the censored revision and we
784 # simple: we find delta chain children of the censored revision and we
784 # replace those incremental deltas with fulltexts of their corresponding
785 # replace those incremental deltas with fulltexts of their corresponding
785 # revision. Then we delete the now-unreferenced delta and original
786 # revision. Then we delete the now-unreferenced delta and original
786 # revision and insert a replacement.
787 # revision and insert a replacement.
787
788
788 # Find the delta to be censored.
789 # Find the delta to be censored.
789 censoreddeltaid = self._db.execute(
790 censoreddeltaid = self._db.execute(
790 'SELECT deltaid FROM fileindex WHERE id=?',
791 'SELECT deltaid FROM fileindex WHERE id=?',
791 (self._revisions[censornode].rid,),
792 (self._revisions[censornode].rid,),
792 ).fetchone()[0]
793 ).fetchone()[0]
793
794
794 # Find all its delta chain children.
795 # Find all its delta chain children.
795 # TODO once we support storing deltas for !files, we'll need to look
796 # TODO once we support storing deltas for !files, we'll need to look
796 # for those delta chains too.
797 # for those delta chains too.
797 rows = list(
798 rows = list(
798 self._db.execute(
799 self._db.execute(
799 'SELECT id, pathid, node FROM fileindex '
800 'SELECT id, pathid, node FROM fileindex '
800 'WHERE deltabaseid=? OR deltaid=?',
801 'WHERE deltabaseid=? OR deltaid=?',
801 (censoreddeltaid, censoreddeltaid),
802 (censoreddeltaid, censoreddeltaid),
802 )
803 )
803 )
804 )
804
805
805 for row in rows:
806 for row in rows:
806 rid, pathid, node = row
807 rid, pathid, node = row
807
808
808 fulltext = resolvedeltachain(
809 fulltext = resolvedeltachain(
809 self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
810 self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
810 )
811 )
811
812
812 deltahash = hashutil.sha1(fulltext).digest()
813 deltahash = hashutil.sha1(fulltext).digest()
813
814
814 if self._compengine == b'zstd':
815 if self._compengine == b'zstd':
815 deltablob = self._cctx.compress(fulltext)
816 deltablob = self._cctx.compress(fulltext)
816 compression = COMPRESSION_ZSTD
817 compression = COMPRESSION_ZSTD
817 elif self._compengine == b'zlib':
818 elif self._compengine == b'zlib':
818 deltablob = zlib.compress(fulltext)
819 deltablob = zlib.compress(fulltext)
819 compression = COMPRESSION_ZLIB
820 compression = COMPRESSION_ZLIB
820 elif self._compengine == b'none':
821 elif self._compengine == b'none':
821 deltablob = fulltext
822 deltablob = fulltext
822 compression = COMPRESSION_NONE
823 compression = COMPRESSION_NONE
823 else:
824 else:
824 raise error.ProgrammingError(
825 raise error.ProgrammingError(
825 b'unhandled compression engine: %s' % self._compengine
826 b'unhandled compression engine: %s' % self._compengine
826 )
827 )
827
828
828 if len(deltablob) >= len(fulltext):
829 if len(deltablob) >= len(fulltext):
829 deltablob = fulltext
830 deltablob = fulltext
830 compression = COMPRESSION_NONE
831 compression = COMPRESSION_NONE
831
832
832 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
833 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
833
834
834 self._db.execute(
835 self._db.execute(
835 'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
836 'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
836 'WHERE id=?',
837 'WHERE id=?',
837 (deltaid, rid),
838 (deltaid, rid),
838 )
839 )
839
840
840 # Now create the tombstone delta and replace the delta on the censored
841 # Now create the tombstone delta and replace the delta on the censored
841 # node.
842 # node.
842 deltahash = hashutil.sha1(tombstone).digest()
843 deltahash = hashutil.sha1(tombstone).digest()
843 tombstonedeltaid = insertdelta(
844 tombstonedeltaid = insertdelta(
844 self._db, COMPRESSION_NONE, deltahash, tombstone
845 self._db, COMPRESSION_NONE, deltahash, tombstone
845 )
846 )
846
847
847 flags = self._revisions[censornode].flags
848 flags = self._revisions[censornode].flags
848 flags |= FLAG_CENSORED
849 flags |= FLAG_CENSORED
849
850
850 self._db.execute(
851 self._db.execute(
851 'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
852 'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
852 'WHERE pathid=? AND node=?',
853 'WHERE pathid=? AND node=?',
853 (flags, tombstonedeltaid, self._pathid, censornode),
854 (flags, tombstonedeltaid, self._pathid, censornode),
854 )
855 )
855
856
856 self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))
857 self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))
857
858
858 self._refreshindex()
859 self._refreshindex()
859 self._revisioncache.clear()
860 self._revisioncache.clear()
860
861
861 def getstrippoint(self, minlink):
862 def getstrippoint(self, minlink):
862 return storageutil.resolvestripinfo(
863 return storageutil.resolvestripinfo(
863 minlink,
864 minlink,
864 len(self) - 1,
865 len(self) - 1,
865 [self.rev(n) for n in self.heads()],
866 [self.rev(n) for n in self.heads()],
866 self.linkrev,
867 self.linkrev,
867 self.parentrevs,
868 self.parentrevs,
868 )
869 )
869
870
870 def strip(self, minlink, transaction):
871 def strip(self, minlink, transaction):
871 if not len(self):
872 if not len(self):
872 return
873 return
873
874
874 rev, _ignored = self.getstrippoint(minlink)
875 rev, _ignored = self.getstrippoint(minlink)
875
876
876 if rev == len(self):
877 if rev == len(self):
877 return
878 return
878
879
879 for rev in self.revs(rev):
880 for rev in self.revs(rev):
880 self._db.execute(
881 self._db.execute(
881 'DELETE FROM fileindex WHERE pathid=? AND node=?',
882 'DELETE FROM fileindex WHERE pathid=? AND node=?',
882 (self._pathid, self.node(rev)),
883 (self._pathid, self.node(rev)),
883 )
884 )
884
885
885 # TODO how should we garbage collect data in delta table?
886 # TODO how should we garbage collect data in delta table?
886
887
887 self._refreshindex()
888 self._refreshindex()
888
889
889 # End of ifilemutation interface.
890 # End of ifilemutation interface.
890
891
891 # Start of ifilestorage interface.
892 # Start of ifilestorage interface.
892
893
893 def files(self):
894 def files(self):
894 return []
895 return []
895
896
896 def storageinfo(
897 def storageinfo(
897 self,
898 self,
898 exclusivefiles=False,
899 exclusivefiles=False,
899 sharedfiles=False,
900 sharedfiles=False,
900 revisionscount=False,
901 revisionscount=False,
901 trackedsize=False,
902 trackedsize=False,
902 storedsize=False,
903 storedsize=False,
903 ):
904 ):
904 d = {}
905 d = {}
905
906
906 if exclusivefiles:
907 if exclusivefiles:
907 d[b'exclusivefiles'] = []
908 d[b'exclusivefiles'] = []
908
909
909 if sharedfiles:
910 if sharedfiles:
910 # TODO list sqlite file(s) here.
911 # TODO list sqlite file(s) here.
911 d[b'sharedfiles'] = []
912 d[b'sharedfiles'] = []
912
913
913 if revisionscount:
914 if revisionscount:
914 d[b'revisionscount'] = len(self)
915 d[b'revisionscount'] = len(self)
915
916
916 if trackedsize:
917 if trackedsize:
917 d[b'trackedsize'] = sum(
918 d[b'trackedsize'] = sum(
918 len(self.revision(node)) for node in self._nodetorev
919 len(self.revision(node)) for node in self._nodetorev
919 )
920 )
920
921
921 if storedsize:
922 if storedsize:
922 # TODO implement this?
923 # TODO implement this?
923 d[b'storedsize'] = None
924 d[b'storedsize'] = None
924
925
925 return d
926 return d
926
927
927 def verifyintegrity(self, state):
928 def verifyintegrity(self, state):
928 state[b'skipread'] = set()
929 state[b'skipread'] = set()
929
930
930 for rev in self:
931 for rev in self:
931 node = self.node(rev)
932 node = self.node(rev)
932
933
933 try:
934 try:
934 self.revision(node)
935 self.revision(node)
935 except Exception as e:
936 except Exception as e:
936 yield sqliteproblem(
937 yield sqliteproblem(
937 error=_(b'unpacking %s: %s') % (short(node), e), node=node
938 error=_(b'unpacking %s: %s') % (short(node), e), node=node
938 )
939 )
939
940
940 state[b'skipread'].add(node)
941 state[b'skipread'].add(node)
941
942
942 # End of ifilestorage interface.
943 # End of ifilestorage interface.
943
944
944 def _checkhash(self, fulltext, node, p1=None, p2=None):
945 def _checkhash(self, fulltext, node, p1=None, p2=None):
945 if p1 is None and p2 is None:
946 if p1 is None and p2 is None:
946 p1, p2 = self.parents(node)
947 p1, p2 = self.parents(node)
947
948
948 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
949 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
949 return
950 return
950
951
951 try:
952 try:
952 del self._revisioncache[node]
953 del self._revisioncache[node]
953 except KeyError:
954 except KeyError:
954 pass
955 pass
955
956
956 if storageutil.iscensoredtext(fulltext):
957 if storageutil.iscensoredtext(fulltext):
957 raise error.CensoredNodeError(self._path, node, fulltext)
958 raise error.CensoredNodeError(self._path, node, fulltext)
958
959
959 raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)
960 raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)
960
961
961 def _addrawrevision(
962 def _addrawrevision(
962 self,
963 self,
963 node,
964 node,
964 revisiondata,
965 revisiondata,
965 transaction,
966 transaction,
966 linkrev,
967 linkrev,
967 p1,
968 p1,
968 p2,
969 p2,
969 storedelta=None,
970 storedelta=None,
970 flags=0,
971 flags=0,
971 ):
972 ):
972 if self._pathid is None:
973 if self._pathid is None:
973 res = self._db.execute(
974 res = self._db.execute(
974 'INSERT INTO filepath (path) VALUES (?)', (self._path,)
975 'INSERT INTO filepath (path) VALUES (?)', (self._path,)
975 )
976 )
976 self._pathid = res.lastrowid
977 self._pathid = res.lastrowid
977
978
978 # For simplicity, always store a delta against p1.
979 # For simplicity, always store a delta against p1.
979 # TODO we need a lot more logic here to make behavior reasonable.
980 # TODO we need a lot more logic here to make behavior reasonable.
980
981
981 if storedelta:
982 if storedelta:
982 deltabase, delta = storedelta
983 deltabase, delta = storedelta
983
984
984 if isinstance(deltabase, int):
985 if isinstance(deltabase, int):
985 deltabase = self.node(deltabase)
986 deltabase = self.node(deltabase)
986
987
987 else:
988 else:
988 assert revisiondata is not None
989 assert revisiondata is not None
989 deltabase = p1
990 deltabase = p1
990
991
991 if deltabase == nullid:
992 if deltabase == nullid:
992 delta = revisiondata
993 delta = revisiondata
993 else:
994 else:
994 delta = mdiff.textdiff(
995 delta = mdiff.textdiff(
995 self.revision(self.rev(deltabase)), revisiondata
996 self.revision(self.rev(deltabase)), revisiondata
996 )
997 )
997
998
998 # File index stores a pointer to its delta and the parent delta.
999 # File index stores a pointer to its delta and the parent delta.
999 # The parent delta is stored via a pointer to the fileindex PK.
1000 # The parent delta is stored via a pointer to the fileindex PK.
1000 if deltabase == nullid:
1001 if deltabase == nullid:
1001 baseid = None
1002 baseid = None
1002 else:
1003 else:
1003 baseid = self._revisions[deltabase].rid
1004 baseid = self._revisions[deltabase].rid
1004
1005
1005 # Deltas are stored with a hash of their content. This allows
1006 # Deltas are stored with a hash of their content. This allows
1006 # us to de-duplicate. The table is configured to ignore conflicts
1007 # us to de-duplicate. The table is configured to ignore conflicts
1007 # and it is faster to just insert and silently noop than to look
1008 # and it is faster to just insert and silently noop than to look
1008 # first.
1009 # first.
1009 deltahash = hashutil.sha1(delta).digest()
1010 deltahash = hashutil.sha1(delta).digest()
1010
1011
1011 if self._compengine == b'zstd':
1012 if self._compengine == b'zstd':
1012 deltablob = self._cctx.compress(delta)
1013 deltablob = self._cctx.compress(delta)
1013 compression = COMPRESSION_ZSTD
1014 compression = COMPRESSION_ZSTD
1014 elif self._compengine == b'zlib':
1015 elif self._compengine == b'zlib':
1015 deltablob = zlib.compress(delta)
1016 deltablob = zlib.compress(delta)
1016 compression = COMPRESSION_ZLIB
1017 compression = COMPRESSION_ZLIB
1017 elif self._compengine == b'none':
1018 elif self._compengine == b'none':
1018 deltablob = delta
1019 deltablob = delta
1019 compression = COMPRESSION_NONE
1020 compression = COMPRESSION_NONE
1020 else:
1021 else:
1021 raise error.ProgrammingError(
1022 raise error.ProgrammingError(
1022 b'unhandled compression engine: %s' % self._compengine
1023 b'unhandled compression engine: %s' % self._compengine
1023 )
1024 )
1024
1025
1025 # Don't store compressed data if it isn't practical.
1026 # Don't store compressed data if it isn't practical.
1026 if len(deltablob) >= len(delta):
1027 if len(deltablob) >= len(delta):
1027 deltablob = delta
1028 deltablob = delta
1028 compression = COMPRESSION_NONE
1029 compression = COMPRESSION_NONE
1029
1030
1030 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
1031 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
1031
1032
1032 rev = len(self)
1033 rev = len(self)
1033
1034
1034 if p1 == nullid:
1035 if p1 == nullid:
1035 p1rev = nullrev
1036 p1rev = nullrev
1036 else:
1037 else:
1037 p1rev = self._nodetorev[p1]
1038 p1rev = self._nodetorev[p1]
1038
1039
1039 if p2 == nullid:
1040 if p2 == nullid:
1040 p2rev = nullrev
1041 p2rev = nullrev
1041 else:
1042 else:
1042 p2rev = self._nodetorev[p2]
1043 p2rev = self._nodetorev[p2]
1043
1044
1044 rid = self._db.execute(
1045 rid = self._db.execute(
1045 'INSERT INTO fileindex ('
1046 'INSERT INTO fileindex ('
1046 ' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
1047 ' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
1047 ' deltaid, deltabaseid) '
1048 ' deltaid, deltabaseid) '
1048 ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
1049 ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
1049 (
1050 (
1050 self._pathid,
1051 self._pathid,
1051 rev,
1052 rev,
1052 node,
1053 node,
1053 p1rev,
1054 p1rev,
1054 p2rev,
1055 p2rev,
1055 linkrev,
1056 linkrev,
1056 flags,
1057 flags,
1057 deltaid,
1058 deltaid,
1058 baseid,
1059 baseid,
1059 ),
1060 ),
1060 ).lastrowid
1061 ).lastrowid
1061
1062
1062 entry = revisionentry(
1063 entry = revisionentry(
1063 rid=rid,
1064 rid=rid,
1064 rev=rev,
1065 rev=rev,
1065 node=node,
1066 node=node,
1066 p1rev=p1rev,
1067 p1rev=p1rev,
1067 p2rev=p2rev,
1068 p2rev=p2rev,
1068 p1node=p1,
1069 p1node=p1,
1069 p2node=p2,
1070 p2node=p2,
1070 linkrev=linkrev,
1071 linkrev=linkrev,
1071 flags=flags,
1072 flags=flags,
1072 )
1073 )
1073
1074
1074 self._nodetorev[node] = rev
1075 self._nodetorev[node] = rev
1075 self._revtonode[rev] = node
1076 self._revtonode[rev] = node
1076 self._revisions[node] = entry
1077 self._revisions[node] = entry
1077
1078
1078 return node
1079 return node
1079
1080
1080
1081
1081 class sqliterepository(localrepo.localrepository):
1082 class sqliterepository(localrepo.localrepository):
1082 def cancopy(self):
1083 def cancopy(self):
1083 return False
1084 return False
1084
1085
1085 def transaction(self, *args, **kwargs):
1086 def transaction(self, *args, **kwargs):
1086 current = self.currenttransaction()
1087 current = self.currenttransaction()
1087
1088
1088 tr = super(sqliterepository, self).transaction(*args, **kwargs)
1089 tr = super(sqliterepository, self).transaction(*args, **kwargs)
1089
1090
1090 if current:
1091 if current:
1091 return tr
1092 return tr
1092
1093
1093 self._dbconn.execute('BEGIN TRANSACTION')
1094 self._dbconn.execute('BEGIN TRANSACTION')
1094
1095
1095 def committransaction(_):
1096 def committransaction(_):
1096 self._dbconn.commit()
1097 self._dbconn.commit()
1097
1098
1098 tr.addfinalize(b'sqlitestore', committransaction)
1099 tr.addfinalize(b'sqlitestore', committransaction)
1099
1100
1100 return tr
1101 return tr
1101
1102
1102 @property
1103 @property
1103 def _dbconn(self):
1104 def _dbconn(self):
1104 # SQLite connections can only be used on the thread that created
1105 # SQLite connections can only be used on the thread that created
1105 # them. In most cases, this "just works." However, hgweb uses
1106 # them. In most cases, this "just works." However, hgweb uses
1106 # multiple threads.
1107 # multiple threads.
1107 tid = threading.current_thread().ident
1108 tid = threading.current_thread().ident
1108
1109
1109 if self._db:
1110 if self._db:
1110 if self._db[0] == tid:
1111 if self._db[0] == tid:
1111 return self._db[1]
1112 return self._db[1]
1112
1113
1113 db = makedb(self.svfs.join(b'db.sqlite'))
1114 db = makedb(self.svfs.join(b'db.sqlite'))
1114 self._db = (tid, db)
1115 self._db = (tid, db)
1115
1116
1116 return db
1117 return db
1117
1118
1118
1119
1119 def makedb(path):
1120 def makedb(path):
1120 """Construct a database handle for a database at path."""
1121 """Construct a database handle for a database at path."""
1121
1122
1122 db = sqlite3.connect(encoding.strfromlocal(path))
1123 db = sqlite3.connect(encoding.strfromlocal(path))
1123 db.text_factory = bytes
1124 db.text_factory = bytes
1124
1125
1125 res = db.execute('PRAGMA user_version').fetchone()[0]
1126 res = db.execute('PRAGMA user_version').fetchone()[0]
1126
1127
1127 # New database.
1128 # New database.
1128 if res == 0:
1129 if res == 0:
1129 for statement in CREATE_SCHEMA:
1130 for statement in CREATE_SCHEMA:
1130 db.execute(statement)
1131 db.execute(statement)
1131
1132
1132 db.commit()
1133 db.commit()
1133
1134
1134 elif res == CURRENT_SCHEMA_VERSION:
1135 elif res == CURRENT_SCHEMA_VERSION:
1135 pass
1136 pass
1136
1137
1137 else:
1138 else:
1138 raise error.Abort(_(b'sqlite database has unrecognized version'))
1139 raise error.Abort(_(b'sqlite database has unrecognized version'))
1139
1140
1140 db.execute('PRAGMA journal_mode=WAL')
1141 db.execute('PRAGMA journal_mode=WAL')
1141
1142
1142 return db
1143 return db
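makedb() keys off SQLite's `PRAGMA user_version` to decide between creating the schema and validating an existing one. In isolation the pattern looks roughly like the sketch below; CREATE_SCHEMA and CURRENT_SCHEMA_VERSION here are placeholders, not the extension's real schema (which is defined in the truncated part of this file).

import sqlite3

CURRENT_SCHEMA_VERSION = 1
CREATE_SCHEMA = [
    'CREATE TABLE kv (k TEXT PRIMARY KEY, v BLOB)',
    'PRAGMA user_version=1',
]

def opendb(path):
    db = sqlite3.connect(path)
    version = db.execute('PRAGMA user_version').fetchone()[0]
    if version == 0:
        # Brand new database: create everything and stamp the version.
        for statement in CREATE_SCHEMA:
            db.execute(statement)
        db.commit()
    elif version != CURRENT_SCHEMA_VERSION:
        raise RuntimeError('sqlite database has unrecognized version')
    return db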
1143
1144
1144
1145
1145 def featuresetup(ui, supported):
1146 def featuresetup(ui, supported):
1146 supported.add(REQUIREMENT)
1147 supported.add(REQUIREMENT)
1147
1148
1148 if zstd:
1149 if zstd:
1149 supported.add(REQUIREMENT_ZSTD)
1150 supported.add(REQUIREMENT_ZSTD)
1150
1151
1151 supported.add(REQUIREMENT_ZLIB)
1152 supported.add(REQUIREMENT_ZLIB)
1152 supported.add(REQUIREMENT_NONE)
1153 supported.add(REQUIREMENT_NONE)
1153 supported.add(REQUIREMENT_SHALLOW_FILES)
1154 supported.add(REQUIREMENT_SHALLOW_FILES)
1154 supported.add(repository.NARROW_REQUIREMENT)
1155 supported.add(requirements.NARROW_REQUIREMENT)
1155
1156
1156
1157
1157 def newreporequirements(orig, ui, createopts):
1158 def newreporequirements(orig, ui, createopts):
1158 if createopts[b'backend'] != b'sqlite':
1159 if createopts[b'backend'] != b'sqlite':
1159 return orig(ui, createopts)
1160 return orig(ui, createopts)
1160
1161
1161 # This restriction can be lifted once we have more confidence.
1162 # This restriction can be lifted once we have more confidence.
1162 if b'sharedrepo' in createopts:
1163 if b'sharedrepo' in createopts:
1163 raise error.Abort(
1164 raise error.Abort(
1164 _(b'shared repositories not supported with SQLite store')
1165 _(b'shared repositories not supported with SQLite store')
1165 )
1166 )
1166
1167
1167 # This filtering is out of an abundance of caution: we want to ensure
1168 # This filtering is out of an abundance of caution: we want to ensure
1168 # we honor creation options and we do that by annotating exactly the
1169 # we honor creation options and we do that by annotating exactly the
1169 # creation options we recognize.
1170 # creation options we recognize.
1170 known = {
1171 known = {
1171 b'narrowfiles',
1172 b'narrowfiles',
1172 b'backend',
1173 b'backend',
1173 b'shallowfilestore',
1174 b'shallowfilestore',
1174 }
1175 }
1175
1176
1176 unsupported = set(createopts) - known
1177 unsupported = set(createopts) - known
1177 if unsupported:
1178 if unsupported:
1178 raise error.Abort(
1179 raise error.Abort(
1179 _(b'SQLite store does not support repo creation option: %s')
1180 _(b'SQLite store does not support repo creation option: %s')
1180 % b', '.join(sorted(unsupported))
1181 % b', '.join(sorted(unsupported))
1181 )
1182 )
1182
1183
1183 # Since we're a hybrid store that still relies on revlogs, we fall back
1184 # Since we're a hybrid store that still relies on revlogs, we fall back
1184 # to using the revlogv1 backend's storage requirements then adding our
1185 # to using the revlogv1 backend's storage requirements then adding our
1185 # own requirement.
1186 # own requirement.
1186 createopts[b'backend'] = b'revlogv1'
1187 createopts[b'backend'] = b'revlogv1'
1187 requirements = orig(ui, createopts)
1188 requirements = orig(ui, createopts)
1188 requirements.add(REQUIREMENT)
1189 requirements.add(REQUIREMENT)
1189
1190
1190 compression = ui.config(b'storage', b'sqlite.compression')
1191 compression = ui.config(b'storage', b'sqlite.compression')
1191
1192
1192 if compression == b'zstd' and not zstd:
1193 if compression == b'zstd' and not zstd:
1193 raise error.Abort(
1194 raise error.Abort(
1194 _(
1195 _(
1195 b'storage.sqlite.compression set to "zstd" but '
1196 b'storage.sqlite.compression set to "zstd" but '
1196 b'zstandard compression not available to this '
1197 b'zstandard compression not available to this '
1197 b'Mercurial install'
1198 b'Mercurial install'
1198 )
1199 )
1199 )
1200 )
1200
1201
1201 if compression == b'zstd':
1202 if compression == b'zstd':
1202 requirements.add(REQUIREMENT_ZSTD)
1203 requirements.add(REQUIREMENT_ZSTD)
1203 elif compression == b'zlib':
1204 elif compression == b'zlib':
1204 requirements.add(REQUIREMENT_ZLIB)
1205 requirements.add(REQUIREMENT_ZLIB)
1205 elif compression == b'none':
1206 elif compression == b'none':
1206 requirements.add(REQUIREMENT_NONE)
1207 requirements.add(REQUIREMENT_NONE)
1207 else:
1208 else:
1208 raise error.Abort(
1209 raise error.Abort(
1209 _(
1210 _(
1210 b'unknown compression engine defined in '
1211 b'unknown compression engine defined in '
1211 b'storage.sqlite.compression: %s'
1212 b'storage.sqlite.compression: %s'
1212 )
1213 )
1213 % compression
1214 % compression
1214 )
1215 )
1215
1216
1216 if createopts.get(b'shallowfilestore'):
1217 if createopts.get(b'shallowfilestore'):
1217 requirements.add(REQUIREMENT_SHALLOW_FILES)
1218 requirements.add(REQUIREMENT_SHALLOW_FILES)
1218
1219
1219 return requirements
1220 return requirements
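As a usage note, which of the requirements above end up in a new repository is driven by the repo creation options and by the `storage.sqlite.compression` config read a few lines up. Assuming the backend itself is selected through a `storage.new-repo-backend` option (an assumption here; the selection mechanism is not shown in the visible code), enabling this store might look like:

[extensions]
sqlitestore =

[storage]
new-repo-backend = sqlite
sqlite.compression = zstd

$ hg init repo   # the new repo's requirements then include the SQLite entries above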
1220
1221
1221
1222
1222 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1223 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1223 class sqlitefilestorage(object):
1224 class sqlitefilestorage(object):
1224 """Repository file storage backed by SQLite."""
1225 """Repository file storage backed by SQLite."""
1225
1226
1226 def file(self, path):
1227 def file(self, path):
1227 if path[0] == b'/':
1228 if path[0] == b'/':
1228 path = path[1:]
1229 path = path[1:]
1229
1230
1230 if REQUIREMENT_ZSTD in self.requirements:
1231 if REQUIREMENT_ZSTD in self.requirements:
1231 compression = b'zstd'
1232 compression = b'zstd'
1232 elif REQUIREMENT_ZLIB in self.requirements:
1233 elif REQUIREMENT_ZLIB in self.requirements:
1233 compression = b'zlib'
1234 compression = b'zlib'
1234 elif REQUIREMENT_NONE in self.requirements:
1235 elif REQUIREMENT_NONE in self.requirements:
1235 compression = b'none'
1236 compression = b'none'
1236 else:
1237 else:
1237 raise error.Abort(
1238 raise error.Abort(
1238 _(
1239 _(
1239 b'unable to determine what compression engine '
1240 b'unable to determine what compression engine '
1240 b'to use for SQLite storage'
1241 b'to use for SQLite storage'
1241 )
1242 )
1242 )
1243 )
1243
1244
1244 return sqlitefilestore(self._dbconn, path, compression)
1245 return sqlitefilestore(self._dbconn, path, compression)
1245
1246
1246
1247
1247 def makefilestorage(orig, requirements, features, **kwargs):
1248 def makefilestorage(orig, requirements, features, **kwargs):
1248 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1249 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1249 if REQUIREMENT in requirements:
1250 if REQUIREMENT in requirements:
1250 if REQUIREMENT_SHALLOW_FILES in requirements:
1251 if REQUIREMENT_SHALLOW_FILES in requirements:
1251 features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
1252 features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
1252
1253
1253 return sqlitefilestorage
1254 return sqlitefilestorage
1254 else:
1255 else:
1255 return orig(requirements=requirements, features=features, **kwargs)
1256 return orig(requirements=requirements, features=features, **kwargs)
1256
1257
1257
1258
1258 def makemain(orig, ui, requirements, **kwargs):
1259 def makemain(orig, ui, requirements, **kwargs):
1259 if REQUIREMENT in requirements:
1260 if REQUIREMENT in requirements:
1260 if REQUIREMENT_ZSTD in requirements and not zstd:
1261 if REQUIREMENT_ZSTD in requirements and not zstd:
1261 raise error.Abort(
1262 raise error.Abort(
1262 _(
1263 _(
1263 b'repository uses zstandard compression, which '
1264 b'repository uses zstandard compression, which '
1264 b'is not available to this Mercurial install'
1265 b'is not available to this Mercurial install'
1265 )
1266 )
1266 )
1267 )
1267
1268
1268 return sqliterepository
1269 return sqliterepository
1269
1270
1270 return orig(requirements=requirements, **kwargs)
1271 return orig(requirements=requirements, **kwargs)
1271
1272
1272
1273
1273 def verifierinit(orig, self, *args, **kwargs):
1274 def verifierinit(orig, self, *args, **kwargs):
1274 orig(self, *args, **kwargs)
1275 orig(self, *args, **kwargs)
1275
1276
1276 # We don't care that files in the store don't align with what is
1277 # We don't care that files in the store don't align with what is
1277 # advertised. So suppress these warnings.
1278 # advertised. So suppress these warnings.
1278 self.warnorphanstorefiles = False
1279 self.warnorphanstorefiles = False
1279
1280
1280
1281
1281 def extsetup(ui):
1282 def extsetup(ui):
1282 localrepo.featuresetupfuncs.add(featuresetup)
1283 localrepo.featuresetupfuncs.add(featuresetup)
1283 extensions.wrapfunction(
1284 extensions.wrapfunction(
1284 localrepo, b'newreporequirements', newreporequirements
1285 localrepo, b'newreporequirements', newreporequirements
1285 )
1286 )
1286 extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage)
1287 extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage)
1287 extensions.wrapfunction(localrepo, b'makemain', makemain)
1288 extensions.wrapfunction(localrepo, b'makemain', makemain)
1288 extensions.wrapfunction(verify.verifier, b'__init__', verifierinit)
1289 extensions.wrapfunction(verify.verifier, b'__init__', verifierinit)
1289
1290
1290
1291
1291 def reposetup(ui, repo):
1292 def reposetup(ui, repo):
1292 if isinstance(repo, sqliterepository):
1293 if isinstance(repo, sqliterepository):
1293 repo._db = None
1294 repo._db = None
1294
1295
1295 # TODO check for bundlerepository?
1296 # TODO check for bundlerepository?
@@ -1,2585 +1,2585 @@
1 # bundle2.py - generic container format to transmit arbitrary data.
1 # bundle2.py - generic container format to transmit arbitrary data.
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """Handling of the new bundle2 format
7 """Handling of the new bundle2 format
8
8
9 The goal of bundle2 is to act as an atomic packet to transmit a set of
9 The goal of bundle2 is to act as an atomic packet to transmit a set of
10 payloads in an application agnostic way. It consists of a sequence of "parts"
10 payloads in an application agnostic way. It consists of a sequence of "parts"
11 that will be handed to and processed by the application layer.
11 that will be handed to and processed by the application layer.
12
12
13
13
14 General format architecture
14 General format architecture
15 ===========================
15 ===========================
16
16
17 The format is architected as follows
17 The format is architected as follows
18
18
19 - magic string
19 - magic string
20 - stream level parameters
20 - stream level parameters
21 - payload parts (any number)
21 - payload parts (any number)
22 - end of stream marker.
22 - end of stream marker.
23
23
24 The binary format
24 The binary format
25 ============================
25 ============================
26
26
27 All numbers are unsigned and big-endian.
27 All numbers are unsigned and big-endian.
28
28
29 stream level parameters
29 stream level parameters
30 ------------------------
30 ------------------------
31
31
32 The binary format is as follows
32 The binary format is as follows
33
33
34 :params size: int32
34 :params size: int32
35
35
36 The total number of Bytes used by the parameters
36 The total number of Bytes used by the parameters
37
37
38 :params value: arbitrary number of Bytes
38 :params value: arbitrary number of Bytes
39
39
40 A blob of `params size` containing the serialized version of all stream level
40 A blob of `params size` containing the serialized version of all stream level
41 parameters.
41 parameters.
42
42
43 The blob contains a space separated list of parameters. Parameters with value
43 The blob contains a space separated list of parameters. Parameters with value
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
45
45
46 Empty names are forbidden.
46 Empty names are forbidden.
47
47
48 Names MUST start with a letter. If this first letter is lower case, the
48 Names MUST start with a letter. If this first letter is lower case, the
49 parameter is advisory and can be safely ignored. However, when the first
49 parameter is advisory and can be safely ignored. However, when the first
50 letter is capital, the parameter is mandatory and the bundling process MUST
50 letter is capital, the parameter is mandatory and the bundling process MUST
51 stop if it is not able to process it.
51 stop if it is not able to process it.
52
52
53 Stream parameters use a simple textual format for two main reasons:
53 Stream parameters use a simple textual format for two main reasons:
54
54
55 - Stream level parameters should remain simple and we want to discourage any
55 - Stream level parameters should remain simple and we want to discourage any
56 crazy usage.
56 crazy usage.
57 - Textual data allows easy human inspection of a bundle2 header in case of
57 - Textual data allows easy human inspection of a bundle2 header in case of
58 trouble.
58 trouble.
59
59
60 Any application level options MUST go into a bundle2 part instead.
60 Any application level options MUST go into a bundle2 part instead.
61
61
62 Payload part
62 Payload part
63 ------------------------
63 ------------------------
64
64
65 The binary format is as follows
65 The binary format is as follows
66
66
67 :header size: int32
67 :header size: int32
68
68
69 The total number of Bytes used by the part header. When the header is empty
69 The total number of Bytes used by the part header. When the header is empty
70 (size = 0) this is interpreted as the end of stream marker.
70 (size = 0) this is interpreted as the end of stream marker.
71
71
72 :header:
72 :header:
73
73
74 The header defines how to interpret the part. It contains two pieces of
74 The header defines how to interpret the part. It contains two pieces of
75 data: the part type, and the part parameters.
75 data: the part type, and the part parameters.
76
76
77 The part type is used to route to an application level handler that can
77 The part type is used to route to an application level handler that can
78 interpret the payload.
78 interpret the payload.
79
79
80 Part parameters are passed to the application level handler. They are
80 Part parameters are passed to the application level handler. They are
81 meant to convey information that will help the application level object to
81 meant to convey information that will help the application level object to
82 interpret the part payload.
82 interpret the part payload.
83
83
84 The binary format of the header is as follows
84 The binary format of the header is as follows
85
85
86 :typesize: (one byte)
86 :typesize: (one byte)
87
87
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
89
89
90 :partid: A 32-bit integer (unique in the bundle) that can be used to refer
90 :partid: A 32-bit integer (unique in the bundle) that can be used to refer
91 to this part.
91 to this part.
92
92
93 :parameters:
93 :parameters:
94
94
95 A part's parameters may have arbitrary content; the binary structure is::
95 A part's parameters may have arbitrary content; the binary structure is::
96
96
97 <mandatory-count><advisory-count><param-sizes><param-data>
97 <mandatory-count><advisory-count><param-sizes><param-data>
98
98
99 :mandatory-count: 1 byte, number of mandatory parameters
99 :mandatory-count: 1 byte, number of mandatory parameters
100
100
101 :advisory-count: 1 byte, number of advisory parameters
101 :advisory-count: 1 byte, number of advisory parameters
102
102
103 :param-sizes:
103 :param-sizes:
104
104
105 N couples of bytes, where N is the total number of parameters. Each
105 N couples of bytes, where N is the total number of parameters. Each
106 couple contains (<size-of-key>, <size-of-value>) for one parameter.
106 couple contains (<size-of-key>, <size-of-value>) for one parameter.
107
107
108 :param-data:
108 :param-data:
109
109
110 A blob of bytes from which each parameter key and value can be
110 A blob of bytes from which each parameter key and value can be
111 retrieved using the list of size couples stored in the previous
111 retrieved using the list of size couples stored in the previous
112 field.
112 field.
113
113
114 Mandatory parameters come first, then the advisory ones.
114 Mandatory parameters come first, then the advisory ones.
115
115
116 Each parameter's key MUST be unique within the part.
116 Each parameter's key MUST be unique within the part.
117
117
118 :payload:
118 :payload:
119
119
120 The payload is a series of `<chunksize><chunkdata>`.
120 The payload is a series of `<chunksize><chunkdata>`.
121
121
122 `chunksize` is an int32, `chunkdata` is plain bytes (as many as
122 `chunksize` is an int32, `chunkdata` is plain bytes (as many as
123 `chunksize` says). The payload part is concluded by a zero-size chunk.
123 `chunksize` says). The payload part is concluded by a zero-size chunk.
124
124
125 The current implementation always produces either zero or one chunk.
125 The current implementation always produces either zero or one chunk.
126 This is an implementation limitation that will ultimately be lifted.
126 This is an implementation limitation that will ultimately be lifted.
127
127
128 `chunksize` can be negative to trigger special case processing. No such
128 `chunksize` can be negative to trigger special case processing. No such
129 processing is in place yet.
129 processing is in place yet.
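As a rough sketch of the read side (`iter_payload_chunks` is an illustrative
name, not part of this module; `fp` is assumed to be positioned right after
the part header)::

    import struct

    def iter_payload_chunks(fp):
        # keep reading <chunksize><chunkdata> until the zero-size terminator
        while True:
            chunksize = struct.unpack('>i', fp.read(4))[0]
            if chunksize == 0:
                return
            if chunksize < 0:
                raise ValueError('special (negative) chunk sizes not handled')
            yield fp.read(chunksize)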
130
130
131 Bundle processing
131 Bundle processing
132 ============================
132 ============================
133
133
134 Each part is processed in order using a "part handler". Handlers are registered
134 Each part is processed in order using a "part handler". Handlers are registered
135 for a certain part type.
135 for a certain part type.
136
136
137 The matching of a part to its handler is case insensitive. The case of the
137 The matching of a part to its handler is case insensitive. The case of the
138 part type is used to know if a part is mandatory or advisory. If the Part type
138 part type is used to know if a part is mandatory or advisory. If the Part type
139 contains any uppercase char it is considered mandatory. When no handler is
139 contains any uppercase char it is considered mandatory. When no handler is
140 known for a Mandatory part, the process is aborted and an exception is raised.
140 known for a Mandatory part, the process is aborted and an exception is raised.
141 If the part is advisory and no handler is known, the part is ignored. When the
141 If the part is advisory and no handler is known, the part is ignored. When the
142 process is aborted, the full bundle is still read from the stream to keep the
142 process is aborted, the full bundle is still read from the stream to keep the
143 channel usable, but none of the parts read after an abort are processed. In the
143 channel usable, but none of the parts read after an abort are processed. In the
144 future, dropping the stream may become an option for channels we do not care to
144 future, dropping the stream may become an option for channels we do not care to
145 preserve.
145 preserve.
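With hypothetical part types, the case rule boils down to::

    def ismandatory(parttype):
        # any uppercase character in the part type marks the part mandatory
        return parttype != parttype.lower()

    ismandatory(b'output')          # False: advisory, unknown handler ignored
    ismandatory(b'CHECK:example')   # True: mandatory, unknown handler aborts

(`ismandatory` and `CHECK:example` only illustrate the rule; they are not
names used by the implementation.)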
146 """
146 """
147
147
148 from __future__ import absolute_import, division
148 from __future__ import absolute_import, division
149
149
150 import collections
150 import collections
151 import errno
151 import errno
152 import os
152 import os
153 import re
153 import re
154 import string
154 import string
155 import struct
155 import struct
156 import sys
156 import sys
157
157
158 from .i18n import _
158 from .i18n import _
159 from . import (
159 from . import (
160 bookmarks,
160 bookmarks,
161 changegroup,
161 changegroup,
162 encoding,
162 encoding,
163 error,
163 error,
164 node as nodemod,
164 node as nodemod,
165 obsolete,
165 obsolete,
166 phases,
166 phases,
167 pushkey,
167 pushkey,
168 pycompat,
168 pycompat,
169 requirements,
169 scmutil,
170 scmutil,
170 streamclone,
171 streamclone,
171 tags,
172 tags,
172 url,
173 url,
173 util,
174 util,
174 )
175 )
175 from .interfaces import repository
176 from .utils import stringutil
176 from .utils import stringutil
177
177
178 urlerr = util.urlerr
178 urlerr = util.urlerr
179 urlreq = util.urlreq
179 urlreq = util.urlreq
180
180
181 _pack = struct.pack
181 _pack = struct.pack
182 _unpack = struct.unpack
182 _unpack = struct.unpack
183
183
184 _fstreamparamsize = b'>i'
184 _fstreamparamsize = b'>i'
185 _fpartheadersize = b'>i'
185 _fpartheadersize = b'>i'
186 _fparttypesize = b'>B'
186 _fparttypesize = b'>B'
187 _fpartid = b'>I'
187 _fpartid = b'>I'
188 _fpayloadsize = b'>i'
188 _fpayloadsize = b'>i'
189 _fpartparamcount = b'>BB'
189 _fpartparamcount = b'>BB'
190
190
191 preferedchunksize = 32768
191 preferedchunksize = 32768
192
192
193 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
193 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
194
194
195
195
196 def outdebug(ui, message):
196 def outdebug(ui, message):
197 """debug regarding output stream (bundling)"""
197 """debug regarding output stream (bundling)"""
198 if ui.configbool(b'devel', b'bundle2.debug'):
198 if ui.configbool(b'devel', b'bundle2.debug'):
199 ui.debug(b'bundle2-output: %s\n' % message)
199 ui.debug(b'bundle2-output: %s\n' % message)
200
200
201
201
202 def indebug(ui, message):
202 def indebug(ui, message):
203 """debug on input stream (unbundling)"""
203 """debug on input stream (unbundling)"""
204 if ui.configbool(b'devel', b'bundle2.debug'):
204 if ui.configbool(b'devel', b'bundle2.debug'):
205 ui.debug(b'bundle2-input: %s\n' % message)
205 ui.debug(b'bundle2-input: %s\n' % message)
206
206
207
207
208 def validateparttype(parttype):
208 def validateparttype(parttype):
209 """raise ValueError if a parttype contains invalid character"""
209 """raise ValueError if a parttype contains invalid character"""
210 if _parttypeforbidden.search(parttype):
210 if _parttypeforbidden.search(parttype):
211 raise ValueError(parttype)
211 raise ValueError(parttype)
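# Illustrative values: validateparttype(b'my-part_type:0') passes, while
# validateparttype(b'bad part') raises ValueError because of the space.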
212
212
213
213
214 def _makefpartparamsizes(nbparams):
214 def _makefpartparamsizes(nbparams):
215 """return a struct format to read part parameter sizes
215 """return a struct format to read part parameter sizes
216
216
217 The number of parameters is variable, so we need to build that format
217 The number of parameters is variable, so we need to build that format
218 dynamically.
218 dynamically.
219 """
219 """
220 return b'>' + (b'BB' * nbparams)
220 return b'>' + (b'BB' * nbparams)
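# For example, _makefpartparamsizes(2) returns b'>BBBB', which unpacks the
# (key size, value size) byte pairs of two parameters in a single struct call.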
221
221
222
222
223 parthandlermapping = {}
223 parthandlermapping = {}
224
224
225
225
226 def parthandler(parttype, params=()):
226 def parthandler(parttype, params=()):
227 """decorator that register a function as a bundle2 part handler
227 """decorator that register a function as a bundle2 part handler
228
228
229 eg::
229 eg::
230
230
231 @parthandler('myparttype', ('mandatory', 'param', 'handled'))
231 @parthandler('myparttype', ('mandatory', 'param', 'handled'))
232 def myparttypehandler(...):
232 def myparttypehandler(...):
233 '''process a part of type "my part".'''
233 '''process a part of type "my part".'''
234 ...
234 ...
235 """
235 """
236 validateparttype(parttype)
236 validateparttype(parttype)
237
237
238 def _decorator(func):
238 def _decorator(func):
239 lparttype = parttype.lower() # enforce lower case matching.
239 lparttype = parttype.lower() # enforce lower case matching.
240 assert lparttype not in parthandlermapping
240 assert lparttype not in parthandlermapping
241 parthandlermapping[lparttype] = func
241 parthandlermapping[lparttype] = func
242 func.params = frozenset(params)
242 func.params = frozenset(params)
243 return func
243 return func
244
244
245 return _decorator
245 return _decorator
246
246
247
247
248 class unbundlerecords(object):
248 class unbundlerecords(object):
249 """keep record of what happens during and unbundle
249 """keep record of what happens during and unbundle
250
250
251 New records are added using `records.add('cat', obj)`, where 'cat' is a
251 New records are added using `records.add('cat', obj)`, where 'cat' is a
252 category of record and obj is an arbitrary object.
252 category of record and obj is an arbitrary object.
253
253
254 `records['cat']` will return all entries of this category 'cat'.
254 `records['cat']` will return all entries of this category 'cat'.
255
255
256 Iterating on the object itself will yield `('category', obj)` tuples
256 Iterating on the object itself will yield `('category', obj)` tuples
257 for all entries.
257 for all entries.
258
258
259 All iterations happen in chronological order.
259 All iterations happen in chronological order.
260 """
260 """
261
261
262 def __init__(self):
262 def __init__(self):
263 self._categories = {}
263 self._categories = {}
264 self._sequences = []
264 self._sequences = []
265 self._replies = {}
265 self._replies = {}
266
266
267 def add(self, category, entry, inreplyto=None):
267 def add(self, category, entry, inreplyto=None):
268 """add a new record of a given category.
268 """add a new record of a given category.
269
269
270 The entry can then be retrieved in the list returned by
270 The entry can then be retrieved in the list returned by
271 self['category']."""
271 self['category']."""
272 self._categories.setdefault(category, []).append(entry)
272 self._categories.setdefault(category, []).append(entry)
273 self._sequences.append((category, entry))
273 self._sequences.append((category, entry))
274 if inreplyto is not None:
274 if inreplyto is not None:
275 self.getreplies(inreplyto).add(category, entry)
275 self.getreplies(inreplyto).add(category, entry)
276
276
277 def getreplies(self, partid):
277 def getreplies(self, partid):
278 """get the records that are replies to a specific part"""
278 """get the records that are replies to a specific part"""
279 return self._replies.setdefault(partid, unbundlerecords())
279 return self._replies.setdefault(partid, unbundlerecords())
280
280
281 def __getitem__(self, cat):
281 def __getitem__(self, cat):
282 return tuple(self._categories.get(cat, ()))
282 return tuple(self._categories.get(cat, ()))
283
283
284 def __iter__(self):
284 def __iter__(self):
285 return iter(self._sequences)
285 return iter(self._sequences)
286
286
287 def __len__(self):
287 def __len__(self):
288 return len(self._sequences)
288 return len(self._sequences)
289
289
290 def __nonzero__(self):
290 def __nonzero__(self):
291 return bool(self._sequences)
291 return bool(self._sequences)
292
292
293 __bool__ = __nonzero__
293 __bool__ = __nonzero__
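# Illustrative use (values are made up):
#
#   records = unbundlerecords()
#   records.add(b'changegroup', {b'return': 1})
#   records[b'changegroup']   # -> ({b'return': 1},)
#   list(records)             # -> [(b'changegroup', {b'return': 1})]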
294
294
295
295
296 class bundleoperation(object):
296 class bundleoperation(object):
297 """an object that represents a single bundling process
297 """an object that represents a single bundling process
298
298
299 Its purpose is to carry unbundle-related objects and states.
299 Its purpose is to carry unbundle-related objects and states.
300
300
301 A new object should be created at the beginning of each bundle processing.
301 A new object should be created at the beginning of each bundle processing.
302 The object is to be returned by the processing function.
302 The object is to be returned by the processing function.
303
303
304 The object has very little content now; it will ultimately contain:
304 The object has very little content now; it will ultimately contain:
305 * an access to the repo the bundle is applied to,
305 * an access to the repo the bundle is applied to,
306 * a ui object,
306 * a ui object,
307 * a way to retrieve a transaction to add changes to the repo,
307 * a way to retrieve a transaction to add changes to the repo,
308 * a way to record the result of processing each part,
308 * a way to record the result of processing each part,
309 * a way to construct a bundle response when applicable.
309 * a way to construct a bundle response when applicable.
310 """
310 """
311
311
312 def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
312 def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
313 self.repo = repo
313 self.repo = repo
314 self.ui = repo.ui
314 self.ui = repo.ui
315 self.records = unbundlerecords()
315 self.records = unbundlerecords()
316 self.reply = None
316 self.reply = None
317 self.captureoutput = captureoutput
317 self.captureoutput = captureoutput
318 self.hookargs = {}
318 self.hookargs = {}
319 self._gettransaction = transactiongetter
319 self._gettransaction = transactiongetter
320 # carries value that can modify part behavior
320 # carries value that can modify part behavior
321 self.modes = {}
321 self.modes = {}
322 self.source = source
322 self.source = source
323
323
324 def gettransaction(self):
324 def gettransaction(self):
325 transaction = self._gettransaction()
325 transaction = self._gettransaction()
326
326
327 if self.hookargs:
327 if self.hookargs:
328 # the ones added to the transaction supersede those added
328 # the ones added to the transaction supersede those added
329 # to the operation.
329 # to the operation.
330 self.hookargs.update(transaction.hookargs)
330 self.hookargs.update(transaction.hookargs)
331 transaction.hookargs = self.hookargs
331 transaction.hookargs = self.hookargs
332
332
333 # mark the hookargs as flushed. further attempts to add to
333 # mark the hookargs as flushed. further attempts to add to
334 # hookargs will result in an abort.
334 # hookargs will result in an abort.
335 self.hookargs = None
335 self.hookargs = None
336
336
337 return transaction
337 return transaction
338
338
339 def addhookargs(self, hookargs):
339 def addhookargs(self, hookargs):
340 if self.hookargs is None:
340 if self.hookargs is None:
341 raise error.ProgrammingError(
341 raise error.ProgrammingError(
342 b'attempted to add hookargs to '
342 b'attempted to add hookargs to '
343 b'operation after transaction started'
343 b'operation after transaction started'
344 )
344 )
345 self.hookargs.update(hookargs)
345 self.hookargs.update(hookargs)
346
346
347
347
348 class TransactionUnavailable(RuntimeError):
348 class TransactionUnavailable(RuntimeError):
349 pass
349 pass
350
350
351
351
352 def _notransaction():
352 def _notransaction():
353 """default method to get a transaction while processing a bundle
353 """default method to get a transaction while processing a bundle
354
354
355 Raise an exception to highlight the fact that no transaction was expected
355 Raise an exception to highlight the fact that no transaction was expected
356 to be created"""
356 to be created"""
357 raise TransactionUnavailable()
357 raise TransactionUnavailable()
358
358
359
359
360 def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
360 def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
361 # transform me into unbundler.apply() as soon as the freeze is lifted
361 # transform me into unbundler.apply() as soon as the freeze is lifted
362 if isinstance(unbundler, unbundle20):
362 if isinstance(unbundler, unbundle20):
363 tr.hookargs[b'bundle2'] = b'1'
363 tr.hookargs[b'bundle2'] = b'1'
364 if source is not None and b'source' not in tr.hookargs:
364 if source is not None and b'source' not in tr.hookargs:
365 tr.hookargs[b'source'] = source
365 tr.hookargs[b'source'] = source
366 if url is not None and b'url' not in tr.hookargs:
366 if url is not None and b'url' not in tr.hookargs:
367 tr.hookargs[b'url'] = url
367 tr.hookargs[b'url'] = url
368 return processbundle(repo, unbundler, lambda: tr, source=source)
368 return processbundle(repo, unbundler, lambda: tr, source=source)
369 else:
369 else:
370 # the transactiongetter won't be used, but we might as well set it
370 # the transactiongetter won't be used, but we might as well set it
371 op = bundleoperation(repo, lambda: tr, source=source)
371 op = bundleoperation(repo, lambda: tr, source=source)
372 _processchangegroup(op, unbundler, tr, source, url, **kwargs)
372 _processchangegroup(op, unbundler, tr, source, url, **kwargs)
373 return op
373 return op
374
374
375
375
376 class partiterator(object):
376 class partiterator(object):
377 def __init__(self, repo, op, unbundler):
377 def __init__(self, repo, op, unbundler):
378 self.repo = repo
378 self.repo = repo
379 self.op = op
379 self.op = op
380 self.unbundler = unbundler
380 self.unbundler = unbundler
381 self.iterator = None
381 self.iterator = None
382 self.count = 0
382 self.count = 0
383 self.current = None
383 self.current = None
384
384
385 def __enter__(self):
385 def __enter__(self):
386 def func():
386 def func():
387 itr = enumerate(self.unbundler.iterparts(), 1)
387 itr = enumerate(self.unbundler.iterparts(), 1)
388 for count, p in itr:
388 for count, p in itr:
389 self.count = count
389 self.count = count
390 self.current = p
390 self.current = p
391 yield p
391 yield p
392 p.consume()
392 p.consume()
393 self.current = None
393 self.current = None
394
394
395 self.iterator = func()
395 self.iterator = func()
396 return self.iterator
396 return self.iterator
397
397
398 def __exit__(self, type, exc, tb):
398 def __exit__(self, type, exc, tb):
399 if not self.iterator:
399 if not self.iterator:
400 return
400 return
401
401
402 # Only gracefully abort in a normal exception situation. User aborts
402 # Only gracefully abort in a normal exception situation. User aborts
403 # like Ctrl+C throw a KeyboardInterrupt, which does not derive from Exception,
403 # like Ctrl+C throw a KeyboardInterrupt, which does not derive from Exception,
404 # and should not be gracefully cleaned up.
404 # and should not be gracefully cleaned up.
405 if isinstance(exc, Exception):
405 if isinstance(exc, Exception):
406 # Any exceptions seeking to the end of the bundle at this point are
406 # Any exceptions seeking to the end of the bundle at this point are
407 # almost certainly related to the underlying stream being bad.
407 # almost certainly related to the underlying stream being bad.
408 # And, chances are that the exception we're handling is related to
408 # And, chances are that the exception we're handling is related to
409 # getting in that bad state. So, we swallow the seeking error and
409 # getting in that bad state. So, we swallow the seeking error and
410 # re-raise the original error.
410 # re-raise the original error.
411 seekerror = False
411 seekerror = False
412 try:
412 try:
413 if self.current:
413 if self.current:
414 # consume the part content to not corrupt the stream.
414 # consume the part content to not corrupt the stream.
415 self.current.consume()
415 self.current.consume()
416
416
417 for part in self.iterator:
417 for part in self.iterator:
418 # consume the bundle content
418 # consume the bundle content
419 part.consume()
419 part.consume()
420 except Exception:
420 except Exception:
421 seekerror = True
421 seekerror = True
422
422
423 # Small hack to let caller code distinguish exceptions from bundle2
423 # Small hack to let caller code distinguish exceptions from bundle2
424 # processing from processing the old format. This is mostly needed
424 # processing from processing the old format. This is mostly needed
425 # to handle different return codes to unbundle according to the type
425 # to handle different return codes to unbundle according to the type
426 # of bundle. We should probably clean up or drop this return code
426 # of bundle. We should probably clean up or drop this return code
427 # craziness in a future version.
427 # craziness in a future version.
428 exc.duringunbundle2 = True
428 exc.duringunbundle2 = True
429 salvaged = []
429 salvaged = []
430 replycaps = None
430 replycaps = None
431 if self.op.reply is not None:
431 if self.op.reply is not None:
432 salvaged = self.op.reply.salvageoutput()
432 salvaged = self.op.reply.salvageoutput()
433 replycaps = self.op.reply.capabilities
433 replycaps = self.op.reply.capabilities
434 exc._replycaps = replycaps
434 exc._replycaps = replycaps
435 exc._bundle2salvagedoutput = salvaged
435 exc._bundle2salvagedoutput = salvaged
436
436
437 # Re-raising from a variable loses the original stack. So only use
437 # Re-raising from a variable loses the original stack. So only use
438 # that form if we need to.
438 # that form if we need to.
439 if seekerror:
439 if seekerror:
440 raise exc
440 raise exc
441
441
442 self.repo.ui.debug(
442 self.repo.ui.debug(
443 b'bundle2-input-bundle: %i parts total\n' % self.count
443 b'bundle2-input-bundle: %i parts total\n' % self.count
444 )
444 )
445
445
446
446
447 def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
447 def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
448 """This function process a bundle, apply effect to/from a repo
448 """This function process a bundle, apply effect to/from a repo
449
449
450 It iterates over each part then searches for and uses the proper handling
450 It iterates over each part then searches for and uses the proper handling
451 code to process the part. Parts are processed in order.
451 code to process the part. Parts are processed in order.
452
452
453 An unknown Mandatory part will abort the process.
453 An unknown Mandatory part will abort the process.
454
454
455 It is temporarily possible to provide a prebuilt bundleoperation to the
455 It is temporarily possible to provide a prebuilt bundleoperation to the
456 function. This is used to ensure output is properly propagated in case of
456 function. This is used to ensure output is properly propagated in case of
457 an error during the unbundling. This output capturing part will likely be
457 an error during the unbundling. This output capturing part will likely be
458 reworked and this ability will probably go away in the process.
458 reworked and this ability will probably go away in the process.
459 """
459 """
460 if op is None:
460 if op is None:
461 if transactiongetter is None:
461 if transactiongetter is None:
462 transactiongetter = _notransaction
462 transactiongetter = _notransaction
463 op = bundleoperation(repo, transactiongetter, source=source)
463 op = bundleoperation(repo, transactiongetter, source=source)
464 # todo:
464 # todo:
465 # - replace this with an init function soon.
465 # - replace this with an init function soon.
466 # - exception catching
466 # - exception catching
467 unbundler.params
467 unbundler.params
468 if repo.ui.debugflag:
468 if repo.ui.debugflag:
469 msg = [b'bundle2-input-bundle:']
469 msg = [b'bundle2-input-bundle:']
470 if unbundler.params:
470 if unbundler.params:
471 msg.append(b' %i params' % len(unbundler.params))
471 msg.append(b' %i params' % len(unbundler.params))
472 if op._gettransaction is None or op._gettransaction is _notransaction:
472 if op._gettransaction is None or op._gettransaction is _notransaction:
473 msg.append(b' no-transaction')
473 msg.append(b' no-transaction')
474 else:
474 else:
475 msg.append(b' with-transaction')
475 msg.append(b' with-transaction')
476 msg.append(b'\n')
476 msg.append(b'\n')
477 repo.ui.debug(b''.join(msg))
477 repo.ui.debug(b''.join(msg))
478
478
479 processparts(repo, op, unbundler)
479 processparts(repo, op, unbundler)
480
480
481 return op
481 return op
482
482
483
483
484 def processparts(repo, op, unbundler):
484 def processparts(repo, op, unbundler):
485 with partiterator(repo, op, unbundler) as parts:
485 with partiterator(repo, op, unbundler) as parts:
486 for part in parts:
486 for part in parts:
487 _processpart(op, part)
487 _processpart(op, part)
488
488
489
489
490 def _processchangegroup(op, cg, tr, source, url, **kwargs):
490 def _processchangegroup(op, cg, tr, source, url, **kwargs):
491 ret = cg.apply(op.repo, tr, source, url, **kwargs)
491 ret = cg.apply(op.repo, tr, source, url, **kwargs)
492 op.records.add(b'changegroup', {b'return': ret,})
492 op.records.add(b'changegroup', {b'return': ret,})
493 return ret
493 return ret
494
494
495
495
496 def _gethandler(op, part):
496 def _gethandler(op, part):
497 status = b'unknown' # used by debug output
497 status = b'unknown' # used by debug output
498 try:
498 try:
499 handler = parthandlermapping.get(part.type)
499 handler = parthandlermapping.get(part.type)
500 if handler is None:
500 if handler is None:
501 status = b'unsupported-type'
501 status = b'unsupported-type'
502 raise error.BundleUnknownFeatureError(parttype=part.type)
502 raise error.BundleUnknownFeatureError(parttype=part.type)
503 indebug(op.ui, b'found a handler for part %s' % part.type)
503 indebug(op.ui, b'found a handler for part %s' % part.type)
504 unknownparams = part.mandatorykeys - handler.params
504 unknownparams = part.mandatorykeys - handler.params
505 if unknownparams:
505 if unknownparams:
506 unknownparams = list(unknownparams)
506 unknownparams = list(unknownparams)
507 unknownparams.sort()
507 unknownparams.sort()
508 status = b'unsupported-params (%s)' % b', '.join(unknownparams)
508 status = b'unsupported-params (%s)' % b', '.join(unknownparams)
509 raise error.BundleUnknownFeatureError(
509 raise error.BundleUnknownFeatureError(
510 parttype=part.type, params=unknownparams
510 parttype=part.type, params=unknownparams
511 )
511 )
512 status = b'supported'
512 status = b'supported'
513 except error.BundleUnknownFeatureError as exc:
513 except error.BundleUnknownFeatureError as exc:
514 if part.mandatory: # mandatory parts
514 if part.mandatory: # mandatory parts
515 raise
515 raise
516 indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
516 indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
517 return # skip to part processing
517 return # skip to part processing
518 finally:
518 finally:
519 if op.ui.debugflag:
519 if op.ui.debugflag:
520 msg = [b'bundle2-input-part: "%s"' % part.type]
520 msg = [b'bundle2-input-part: "%s"' % part.type]
521 if not part.mandatory:
521 if not part.mandatory:
522 msg.append(b' (advisory)')
522 msg.append(b' (advisory)')
523 nbmp = len(part.mandatorykeys)
523 nbmp = len(part.mandatorykeys)
524 nbap = len(part.params) - nbmp
524 nbap = len(part.params) - nbmp
525 if nbmp or nbap:
525 if nbmp or nbap:
526 msg.append(b' (params:')
526 msg.append(b' (params:')
527 if nbmp:
527 if nbmp:
528 msg.append(b' %i mandatory' % nbmp)
528 msg.append(b' %i mandatory' % nbmp)
529 if nbap:
529 if nbap:
530 msg.append(b' %i advisory' % nbap)
530 msg.append(b' %i advisory' % nbap)
531 msg.append(b')')
531 msg.append(b')')
532 msg.append(b' %s\n' % status)
532 msg.append(b' %s\n' % status)
533 op.ui.debug(b''.join(msg))
533 op.ui.debug(b''.join(msg))
534
534
535 return handler
535 return handler
536
536
537
537
538 def _processpart(op, part):
538 def _processpart(op, part):
539 """process a single part from a bundle
539 """process a single part from a bundle
540
540
541 The part is guaranteed to have been fully consumed when the function exits
541 The part is guaranteed to have been fully consumed when the function exits
542 (even if an exception is raised)."""
542 (even if an exception is raised)."""
543 handler = _gethandler(op, part)
543 handler = _gethandler(op, part)
544 if handler is None:
544 if handler is None:
545 return
545 return
546
546
547 # handler is called outside the above try block so that we don't
547 # handler is called outside the above try block so that we don't
548 # risk catching KeyErrors from anything other than the
548 # risk catching KeyErrors from anything other than the
549 # parthandlermapping lookup (any KeyError raised by handler()
549 # parthandlermapping lookup (any KeyError raised by handler()
550 # itself represents a defect of a different variety).
550 # itself represents a defect of a different variety).
551 output = None
551 output = None
552 if op.captureoutput and op.reply is not None:
552 if op.captureoutput and op.reply is not None:
553 op.ui.pushbuffer(error=True, subproc=True)
553 op.ui.pushbuffer(error=True, subproc=True)
554 output = b''
554 output = b''
555 try:
555 try:
556 handler(op, part)
556 handler(op, part)
557 finally:
557 finally:
558 if output is not None:
558 if output is not None:
559 output = op.ui.popbuffer()
559 output = op.ui.popbuffer()
560 if output:
560 if output:
561 outpart = op.reply.newpart(b'output', data=output, mandatory=False)
561 outpart = op.reply.newpart(b'output', data=output, mandatory=False)
562 outpart.addparam(
562 outpart.addparam(
563 b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
563 b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
564 )
564 )
565
565
566
566
567 def decodecaps(blob):
567 def decodecaps(blob):
568 """decode a bundle2 caps bytes blob into a dictionary
568 """decode a bundle2 caps bytes blob into a dictionary
569
569
570 The blob is a list of capabilities (one per line)
570 The blob is a list of capabilities (one per line)
571 Capabilities may have values using a line of the form::
571 Capabilities may have values using a line of the form::
572
572
573 capability=value1,value2,value3
573 capability=value1,value2,value3
574
574
575 The values are always a list."""
575 The values are always a list."""
576 caps = {}
576 caps = {}
577 for line in blob.splitlines():
577 for line in blob.splitlines():
578 if not line:
578 if not line:
579 continue
579 continue
580 if b'=' not in line:
580 if b'=' not in line:
581 key, vals = line, ()
581 key, vals = line, ()
582 else:
582 else:
583 key, vals = line.split(b'=', 1)
583 key, vals = line.split(b'=', 1)
584 vals = vals.split(b',')
584 vals = vals.split(b',')
585 key = urlreq.unquote(key)
585 key = urlreq.unquote(key)
586 vals = [urlreq.unquote(v) for v in vals]
586 vals = [urlreq.unquote(v) for v in vals]
587 caps[key] = vals
587 caps[key] = vals
588 return caps
588 return caps
589
589
590
590
591 def encodecaps(caps):
591 def encodecaps(caps):
592 """encode a bundle2 caps dictionary into a bytes blob"""
592 """encode a bundle2 caps dictionary into a bytes blob"""
593 chunks = []
593 chunks = []
594 for ca in sorted(caps):
594 for ca in sorted(caps):
595 vals = caps[ca]
595 vals = caps[ca]
596 ca = urlreq.quote(ca)
596 ca = urlreq.quote(ca)
597 vals = [urlreq.quote(v) for v in vals]
597 vals = [urlreq.quote(v) for v in vals]
598 if vals:
598 if vals:
599 ca = b"%s=%s" % (ca, b','.join(vals))
599 ca = b"%s=%s" % (ca, b','.join(vals))
600 chunks.append(ca)
600 chunks.append(ca)
601 return b'\n'.join(chunks)
601 return b'\n'.join(chunks)
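# Illustrative round trip (capability names chosen for the example):
#
#   blob = b'HG20\nchangegroup=01,02'
#   decodecaps(blob)  # -> {b'HG20': [], b'changegroup': [b'01', b'02']}
#   encodecaps({b'HG20': [], b'changegroup': [b'01', b'02']}) == blob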
602
602
603
603
604 bundletypes = {
604 bundletypes = {
605 b"": (b"", b'UN'), # only when using unbundle on ssh and old http servers
605 b"": (b"", b'UN'), # only when using unbundle on ssh and old http servers
606 # since the unification ssh accepts a header but there
606 # since the unification ssh accepts a header but there
607 # is no capability signaling it.
607 # is no capability signaling it.
608 b"HG20": (), # special-cased below
608 b"HG20": (), # special-cased below
609 b"HG10UN": (b"HG10UN", b'UN'),
609 b"HG10UN": (b"HG10UN", b'UN'),
610 b"HG10BZ": (b"HG10", b'BZ'),
610 b"HG10BZ": (b"HG10", b'BZ'),
611 b"HG10GZ": (b"HG10GZ", b'GZ'),
611 b"HG10GZ": (b"HG10GZ", b'GZ'),
612 }
612 }
613
613
614 # hgweb uses this list to communicate its preferred type
614 # hgweb uses this list to communicate its preferred type
615 bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']
615 bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']
616
616
617
617
618 class bundle20(object):
618 class bundle20(object):
619 """represent an outgoing bundle2 container
619 """represent an outgoing bundle2 container
620
620
621 Use the `addparam` method to add a stream level parameter and `newpart` to
621 Use the `addparam` method to add a stream level parameter and `newpart` to
622 populate it. Then call `getchunks` to retrieve all the binary chunks of
622 populate it. Then call `getchunks` to retrieve all the binary chunks of
623 data that compose the bundle2 container."""
623 data that compose the bundle2 container."""
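    # Typical assembly of a bundle in memory (illustrative; `ui` is assumed to
    # be an existing ui object):
    #
    #   bundler = bundle20(ui)
    #   bundler.newpart(b'output', data=b'hello')
    #   raw = b''.join(bundler.getchunks())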
624
624
625 _magicstring = b'HG20'
625 _magicstring = b'HG20'
626
626
627 def __init__(self, ui, capabilities=()):
627 def __init__(self, ui, capabilities=()):
628 self.ui = ui
628 self.ui = ui
629 self._params = []
629 self._params = []
630 self._parts = []
630 self._parts = []
631 self.capabilities = dict(capabilities)
631 self.capabilities = dict(capabilities)
632 self._compengine = util.compengines.forbundletype(b'UN')
632 self._compengine = util.compengines.forbundletype(b'UN')
633 self._compopts = None
633 self._compopts = None
634 # If compression is being handled by a consumer of the raw
634 # If compression is being handled by a consumer of the raw
635 # data (e.g. the wire protocol), unsetting this flag tells
635 # data (e.g. the wire protocol), unsetting this flag tells
636 # consumers that the bundle is best left uncompressed.
636 # consumers that the bundle is best left uncompressed.
637 self.prefercompressed = True
637 self.prefercompressed = True
638
638
639 def setcompression(self, alg, compopts=None):
639 def setcompression(self, alg, compopts=None):
640 """setup core part compression to <alg>"""
640 """setup core part compression to <alg>"""
641 if alg in (None, b'UN'):
641 if alg in (None, b'UN'):
642 return
642 return
643 assert not any(n.lower() == b'compression' for n, v in self._params)
643 assert not any(n.lower() == b'compression' for n, v in self._params)
644 self.addparam(b'Compression', alg)
644 self.addparam(b'Compression', alg)
645 self._compengine = util.compengines.forbundletype(alg)
645 self._compengine = util.compengines.forbundletype(alg)
646 self._compopts = compopts
646 self._compopts = compopts
647
647
648 @property
648 @property
649 def nbparts(self):
649 def nbparts(self):
650 """total number of parts added to the bundler"""
650 """total number of parts added to the bundler"""
651 return len(self._parts)
651 return len(self._parts)
652
652
653 # methods used to define the bundle2 content
653 # methods used to define the bundle2 content
654 def addparam(self, name, value=None):
654 def addparam(self, name, value=None):
655 """add a stream level parameter"""
655 """add a stream level parameter"""
656 if not name:
656 if not name:
657 raise error.ProgrammingError(b'empty parameter name')
657 raise error.ProgrammingError(b'empty parameter name')
658 if name[0:1] not in pycompat.bytestr(
658 if name[0:1] not in pycompat.bytestr(
659 string.ascii_letters # pytype: disable=wrong-arg-types
659 string.ascii_letters # pytype: disable=wrong-arg-types
660 ):
660 ):
661 raise error.ProgrammingError(
661 raise error.ProgrammingError(
662 b'non letter first character: %s' % name
662 b'non letter first character: %s' % name
663 )
663 )
664 self._params.append((name, value))
664 self._params.append((name, value))
665
665
666 def addpart(self, part):
666 def addpart(self, part):
667 """add a new part to the bundle2 container
667 """add a new part to the bundle2 container
668
668
669 Parts contain the actual applicative payload."""
669 Parts contain the actual applicative payload."""
670 assert part.id is None
670 assert part.id is None
671 part.id = len(self._parts) # very cheap counter
671 part.id = len(self._parts) # very cheap counter
672 self._parts.append(part)
672 self._parts.append(part)
673
673
674 def newpart(self, typeid, *args, **kwargs):
674 def newpart(self, typeid, *args, **kwargs):
675 """create a new part and add it to the containers
675 """create a new part and add it to the containers
676
676
677 The part is directly added to the containers. For now, this means
677 The part is directly added to the containers. For now, this means
678 that any failure to properly initialize the part after calling
678 that any failure to properly initialize the part after calling
679 ``newpart`` should result in a failure of the whole bundling process.
679 ``newpart`` should result in a failure of the whole bundling process.
680
680
681 You can still fall back to manually creating and adding a part if you need better
681 You can still fall back to manually creating and adding a part if you need better
682 control."""
682 control."""
683 part = bundlepart(typeid, *args, **kwargs)
683 part = bundlepart(typeid, *args, **kwargs)
684 self.addpart(part)
684 self.addpart(part)
685 return part
685 return part
686
686
687 # methods used to generate the bundle2 stream
687 # methods used to generate the bundle2 stream
688 def getchunks(self):
688 def getchunks(self):
689 if self.ui.debugflag:
689 if self.ui.debugflag:
690 msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
690 msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
691 if self._params:
691 if self._params:
692 msg.append(b' (%i params)' % len(self._params))
692 msg.append(b' (%i params)' % len(self._params))
693 msg.append(b' %i parts total\n' % len(self._parts))
693 msg.append(b' %i parts total\n' % len(self._parts))
694 self.ui.debug(b''.join(msg))
694 self.ui.debug(b''.join(msg))
695 outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
695 outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
696 yield self._magicstring
696 yield self._magicstring
697 param = self._paramchunk()
697 param = self._paramchunk()
698 outdebug(self.ui, b'bundle parameter: %s' % param)
698 outdebug(self.ui, b'bundle parameter: %s' % param)
699 yield _pack(_fstreamparamsize, len(param))
699 yield _pack(_fstreamparamsize, len(param))
700 if param:
700 if param:
701 yield param
701 yield param
702 for chunk in self._compengine.compressstream(
702 for chunk in self._compengine.compressstream(
703 self._getcorechunk(), self._compopts
703 self._getcorechunk(), self._compopts
704 ):
704 ):
705 yield chunk
705 yield chunk
706
706
707 def _paramchunk(self):
707 def _paramchunk(self):
708 """return a encoded version of all stream parameters"""
708 """return a encoded version of all stream parameters"""
709 blocks = []
709 blocks = []
710 for par, value in self._params:
710 for par, value in self._params:
711 par = urlreq.quote(par)
711 par = urlreq.quote(par)
712 if value is not None:
712 if value is not None:
713 value = urlreq.quote(value)
713 value = urlreq.quote(value)
714 par = b'%s=%s' % (par, value)
714 par = b'%s=%s' % (par, value)
715 blocks.append(par)
715 blocks.append(par)
716 return b' '.join(blocks)
716 return b' '.join(blocks)
717
717
718 def _getcorechunk(self):
718 def _getcorechunk(self):
719 """yield chunk for the core part of the bundle
719 """yield chunk for the core part of the bundle
720
720
721 (all but headers and parameters)"""
721 (all but headers and parameters)"""
722 outdebug(self.ui, b'start of parts')
722 outdebug(self.ui, b'start of parts')
723 for part in self._parts:
723 for part in self._parts:
724 outdebug(self.ui, b'bundle part: "%s"' % part.type)
724 outdebug(self.ui, b'bundle part: "%s"' % part.type)
725 for chunk in part.getchunks(ui=self.ui):
725 for chunk in part.getchunks(ui=self.ui):
726 yield chunk
726 yield chunk
727 outdebug(self.ui, b'end of bundle')
727 outdebug(self.ui, b'end of bundle')
728 yield _pack(_fpartheadersize, 0)
728 yield _pack(_fpartheadersize, 0)
729
729
730 def salvageoutput(self):
730 def salvageoutput(self):
731 """return a list with a copy of all output parts in the bundle
731 """return a list with a copy of all output parts in the bundle
732
732
733 This is meant to be used during error handling to make sure we preserve
733 This is meant to be used during error handling to make sure we preserve
734 server output"""
734 server output"""
735 salvaged = []
735 salvaged = []
736 for part in self._parts:
736 for part in self._parts:
737 if part.type.startswith(b'output'):
737 if part.type.startswith(b'output'):
738 salvaged.append(part.copy())
738 salvaged.append(part.copy())
739 return salvaged
739 return salvaged
740
740
741
741
742 class unpackermixin(object):
742 class unpackermixin(object):
743 """A mixin to extract bytes and struct data from a stream"""
743 """A mixin to extract bytes and struct data from a stream"""
744
744
745 def __init__(self, fp):
745 def __init__(self, fp):
746 self._fp = fp
746 self._fp = fp
747
747
748 def _unpack(self, format):
748 def _unpack(self, format):
749 """unpack this struct format from the stream
749 """unpack this struct format from the stream
750
750
751 This method is meant for internal usage by the bundle2 protocol only.
751 This method is meant for internal usage by the bundle2 protocol only.
752 It directly manipulates the low level stream, including bundle2 level
752 It directly manipulates the low level stream, including bundle2 level
753 instructions.
753 instructions.
754
754
755 Do not use it to implement higher-level logic or methods."""
755 Do not use it to implement higher-level logic or methods."""
756 data = self._readexact(struct.calcsize(format))
756 data = self._readexact(struct.calcsize(format))
757 return _unpack(format, data)
757 return _unpack(format, data)
758
758
759 def _readexact(self, size):
759 def _readexact(self, size):
760 """read exactly <size> bytes from the stream
760 """read exactly <size> bytes from the stream
761
761
762 This method is meant for internal usage by the bundle2 protocol only.
762 This method is meant for internal usage by the bundle2 protocol only.
763 It directly manipulates the low level stream, including bundle2 level
763 It directly manipulates the low level stream, including bundle2 level
764 instructions.
764 instructions.
765
765
766 Do not use it to implement higher-level logic or methods."""
766 Do not use it to implement higher-level logic or methods."""
767 return changegroup.readexactly(self._fp, size)
767 return changegroup.readexactly(self._fp, size)
768
768
769
769
770 def getunbundler(ui, fp, magicstring=None):
770 def getunbundler(ui, fp, magicstring=None):
771 """return a valid unbundler object for a given magicstring"""
771 """return a valid unbundler object for a given magicstring"""
772 if magicstring is None:
772 if magicstring is None:
773 magicstring = changegroup.readexactly(fp, 4)
773 magicstring = changegroup.readexactly(fp, 4)
774 magic, version = magicstring[0:2], magicstring[2:4]
774 magic, version = magicstring[0:2], magicstring[2:4]
775 if magic != b'HG':
775 if magic != b'HG':
776 ui.debug(
776 ui.debug(
777 b"error: invalid magic: %r (version %r), should be 'HG'\n"
777 b"error: invalid magic: %r (version %r), should be 'HG'\n"
778 % (magic, version)
778 % (magic, version)
779 )
779 )
780 raise error.Abort(_(b'not a Mercurial bundle'))
780 raise error.Abort(_(b'not a Mercurial bundle'))
781 unbundlerclass = formatmap.get(version)
781 unbundlerclass = formatmap.get(version)
782 if unbundlerclass is None:
782 if unbundlerclass is None:
783 raise error.Abort(_(b'unknown bundle version %s') % version)
783 raise error.Abort(_(b'unknown bundle version %s') % version)
784 unbundler = unbundlerclass(ui, fp)
784 unbundler = unbundlerclass(ui, fp)
785 indebug(ui, b'start processing of %s stream' % magicstring)
785 indebug(ui, b'start processing of %s stream' % magicstring)
786 return unbundler
786 return unbundler
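# Illustrative read side (`ui` and `path` are assumed to exist):
#
#   with open(path, 'rb') as fp:
#       unbundler = getunbundler(ui, fp)
#       for part in unbundler.iterparts():
#           part.consume()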
787
787
788
788
789 class unbundle20(unpackermixin):
789 class unbundle20(unpackermixin):
790 """interpret a bundle2 stream
790 """interpret a bundle2 stream
791
791
792 This class is fed with a binary stream and yields parts through its
792 This class is fed with a binary stream and yields parts through its
793 `iterparts` methods."""
793 `iterparts` methods."""
794
794
795 _magicstring = b'HG20'
795 _magicstring = b'HG20'
796
796
797 def __init__(self, ui, fp):
797 def __init__(self, ui, fp):
798 """If header is specified, we do not read it out of the stream."""
798 """If header is specified, we do not read it out of the stream."""
799 self.ui = ui
799 self.ui = ui
800 self._compengine = util.compengines.forbundletype(b'UN')
800 self._compengine = util.compengines.forbundletype(b'UN')
801 self._compressed = None
801 self._compressed = None
802 super(unbundle20, self).__init__(fp)
802 super(unbundle20, self).__init__(fp)
803
803
804 @util.propertycache
804 @util.propertycache
805 def params(self):
805 def params(self):
806 """dictionary of stream level parameters"""
806 """dictionary of stream level parameters"""
807 indebug(self.ui, b'reading bundle2 stream parameters')
807 indebug(self.ui, b'reading bundle2 stream parameters')
808 params = {}
808 params = {}
809 paramssize = self._unpack(_fstreamparamsize)[0]
809 paramssize = self._unpack(_fstreamparamsize)[0]
810 if paramssize < 0:
810 if paramssize < 0:
811 raise error.BundleValueError(
811 raise error.BundleValueError(
812 b'negative bundle param size: %i' % paramssize
812 b'negative bundle param size: %i' % paramssize
813 )
813 )
814 if paramssize:
814 if paramssize:
815 params = self._readexact(paramssize)
815 params = self._readexact(paramssize)
816 params = self._processallparams(params)
816 params = self._processallparams(params)
817 return params
817 return params
818
818
819 def _processallparams(self, paramsblock):
819 def _processallparams(self, paramsblock):
820 """"""
820 """"""
821 params = util.sortdict()
821 params = util.sortdict()
822 for p in paramsblock.split(b' '):
822 for p in paramsblock.split(b' '):
823 p = p.split(b'=', 1)
823 p = p.split(b'=', 1)
824 p = [urlreq.unquote(i) for i in p]
824 p = [urlreq.unquote(i) for i in p]
825 if len(p) < 2:
825 if len(p) < 2:
826 p.append(None)
826 p.append(None)
827 self._processparam(*p)
827 self._processparam(*p)
828 params[p[0]] = p[1]
828 params[p[0]] = p[1]
829 return params
829 return params
830
830
831 def _processparam(self, name, value):
831 def _processparam(self, name, value):
832 """process a parameter, applying its effect if needed
832 """process a parameter, applying its effect if needed
833
833
834 Parameters starting with a lower case letter are advisory and will be
834 Parameters starting with a lower case letter are advisory and will be
835 ignored when unknown. Those starting with an upper case letter are
835 ignored when unknown. Those starting with an upper case letter are
836 mandatory; this function will raise a KeyError when they are unknown.
836 mandatory; this function will raise a KeyError when they are unknown.
837
837
838 Note: no options are currently supported. Any input will either be
838 Note: no options are currently supported. Any input will either be
839 ignored or cause a failure.
839 ignored or cause a failure.
840 """
840 """
841 if not name:
841 if not name:
842 raise ValueError('empty parameter name')
842 raise ValueError('empty parameter name')
843 if name[0:1] not in pycompat.bytestr(
843 if name[0:1] not in pycompat.bytestr(
844 string.ascii_letters # pytype: disable=wrong-arg-types
844 string.ascii_letters # pytype: disable=wrong-arg-types
845 ):
845 ):
846 raise ValueError('non letter first character: %s' % name)
846 raise ValueError('non letter first character: %s' % name)
847 try:
847 try:
848 handler = b2streamparamsmap[name.lower()]
848 handler = b2streamparamsmap[name.lower()]
849 except KeyError:
849 except KeyError:
850 if name[0:1].islower():
850 if name[0:1].islower():
851 indebug(self.ui, b"ignoring unknown parameter %s" % name)
851 indebug(self.ui, b"ignoring unknown parameter %s" % name)
852 else:
852 else:
853 raise error.BundleUnknownFeatureError(params=(name,))
853 raise error.BundleUnknownFeatureError(params=(name,))
854 else:
854 else:
855 handler(self, name, value)
855 handler(self, name, value)
856
856
857 def _forwardchunks(self):
857 def _forwardchunks(self):
858 """utility to transfer a bundle2 as binary
858 """utility to transfer a bundle2 as binary
859
859
860 This is made necessary by the fact that the 'getbundle' command over 'ssh'
860 This is made necessary by the fact that the 'getbundle' command over 'ssh'
861 has no way to know when the reply ends, relying on the bundle to be
861 has no way to know when the reply ends, relying on the bundle to be
862 interpreted to know its end. This is terrible and we are sorry, but we
862 interpreted to know its end. This is terrible and we are sorry, but we
863 needed to move forward to get general delta enabled.
863 needed to move forward to get general delta enabled.
864 """
864 """
865 yield self._magicstring
865 yield self._magicstring
866 assert 'params' not in vars(self)
866 assert 'params' not in vars(self)
867 paramssize = self._unpack(_fstreamparamsize)[0]
867 paramssize = self._unpack(_fstreamparamsize)[0]
868 if paramssize < 0:
868 if paramssize < 0:
869 raise error.BundleValueError(
869 raise error.BundleValueError(
870 b'negative bundle param size: %i' % paramssize
870 b'negative bundle param size: %i' % paramssize
871 )
871 )
872 if paramssize:
872 if paramssize:
873 params = self._readexact(paramssize)
873 params = self._readexact(paramssize)
874 self._processallparams(params)
874 self._processallparams(params)
875 # The payload itself is decompressed below, so drop
875 # The payload itself is decompressed below, so drop
876 # the compression parameter passed down to compensate.
876 # the compression parameter passed down to compensate.
877 outparams = []
877 outparams = []
878 for p in params.split(b' '):
878 for p in params.split(b' '):
879 k, v = p.split(b'=', 1)
879 k, v = p.split(b'=', 1)
880 if k.lower() != b'compression':
880 if k.lower() != b'compression':
881 outparams.append(p)
881 outparams.append(p)
882 outparams = b' '.join(outparams)
882 outparams = b' '.join(outparams)
883 yield _pack(_fstreamparamsize, len(outparams))
883 yield _pack(_fstreamparamsize, len(outparams))
884 yield outparams
884 yield outparams
885 else:
885 else:
886 yield _pack(_fstreamparamsize, paramssize)
886 yield _pack(_fstreamparamsize, paramssize)
887 # From there, payload might need to be decompressed
887 # From there, payload might need to be decompressed
888 self._fp = self._compengine.decompressorreader(self._fp)
888 self._fp = self._compengine.decompressorreader(self._fp)
889 emptycount = 0
889 emptycount = 0
890 while emptycount < 2:
890 while emptycount < 2:
891 # so we can brainlessly loop
891 # so we can brainlessly loop
892 assert _fpartheadersize == _fpayloadsize
892 assert _fpartheadersize == _fpayloadsize
893 size = self._unpack(_fpartheadersize)[0]
893 size = self._unpack(_fpartheadersize)[0]
894 yield _pack(_fpartheadersize, size)
894 yield _pack(_fpartheadersize, size)
895 if size:
895 if size:
896 emptycount = 0
896 emptycount = 0
897 else:
897 else:
898 emptycount += 1
898 emptycount += 1
899 continue
899 continue
900 if size == flaginterrupt:
900 if size == flaginterrupt:
901 continue
901 continue
902 elif size < 0:
902 elif size < 0:
903 raise error.BundleValueError(b'negative chunk size: %i')
903 raise error.BundleValueError(b'negative chunk size: %i')
904 yield self._readexact(size)
904 yield self._readexact(size)
905
905
906 def iterparts(self, seekable=False):
906 def iterparts(self, seekable=False):
907 """yield all parts contained in the stream"""
907 """yield all parts contained in the stream"""
908 cls = seekableunbundlepart if seekable else unbundlepart
908 cls = seekableunbundlepart if seekable else unbundlepart
909 # make sure params have been loaded
909 # make sure params have been loaded
910 self.params
910 self.params
911 # From there, payload need to be decompressed
911 # From there, payload need to be decompressed
912 self._fp = self._compengine.decompressorreader(self._fp)
912 self._fp = self._compengine.decompressorreader(self._fp)
913 indebug(self.ui, b'start extraction of bundle2 parts')
913 indebug(self.ui, b'start extraction of bundle2 parts')
914 headerblock = self._readpartheader()
914 headerblock = self._readpartheader()
915 while headerblock is not None:
915 while headerblock is not None:
916 part = cls(self.ui, headerblock, self._fp)
916 part = cls(self.ui, headerblock, self._fp)
917 yield part
917 yield part
918 # Ensure part is fully consumed so we can start reading the next
918 # Ensure part is fully consumed so we can start reading the next
919 # part.
919 # part.
920 part.consume()
920 part.consume()
921
921
922 headerblock = self._readpartheader()
922 headerblock = self._readpartheader()
923 indebug(self.ui, b'end of bundle2 stream')
923 indebug(self.ui, b'end of bundle2 stream')
924
924
925 def _readpartheader(self):
925 def _readpartheader(self):
926 """reads a part header size and return the bytes blob
926 """reads a part header size and return the bytes blob
927
927
928 returns None if empty"""
928 returns None if empty"""
929 headersize = self._unpack(_fpartheadersize)[0]
929 headersize = self._unpack(_fpartheadersize)[0]
930 if headersize < 0:
930 if headersize < 0:
931 raise error.BundleValueError(
931 raise error.BundleValueError(
932 b'negative part header size: %i' % headersize
932 b'negative part header size: %i' % headersize
933 )
933 )
934 indebug(self.ui, b'part header size: %i' % headersize)
934 indebug(self.ui, b'part header size: %i' % headersize)
935 if headersize:
935 if headersize:
936 return self._readexact(headersize)
936 return self._readexact(headersize)
937 return None
937 return None
938
938
939 def compressed(self):
939 def compressed(self):
940 self.params # load params
940 self.params # load params
941 return self._compressed
941 return self._compressed
942
942
943 def close(self):
943 def close(self):
944 """close underlying file"""
944 """close underlying file"""
945 if util.safehasattr(self._fp, 'close'):
945 if util.safehasattr(self._fp, 'close'):
946 return self._fp.close()
946 return self._fp.close()
947
947
948
948
949 formatmap = {b'20': unbundle20}
949 formatmap = {b'20': unbundle20}
950
950
951 b2streamparamsmap = {}
951 b2streamparamsmap = {}
952
952
953
953
954 def b2streamparamhandler(name):
954 def b2streamparamhandler(name):
955 """register a handler for a stream level parameter"""
955 """register a handler for a stream level parameter"""
956
956
957 def decorator(func):
957 def decorator(func):
958 assert name not in formatmap
958 assert name not in formatmap
959 b2streamparamsmap[name] = func
959 b2streamparamsmap[name] = func
960 return func
960 return func
961
961
962 return decorator
962 return decorator
963
963
964
964
965 @b2streamparamhandler(b'compression')
965 @b2streamparamhandler(b'compression')
966 def processcompression(unbundler, param, value):
966 def processcompression(unbundler, param, value):
967 """read compression parameter and install payload decompression"""
967 """read compression parameter and install payload decompression"""
968 if value not in util.compengines.supportedbundletypes:
968 if value not in util.compengines.supportedbundletypes:
969 raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
969 raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
970 unbundler._compengine = util.compengines.forbundletype(value)
970 unbundler._compengine = util.compengines.forbundletype(value)
971 if value is not None:
971 if value is not None:
972 unbundler._compressed = True
972 unbundler._compressed = True
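# For instance, a stream-level parameter block such as b'Compression=BZ' is
# routed here and installs the bzip2 decompressor for the payload (illustrative
# value; any type in util.compengines.supportedbundletypes works).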
973
973
974
974
975 class bundlepart(object):
975 class bundlepart(object):
976 """A bundle2 part contains application level payload
976 """A bundle2 part contains application level payload
977
977
978 The part `type` is used to route the part to the application level
978 The part `type` is used to route the part to the application level
979 handler.
979 handler.
980
980
981 The part payload is contained in ``part.data``. It could be raw bytes or a
981 The part payload is contained in ``part.data``. It could be raw bytes or a
982 generator of byte chunks.
982 generator of byte chunks.
983
983
984 You can add parameters to the part using the ``addparam`` method.
984 You can add parameters to the part using the ``addparam`` method.
985 Parameters can be either mandatory (default) or advisory. Remote side
985 Parameters can be either mandatory (default) or advisory. Remote side
986 should be able to safely ignore the advisory ones.
986 should be able to safely ignore the advisory ones.
987
987
988 Neither data nor parameters can be modified after the generation has begun.
988 Neither data nor parameters can be modified after the generation has begun.
989 """
989 """
990
990
991 def __init__(
991 def __init__(
992 self,
992 self,
993 parttype,
993 parttype,
994 mandatoryparams=(),
994 mandatoryparams=(),
995 advisoryparams=(),
995 advisoryparams=(),
996 data=b'',
996 data=b'',
997 mandatory=True,
997 mandatory=True,
998 ):
998 ):
999 validateparttype(parttype)
999 validateparttype(parttype)
1000 self.id = None
1000 self.id = None
1001 self.type = parttype
1001 self.type = parttype
1002 self._data = data
1002 self._data = data
1003 self._mandatoryparams = list(mandatoryparams)
1003 self._mandatoryparams = list(mandatoryparams)
1004 self._advisoryparams = list(advisoryparams)
1004 self._advisoryparams = list(advisoryparams)
1005 # checking for duplicated entries
1005 # checking for duplicated entries
1006 self._seenparams = set()
1006 self._seenparams = set()
1007 for pname, __ in self._mandatoryparams + self._advisoryparams:
1007 for pname, __ in self._mandatoryparams + self._advisoryparams:
1008 if pname in self._seenparams:
1008 if pname in self._seenparams:
1009 raise error.ProgrammingError(b'duplicated params: %s' % pname)
1009 raise error.ProgrammingError(b'duplicated params: %s' % pname)
1010 self._seenparams.add(pname)
1010 self._seenparams.add(pname)
1011 # status of the part's generation:
1011 # status of the part's generation:
1012 # - None: not started,
1012 # - None: not started,
1013 # - False: currently being generated,
1013 # - False: currently being generated,
1014 # - True: generation done.
1014 # - True: generation done.
1015 self._generated = None
1015 self._generated = None
1016 self.mandatory = mandatory
1016 self.mandatory = mandatory
1017
1017
1018 def __repr__(self):
1018 def __repr__(self):
1019 cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1019 cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1020 return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1020 return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1021 cls,
1021 cls,
1022 id(self),
1022 id(self),
1023 self.id,
1023 self.id,
1024 self.type,
1024 self.type,
1025 self.mandatory,
1025 self.mandatory,
1026 )
1026 )
1027
1027
1028 def copy(self):
1028 def copy(self):
1029 """return a copy of the part
1029 """return a copy of the part
1030
1030
1031 The new part has the very same content but no partid assigned yet.
1031 The new part has the very same content but no partid assigned yet.
1032 Parts with generated data cannot be copied."""
1032 Parts with generated data cannot be copied."""
1033 assert not util.safehasattr(self.data, 'next')
1033 assert not util.safehasattr(self.data, 'next')
1034 return self.__class__(
1034 return self.__class__(
1035 self.type,
1035 self.type,
1036 self._mandatoryparams,
1036 self._mandatoryparams,
1037 self._advisoryparams,
1037 self._advisoryparams,
1038 self._data,
1038 self._data,
1039 self.mandatory,
1039 self.mandatory,
1040 )
1040 )
1041
1041
1042 # methods used to define the part content
1042 # methods used to define the part content
1043 @property
1043 @property
1044 def data(self):
1044 def data(self):
1045 return self._data
1045 return self._data
1046
1046
1047 @data.setter
1047 @data.setter
1048 def data(self, data):
1048 def data(self, data):
1049 if self._generated is not None:
1049 if self._generated is not None:
1050 raise error.ReadOnlyPartError(b'part is being generated')
1050 raise error.ReadOnlyPartError(b'part is being generated')
1051 self._data = data
1051 self._data = data
1052
1052
1053 @property
1053 @property
1054 def mandatoryparams(self):
1054 def mandatoryparams(self):
1055 # make it an immutable tuple to force people through ``addparam``
1055 # make it an immutable tuple to force people through ``addparam``
1056 return tuple(self._mandatoryparams)
1056 return tuple(self._mandatoryparams)
1057
1057
1058 @property
1058 @property
1059 def advisoryparams(self):
1059 def advisoryparams(self):
1060 # make it an immutable tuple to force people through ``addparam``
1060 # make it an immutable tuple to force people through ``addparam``
1061 return tuple(self._advisoryparams)
1061 return tuple(self._advisoryparams)
1062
1062
1063 def addparam(self, name, value=b'', mandatory=True):
1063 def addparam(self, name, value=b'', mandatory=True):
1064 """add a parameter to the part
1064 """add a parameter to the part
1065
1065
1066 If 'mandatory' is set to True, the remote handler must claim support
1066 If 'mandatory' is set to True, the remote handler must claim support
1067 for this parameter or the unbundling will be aborted.
1067 for this parameter or the unbundling will be aborted.
1068
1068
1069 The 'name' and 'value' cannot exceed 255 bytes each.
1069 The 'name' and 'value' cannot exceed 255 bytes each.
1070 """
1070 """
1071 if self._generated is not None:
1071 if self._generated is not None:
1072 raise error.ReadOnlyPartError(b'part is being generated')
1072 raise error.ReadOnlyPartError(b'part is being generated')
1073 if name in self._seenparams:
1073 if name in self._seenparams:
1074 raise ValueError(b'duplicated params: %s' % name)
1074 raise ValueError(b'duplicated params: %s' % name)
1075 self._seenparams.add(name)
1075 self._seenparams.add(name)
1076 params = self._advisoryparams
1076 params = self._advisoryparams
1077 if mandatory:
1077 if mandatory:
1078 params = self._mandatoryparams
1078 params = self._mandatoryparams
1079 params.append((name, value))
1079 params.append((name, value))
1080
1080
1081 # methods used to generate the bundle2 stream
1081 # methods used to generate the bundle2 stream
1082 def getchunks(self, ui):
1082 def getchunks(self, ui):
1083 if self._generated is not None:
1083 if self._generated is not None:
1084 raise error.ProgrammingError(b'part can only be consumed once')
1084 raise error.ProgrammingError(b'part can only be consumed once')
1085 self._generated = False
1085 self._generated = False
1086
1086
1087 if ui.debugflag:
1087 if ui.debugflag:
1088 msg = [b'bundle2-output-part: "%s"' % self.type]
1088 msg = [b'bundle2-output-part: "%s"' % self.type]
1089 if not self.mandatory:
1089 if not self.mandatory:
1090 msg.append(b' (advisory)')
1090 msg.append(b' (advisory)')
1091 nbmp = len(self.mandatoryparams)
1091 nbmp = len(self.mandatoryparams)
1092 nbap = len(self.advisoryparams)
1092 nbap = len(self.advisoryparams)
1093 if nbmp or nbap:
1093 if nbmp or nbap:
1094 msg.append(b' (params:')
1094 msg.append(b' (params:')
1095 if nbmp:
1095 if nbmp:
1096 msg.append(b' %i mandatory' % nbmp)
1096 msg.append(b' %i mandatory' % nbmp)
1097 if nbap:
1097 if nbap:
1098 msg.append(b' %i advisory' % nbap)
1098 msg.append(b' %i advisory' % nbap)
1099 msg.append(b')')
1099 msg.append(b')')
1100 if not self.data:
1100 if not self.data:
1101 msg.append(b' empty payload')
1101 msg.append(b' empty payload')
1102 elif util.safehasattr(self.data, 'next') or util.safehasattr(
1102 elif util.safehasattr(self.data, 'next') or util.safehasattr(
1103 self.data, b'__next__'
1103 self.data, b'__next__'
1104 ):
1104 ):
1105 msg.append(b' streamed payload')
1105 msg.append(b' streamed payload')
1106 else:
1106 else:
1107 msg.append(b' %i bytes payload' % len(self.data))
1107 msg.append(b' %i bytes payload' % len(self.data))
1108 msg.append(b'\n')
1108 msg.append(b'\n')
1109 ui.debug(b''.join(msg))
1109 ui.debug(b''.join(msg))
1110
1110
1111 #### header
1111 #### header
1112 if self.mandatory:
1112 if self.mandatory:
1113 parttype = self.type.upper()
1113 parttype = self.type.upper()
1114 else:
1114 else:
1115 parttype = self.type.lower()
1115 parttype = self.type.lower()
1116 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1116 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1117 ## parttype
1117 ## parttype
1118 header = [
1118 header = [
1119 _pack(_fparttypesize, len(parttype)),
1119 _pack(_fparttypesize, len(parttype)),
1120 parttype,
1120 parttype,
1121 _pack(_fpartid, self.id),
1121 _pack(_fpartid, self.id),
1122 ]
1122 ]
1123 ## parameters
1123 ## parameters
1124 # count
1124 # count
1125 manpar = self.mandatoryparams
1125 manpar = self.mandatoryparams
1126 advpar = self.advisoryparams
1126 advpar = self.advisoryparams
1127 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1127 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1128 # size
1128 # size
1129 parsizes = []
1129 parsizes = []
1130 for key, value in manpar:
1130 for key, value in manpar:
1131 parsizes.append(len(key))
1131 parsizes.append(len(key))
1132 parsizes.append(len(value))
1132 parsizes.append(len(value))
1133 for key, value in advpar:
1133 for key, value in advpar:
1134 parsizes.append(len(key))
1134 parsizes.append(len(key))
1135 parsizes.append(len(value))
1135 parsizes.append(len(value))
1136 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1136 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1137 header.append(paramsizes)
1137 header.append(paramsizes)
1138 # key, value
1138 # key, value
1139 for key, value in manpar:
1139 for key, value in manpar:
1140 header.append(key)
1140 header.append(key)
1141 header.append(value)
1141 header.append(value)
1142 for key, value in advpar:
1142 for key, value in advpar:
1143 header.append(key)
1143 header.append(key)
1144 header.append(value)
1144 header.append(value)
1145 ## finalize header
1145 ## finalize header
1146 try:
1146 try:
1147 headerchunk = b''.join(header)
1147 headerchunk = b''.join(header)
1148 except TypeError:
1148 except TypeError:
1149 raise TypeError(
1149 raise TypeError(
1150 'Found a non-bytes trying to '
1150 'Found a non-bytes trying to '
1151 'build bundle part header: %r' % header
1151 'build bundle part header: %r' % header
1152 )
1152 )
1153 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1153 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1154 yield _pack(_fpartheadersize, len(headerchunk))
1154 yield _pack(_fpartheadersize, len(headerchunk))
1155 yield headerchunk
1155 yield headerchunk
1156 ## payload
1156 ## payload
1157 try:
1157 try:
1158 for chunk in self._payloadchunks():
1158 for chunk in self._payloadchunks():
1159 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1159 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1160 yield _pack(_fpayloadsize, len(chunk))
1160 yield _pack(_fpayloadsize, len(chunk))
1161 yield chunk
1161 yield chunk
1162 except GeneratorExit:
1162 except GeneratorExit:
1163 # GeneratorExit means that nobody is listening for our
1163 # GeneratorExit means that nobody is listening for our
1164 # results anyway, so just bail quickly rather than trying
1164 # results anyway, so just bail quickly rather than trying
1165 # to produce an error part.
1165 # to produce an error part.
1166 ui.debug(b'bundle2-generatorexit\n')
1166 ui.debug(b'bundle2-generatorexit\n')
1167 raise
1167 raise
1168 except BaseException as exc:
1168 except BaseException as exc:
1169 bexc = stringutil.forcebytestr(exc)
1169 bexc = stringutil.forcebytestr(exc)
1170 # backup exception data for later
1170 # backup exception data for later
1171 ui.debug(
1171 ui.debug(
1172 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1172 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1173 )
1173 )
1174 tb = sys.exc_info()[2]
1174 tb = sys.exc_info()[2]
1175 msg = b'unexpected error: %s' % bexc
1175 msg = b'unexpected error: %s' % bexc
1176 interpart = bundlepart(
1176 interpart = bundlepart(
1177 b'error:abort', [(b'message', msg)], mandatory=False
1177 b'error:abort', [(b'message', msg)], mandatory=False
1178 )
1178 )
1179 interpart.id = 0
1179 interpart.id = 0
1180 yield _pack(_fpayloadsize, -1)
1180 yield _pack(_fpayloadsize, -1)
1181 for chunk in interpart.getchunks(ui=ui):
1181 for chunk in interpart.getchunks(ui=ui):
1182 yield chunk
1182 yield chunk
1183 outdebug(ui, b'closing payload chunk')
1183 outdebug(ui, b'closing payload chunk')
1184 # abort current part payload
1184 # abort current part payload
1185 yield _pack(_fpayloadsize, 0)
1185 yield _pack(_fpayloadsize, 0)
1186 pycompat.raisewithtb(exc, tb)
1186 pycompat.raisewithtb(exc, tb)
1187 # end of payload
1187 # end of payload
1188 outdebug(ui, b'closing payload chunk')
1188 outdebug(ui, b'closing payload chunk')
1189 yield _pack(_fpayloadsize, 0)
1189 yield _pack(_fpayloadsize, 0)
1190 self._generated = True
1190 self._generated = True
1191
1191
1192 def _payloadchunks(self):
1192 def _payloadchunks(self):
1193 """yield chunks of a the part payload
1193 """yield chunks of a the part payload
1194
1194
1195 Exists to handle the different methods to provide data to a part."""
1195 Exists to handle the different methods to provide data to a part."""
1196 # we only support fixed size data now.
1196 # we only support fixed size data now.
1197 # This will be improved in the future.
1197 # This will be improved in the future.
1198 if util.safehasattr(self.data, 'next') or util.safehasattr(
1198 if util.safehasattr(self.data, 'next') or util.safehasattr(
1199 self.data, b'__next__'
1199 self.data, b'__next__'
1200 ):
1200 ):
1201 buff = util.chunkbuffer(self.data)
1201 buff = util.chunkbuffer(self.data)
1202 chunk = buff.read(preferedchunksize)
1202 chunk = buff.read(preferedchunksize)
1203 while chunk:
1203 while chunk:
1204 yield chunk
1204 yield chunk
1205 chunk = buff.read(preferedchunksize)
1205 chunk = buff.read(preferedchunksize)
1206 elif len(self.data):
1206 elif len(self.data):
1207 yield self.data
1207 yield self.data
1208
1208
1209
1209
1210 flaginterrupt = -1
1210 flaginterrupt = -1
1211
1211
1212
1212
1213 class interrupthandler(unpackermixin):
1213 class interrupthandler(unpackermixin):
1214 """read one part and process it with restricted capability
1214 """read one part and process it with restricted capability
1215
1215
1216 This allows transmitting exceptions raised on the producer side during part
1216 This allows transmitting exceptions raised on the producer side during part
1217 iteration while the consumer is reading a part.
1217 iteration while the consumer is reading a part.
1218
1218
1219 Parts processed in this manner only have access to a ui object."""
1219 Parts processed in this manner only have access to a ui object."""
1220
1220
1221 def __init__(self, ui, fp):
1221 def __init__(self, ui, fp):
1222 super(interrupthandler, self).__init__(fp)
1222 super(interrupthandler, self).__init__(fp)
1223 self.ui = ui
1223 self.ui = ui
1224
1224
1225 def _readpartheader(self):
1225 def _readpartheader(self):
1226 """reads a part header size and return the bytes blob
1226 """reads a part header size and return the bytes blob
1227
1227
1228 returns None if empty"""
1228 returns None if empty"""
1229 headersize = self._unpack(_fpartheadersize)[0]
1229 headersize = self._unpack(_fpartheadersize)[0]
1230 if headersize < 0:
1230 if headersize < 0:
1231 raise error.BundleValueError(
1231 raise error.BundleValueError(
1232 b'negative part header size: %i' % headersize
1232 b'negative part header size: %i' % headersize
1233 )
1233 )
1234 indebug(self.ui, b'part header size: %i\n' % headersize)
1234 indebug(self.ui, b'part header size: %i\n' % headersize)
1235 if headersize:
1235 if headersize:
1236 return self._readexact(headersize)
1236 return self._readexact(headersize)
1237 return None
1237 return None
1238
1238
1239 def __call__(self):
1239 def __call__(self):
1240
1240
1241 self.ui.debug(
1241 self.ui.debug(
1242 b'bundle2-input-stream-interrupt: opening out of band context\n'
1242 b'bundle2-input-stream-interrupt: opening out of band context\n'
1243 )
1243 )
1244 indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
1244 indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
1245 headerblock = self._readpartheader()
1245 headerblock = self._readpartheader()
1246 if headerblock is None:
1246 if headerblock is None:
1247 indebug(self.ui, b'no part found during interruption.')
1247 indebug(self.ui, b'no part found during interruption.')
1248 return
1248 return
1249 part = unbundlepart(self.ui, headerblock, self._fp)
1249 part = unbundlepart(self.ui, headerblock, self._fp)
1250 op = interruptoperation(self.ui)
1250 op = interruptoperation(self.ui)
1251 hardabort = False
1251 hardabort = False
1252 try:
1252 try:
1253 _processpart(op, part)
1253 _processpart(op, part)
1254 except (SystemExit, KeyboardInterrupt):
1254 except (SystemExit, KeyboardInterrupt):
1255 hardabort = True
1255 hardabort = True
1256 raise
1256 raise
1257 finally:
1257 finally:
1258 if not hardabort:
1258 if not hardabort:
1259 part.consume()
1259 part.consume()
1260 self.ui.debug(
1260 self.ui.debug(
1261 b'bundle2-input-stream-interrupt: closing out of band context\n'
1261 b'bundle2-input-stream-interrupt: closing out of band context\n'
1262 )
1262 )
1263
1263
1264
1264
1265 class interruptoperation(object):
1265 class interruptoperation(object):
1266 """A limited operation to be use by part handler during interruption
1266 """A limited operation to be use by part handler during interruption
1267
1267
1268 It only has access to a ui object.
1268 It only has access to a ui object.
1269 """
1269 """
1270
1270
1271 def __init__(self, ui):
1271 def __init__(self, ui):
1272 self.ui = ui
1272 self.ui = ui
1273 self.reply = None
1273 self.reply = None
1274 self.captureoutput = False
1274 self.captureoutput = False
1275
1275
1276 @property
1276 @property
1277 def repo(self):
1277 def repo(self):
1278 raise error.ProgrammingError(b'no repo access from stream interruption')
1278 raise error.ProgrammingError(b'no repo access from stream interruption')
1279
1279
1280 def gettransaction(self):
1280 def gettransaction(self):
1281 raise TransactionUnavailable(b'no repo access from stream interruption')
1281 raise TransactionUnavailable(b'no repo access from stream interruption')
1282
1282
1283
1283
1284 def decodepayloadchunks(ui, fh):
1284 def decodepayloadchunks(ui, fh):
1285 """Reads bundle2 part payload data into chunks.
1285 """Reads bundle2 part payload data into chunks.
1286
1286
1287 Part payload data consists of framed chunks. This function takes
1287 Part payload data consists of framed chunks. This function takes
1288 a file handle and emits those chunks.
1288 a file handle and emits those chunks.
1289 """
1289 """
1290 dolog = ui.configbool(b'devel', b'bundle2.debug')
1290 dolog = ui.configbool(b'devel', b'bundle2.debug')
1291 debug = ui.debug
1291 debug = ui.debug
1292
1292
1293 headerstruct = struct.Struct(_fpayloadsize)
1293 headerstruct = struct.Struct(_fpayloadsize)
1294 headersize = headerstruct.size
1294 headersize = headerstruct.size
1295 unpack = headerstruct.unpack
1295 unpack = headerstruct.unpack
1296
1296
1297 readexactly = changegroup.readexactly
1297 readexactly = changegroup.readexactly
1298 read = fh.read
1298 read = fh.read
1299
1299
1300 chunksize = unpack(readexactly(fh, headersize))[0]
1300 chunksize = unpack(readexactly(fh, headersize))[0]
1301 indebug(ui, b'payload chunk size: %i' % chunksize)
1301 indebug(ui, b'payload chunk size: %i' % chunksize)
1302
1302
1303 # changegroup.readexactly() is inlined below for performance.
1303 # changegroup.readexactly() is inlined below for performance.
1304 while chunksize:
1304 while chunksize:
1305 if chunksize >= 0:
1305 if chunksize >= 0:
1306 s = read(chunksize)
1306 s = read(chunksize)
1307 if len(s) < chunksize:
1307 if len(s) < chunksize:
1308 raise error.Abort(
1308 raise error.Abort(
1309 _(
1309 _(
1310 b'stream ended unexpectedly '
1310 b'stream ended unexpectedly '
1311 b' (got %d bytes, expected %d)'
1311 b' (got %d bytes, expected %d)'
1312 )
1312 )
1313 % (len(s), chunksize)
1313 % (len(s), chunksize)
1314 )
1314 )
1315
1315
1316 yield s
1316 yield s
1317 elif chunksize == flaginterrupt:
1317 elif chunksize == flaginterrupt:
1318 # Interrupt "signal" detected. The regular stream is interrupted
1318 # Interrupt "signal" detected. The regular stream is interrupted
1319 # and a bundle2 part follows. Consume it.
1319 # and a bundle2 part follows. Consume it.
1320 interrupthandler(ui, fh)()
1320 interrupthandler(ui, fh)()
1321 else:
1321 else:
1322 raise error.BundleValueError(
1322 raise error.BundleValueError(
1323 b'negative payload chunk size: %s' % chunksize
1323 b'negative payload chunk size: %s' % chunksize
1324 )
1324 )
1325
1325
1326 s = read(headersize)
1326 s = read(headersize)
1327 if len(s) < headersize:
1327 if len(s) < headersize:
1328 raise error.Abort(
1328 raise error.Abort(
1329 _(b'stream ended unexpectedly (got %d bytes, expected %d)')
1329 _(b'stream ended unexpectedly (got %d bytes, expected %d)')
1330 % (len(s), headersize)
1330 % (len(s), headersize)
1331 )
1331 )
1332
1332
1333 chunksize = unpack(s)[0]
1333 chunksize = unpack(s)[0]
1334
1334
1335 # indebug() inlined for performance.
1335 # indebug() inlined for performance.
1336 if dolog:
1336 if dolog:
1337 debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)
1337 debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)
1338
1338
1339
1339
1340 class unbundlepart(unpackermixin):
1340 class unbundlepart(unpackermixin):
1341 """a bundle part read from a bundle"""
1341 """a bundle part read from a bundle"""
1342
1342
1343 def __init__(self, ui, header, fp):
1343 def __init__(self, ui, header, fp):
1344 super(unbundlepart, self).__init__(fp)
1344 super(unbundlepart, self).__init__(fp)
1345 self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr(
1345 self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr(
1346 fp, b'tell'
1346 fp, b'tell'
1347 )
1347 )
1348 self.ui = ui
1348 self.ui = ui
1349 # unbundle state attr
1349 # unbundle state attr
1350 self._headerdata = header
1350 self._headerdata = header
1351 self._headeroffset = 0
1351 self._headeroffset = 0
1352 self._initialized = False
1352 self._initialized = False
1353 self.consumed = False
1353 self.consumed = False
1354 # part data
1354 # part data
1355 self.id = None
1355 self.id = None
1356 self.type = None
1356 self.type = None
1357 self.mandatoryparams = None
1357 self.mandatoryparams = None
1358 self.advisoryparams = None
1358 self.advisoryparams = None
1359 self.params = None
1359 self.params = None
1360 self.mandatorykeys = ()
1360 self.mandatorykeys = ()
1361 self._readheader()
1361 self._readheader()
1362 self._mandatory = None
1362 self._mandatory = None
1363 self._pos = 0
1363 self._pos = 0
1364
1364
1365 def _fromheader(self, size):
1365 def _fromheader(self, size):
1366 """return the next <size> byte from the header"""
1366 """return the next <size> byte from the header"""
1367 offset = self._headeroffset
1367 offset = self._headeroffset
1368 data = self._headerdata[offset : (offset + size)]
1368 data = self._headerdata[offset : (offset + size)]
1369 self._headeroffset = offset + size
1369 self._headeroffset = offset + size
1370 return data
1370 return data
1371
1371
1372 def _unpackheader(self, format):
1372 def _unpackheader(self, format):
1373 """read given format from header
1373 """read given format from header
1374
1374
1375 This automatically computes the size of the format to read."""
1375 This automatically computes the size of the format to read."""
1376 data = self._fromheader(struct.calcsize(format))
1376 data = self._fromheader(struct.calcsize(format))
1377 return _unpack(format, data)
1377 return _unpack(format, data)
1378
1378
1379 def _initparams(self, mandatoryparams, advisoryparams):
1379 def _initparams(self, mandatoryparams, advisoryparams):
1380 """internal function to setup all logic related parameters"""
1380 """internal function to setup all logic related parameters"""
1381 # make it read only to prevent people touching it by mistake.
1381 # make it read only to prevent people touching it by mistake.
1382 self.mandatoryparams = tuple(mandatoryparams)
1382 self.mandatoryparams = tuple(mandatoryparams)
1383 self.advisoryparams = tuple(advisoryparams)
1383 self.advisoryparams = tuple(advisoryparams)
1384 # user friendly UI
1384 # user friendly UI
1385 self.params = util.sortdict(self.mandatoryparams)
1385 self.params = util.sortdict(self.mandatoryparams)
1386 self.params.update(self.advisoryparams)
1386 self.params.update(self.advisoryparams)
1387 self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
1387 self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
1388
1388
1389 def _readheader(self):
1389 def _readheader(self):
1390 """read the header and setup the object"""
1390 """read the header and setup the object"""
1391 typesize = self._unpackheader(_fparttypesize)[0]
1391 typesize = self._unpackheader(_fparttypesize)[0]
1392 self.type = self._fromheader(typesize)
1392 self.type = self._fromheader(typesize)
1393 indebug(self.ui, b'part type: "%s"' % self.type)
1393 indebug(self.ui, b'part type: "%s"' % self.type)
1394 self.id = self._unpackheader(_fpartid)[0]
1394 self.id = self._unpackheader(_fpartid)[0]
1395 indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
1395 indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
1396 # extract mandatory bit from type
1396 # extract mandatory bit from type
1397 self.mandatory = self.type != self.type.lower()
1397 self.mandatory = self.type != self.type.lower()
1398 self.type = self.type.lower()
1398 self.type = self.type.lower()
1399 ## reading parameters
1399 ## reading parameters
1400 # param count
1400 # param count
1401 mancount, advcount = self._unpackheader(_fpartparamcount)
1401 mancount, advcount = self._unpackheader(_fpartparamcount)
1402 indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
1402 indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
1403 # param size
1403 # param size
1404 fparamsizes = _makefpartparamsizes(mancount + advcount)
1404 fparamsizes = _makefpartparamsizes(mancount + advcount)
1405 paramsizes = self._unpackheader(fparamsizes)
1405 paramsizes = self._unpackheader(fparamsizes)
1406 # make it a list of couples again
1406 # make it a list of couples again
1407 paramsizes = list(zip(paramsizes[::2], paramsizes[1::2]))
1407 paramsizes = list(zip(paramsizes[::2], paramsizes[1::2]))
1408 # split mandatory from advisory
1408 # split mandatory from advisory
1409 mansizes = paramsizes[:mancount]
1409 mansizes = paramsizes[:mancount]
1410 advsizes = paramsizes[mancount:]
1410 advsizes = paramsizes[mancount:]
1411 # retrieve param value
1411 # retrieve param value
1412 manparams = []
1412 manparams = []
1413 for key, value in mansizes:
1413 for key, value in mansizes:
1414 manparams.append((self._fromheader(key), self._fromheader(value)))
1414 manparams.append((self._fromheader(key), self._fromheader(value)))
1415 advparams = []
1415 advparams = []
1416 for key, value in advsizes:
1416 for key, value in advsizes:
1417 advparams.append((self._fromheader(key), self._fromheader(value)))
1417 advparams.append((self._fromheader(key), self._fromheader(value)))
1418 self._initparams(manparams, advparams)
1418 self._initparams(manparams, advparams)
1419 ## part payload
1419 ## part payload
1420 self._payloadstream = util.chunkbuffer(self._payloadchunks())
1420 self._payloadstream = util.chunkbuffer(self._payloadchunks())
1421 # we read the data, tell it
1421 # we read the data, tell it
1422 self._initialized = True
1422 self._initialized = True
1423
1423
1424 def _payloadchunks(self):
1424 def _payloadchunks(self):
1425 """Generator of decoded chunks in the payload."""
1425 """Generator of decoded chunks in the payload."""
1426 return decodepayloadchunks(self.ui, self._fp)
1426 return decodepayloadchunks(self.ui, self._fp)
1427
1427
1428 def consume(self):
1428 def consume(self):
1429 """Read the part payload until completion.
1429 """Read the part payload until completion.
1430
1430
1431 By consuming the part data, the underlying stream read offset will
1431 By consuming the part data, the underlying stream read offset will
1432 be advanced to the next part (or end of stream).
1432 be advanced to the next part (or end of stream).
1433 """
1433 """
1434 if self.consumed:
1434 if self.consumed:
1435 return
1435 return
1436
1436
1437 chunk = self.read(32768)
1437 chunk = self.read(32768)
1438 while chunk:
1438 while chunk:
1439 self._pos += len(chunk)
1439 self._pos += len(chunk)
1440 chunk = self.read(32768)
1440 chunk = self.read(32768)
1441
1441
1442 def read(self, size=None):
1442 def read(self, size=None):
1443 """read payload data"""
1443 """read payload data"""
1444 if not self._initialized:
1444 if not self._initialized:
1445 self._readheader()
1445 self._readheader()
1446 if size is None:
1446 if size is None:
1447 data = self._payloadstream.read()
1447 data = self._payloadstream.read()
1448 else:
1448 else:
1449 data = self._payloadstream.read(size)
1449 data = self._payloadstream.read(size)
1450 self._pos += len(data)
1450 self._pos += len(data)
1451 if size is None or len(data) < size:
1451 if size is None or len(data) < size:
1452 if not self.consumed and self._pos:
1452 if not self.consumed and self._pos:
1453 self.ui.debug(
1453 self.ui.debug(
1454 b'bundle2-input-part: total payload size %i\n' % self._pos
1454 b'bundle2-input-part: total payload size %i\n' % self._pos
1455 )
1455 )
1456 self.consumed = True
1456 self.consumed = True
1457 return data
1457 return data
1458
1458
1459
1459
1460 class seekableunbundlepart(unbundlepart):
1460 class seekableunbundlepart(unbundlepart):
1461 """A bundle2 part in a bundle that is seekable.
1461 """A bundle2 part in a bundle that is seekable.
1462
1462
1463 Regular ``unbundlepart`` instances can only be read once. This class
1463 Regular ``unbundlepart`` instances can only be read once. This class
1464 extends ``unbundlepart`` to enable bi-directional seeking within the
1464 extends ``unbundlepart`` to enable bi-directional seeking within the
1465 part.
1465 part.
1466
1466
1467 Bundle2 part data consists of framed chunks. Offsets when seeking
1467 Bundle2 part data consists of framed chunks. Offsets when seeking
1468 refer to the decoded data, not the offsets in the underlying bundle2
1468 refer to the decoded data, not the offsets in the underlying bundle2
1469 stream.
1469 stream.
1470
1470
1471 To facilitate quickly seeking within the decoded data, instances of this
1471 To facilitate quickly seeking within the decoded data, instances of this
1472 class maintain a mapping between offsets in the underlying stream and
1472 class maintain a mapping between offsets in the underlying stream and
1473 the decoded payload. This mapping will consume memory in proportion
1473 the decoded payload. This mapping will consume memory in proportion
1474 to the number of chunks within the payload (which almost certainly
1474 to the number of chunks within the payload (which almost certainly
1475 increases in proportion with the size of the part).
1475 increases in proportion with the size of the part).
1476 """
1476 """
1477
1477
1478 def __init__(self, ui, header, fp):
1478 def __init__(self, ui, header, fp):
1479 # (payload, file) offsets for chunk starts.
1479 # (payload, file) offsets for chunk starts.
1480 self._chunkindex = []
1480 self._chunkindex = []
1481
1481
1482 super(seekableunbundlepart, self).__init__(ui, header, fp)
1482 super(seekableunbundlepart, self).__init__(ui, header, fp)
1483
1483
1484 def _payloadchunks(self, chunknum=0):
1484 def _payloadchunks(self, chunknum=0):
1485 '''seek to specified chunk and start yielding data'''
1485 '''seek to specified chunk and start yielding data'''
1486 if len(self._chunkindex) == 0:
1486 if len(self._chunkindex) == 0:
1487 assert chunknum == 0, b'Must start with chunk 0'
1487 assert chunknum == 0, b'Must start with chunk 0'
1488 self._chunkindex.append((0, self._tellfp()))
1488 self._chunkindex.append((0, self._tellfp()))
1489 else:
1489 else:
1490 assert chunknum < len(self._chunkindex), (
1490 assert chunknum < len(self._chunkindex), (
1491 b'Unknown chunk %d' % chunknum
1491 b'Unknown chunk %d' % chunknum
1492 )
1492 )
1493 self._seekfp(self._chunkindex[chunknum][1])
1493 self._seekfp(self._chunkindex[chunknum][1])
1494
1494
1495 pos = self._chunkindex[chunknum][0]
1495 pos = self._chunkindex[chunknum][0]
1496
1496
1497 for chunk in decodepayloadchunks(self.ui, self._fp):
1497 for chunk in decodepayloadchunks(self.ui, self._fp):
1498 chunknum += 1
1498 chunknum += 1
1499 pos += len(chunk)
1499 pos += len(chunk)
1500 if chunknum == len(self._chunkindex):
1500 if chunknum == len(self._chunkindex):
1501 self._chunkindex.append((pos, self._tellfp()))
1501 self._chunkindex.append((pos, self._tellfp()))
1502
1502
1503 yield chunk
1503 yield chunk
1504
1504
1505 def _findchunk(self, pos):
1505 def _findchunk(self, pos):
1506 '''for a given payload position, return a chunk number and offset'''
1506 '''for a given payload position, return a chunk number and offset'''
1507 for chunk, (ppos, fpos) in enumerate(self._chunkindex):
1507 for chunk, (ppos, fpos) in enumerate(self._chunkindex):
1508 if ppos == pos:
1508 if ppos == pos:
1509 return chunk, 0
1509 return chunk, 0
1510 elif ppos > pos:
1510 elif ppos > pos:
1511 return chunk - 1, pos - self._chunkindex[chunk - 1][0]
1511 return chunk - 1, pos - self._chunkindex[chunk - 1][0]
1512 raise ValueError(b'Unknown chunk')
1512 raise ValueError(b'Unknown chunk')
1513
1513
1514 def tell(self):
1514 def tell(self):
1515 return self._pos
1515 return self._pos
1516
1516
1517 def seek(self, offset, whence=os.SEEK_SET):
1517 def seek(self, offset, whence=os.SEEK_SET):
1518 if whence == os.SEEK_SET:
1518 if whence == os.SEEK_SET:
1519 newpos = offset
1519 newpos = offset
1520 elif whence == os.SEEK_CUR:
1520 elif whence == os.SEEK_CUR:
1521 newpos = self._pos + offset
1521 newpos = self._pos + offset
1522 elif whence == os.SEEK_END:
1522 elif whence == os.SEEK_END:
1523 if not self.consumed:
1523 if not self.consumed:
1524 # Can't use self.consume() here because it advances self._pos.
1524 # Can't use self.consume() here because it advances self._pos.
1525 chunk = self.read(32768)
1525 chunk = self.read(32768)
1526 while chunk:
1526 while chunk:
1527 chunk = self.read(32768)
1527 chunk = self.read(32768)
1528 newpos = self._chunkindex[-1][0] - offset
1528 newpos = self._chunkindex[-1][0] - offset
1529 else:
1529 else:
1530 raise ValueError(b'Unknown whence value: %r' % (whence,))
1530 raise ValueError(b'Unknown whence value: %r' % (whence,))
1531
1531
1532 if newpos > self._chunkindex[-1][0] and not self.consumed:
1532 if newpos > self._chunkindex[-1][0] and not self.consumed:
1533 # Can't use self.consume() here because it advances self._pos.
1533 # Can't use self.consume() here because it advances self._pos.
1534 chunk = self.read(32768)
1534 chunk = self.read(32768)
1535 while chunk:
1535 while chunk:
1536 chunk = self.read(32768)
1536 chunk = self.read(32768)
1537
1537
1538 if not 0 <= newpos <= self._chunkindex[-1][0]:
1538 if not 0 <= newpos <= self._chunkindex[-1][0]:
1539 raise ValueError(b'Offset out of range')
1539 raise ValueError(b'Offset out of range')
1540
1540
1541 if self._pos != newpos:
1541 if self._pos != newpos:
1542 chunk, internaloffset = self._findchunk(newpos)
1542 chunk, internaloffset = self._findchunk(newpos)
1543 self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
1543 self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
1544 adjust = self.read(internaloffset)
1544 adjust = self.read(internaloffset)
1545 if len(adjust) != internaloffset:
1545 if len(adjust) != internaloffset:
1546 raise error.Abort(_(b'Seek failed\n'))
1546 raise error.Abort(_(b'Seek failed\n'))
1547 self._pos = newpos
1547 self._pos = newpos
1548
1548
1549 def _seekfp(self, offset, whence=0):
1549 def _seekfp(self, offset, whence=0):
1550 """move the underlying file pointer
1550 """move the underlying file pointer
1551
1551
1552 This method is meant for internal usage by the bundle2 protocol only.
1552 This method is meant for internal usage by the bundle2 protocol only.
1553 It directly manipulates the low level stream including bundle2 level
1553 It directly manipulates the low level stream including bundle2 level
1554 instructions.
1554 instructions.
1555
1555
1556 Do not use it to implement higher-level logic or methods."""
1556 Do not use it to implement higher-level logic or methods."""
1557 if self._seekable:
1557 if self._seekable:
1558 return self._fp.seek(offset, whence)
1558 return self._fp.seek(offset, whence)
1559 else:
1559 else:
1560 raise NotImplementedError(_(b'File pointer is not seekable'))
1560 raise NotImplementedError(_(b'File pointer is not seekable'))
1561
1561
1562 def _tellfp(self):
1562 def _tellfp(self):
1563 """return the file offset, or None if file is not seekable
1563 """return the file offset, or None if file is not seekable
1564
1564
1565 This method is meant for internal usage by the bundle2 protocol only.
1565 This method is meant for internal usage by the bundle2 protocol only.
1566 It directly manipulates the low level stream including bundle2 level
1566 It directly manipulates the low level stream including bundle2 level
1567 instructions.
1567 instructions.
1568
1568
1569 Do not use it to implement higher-level logic or methods."""
1569 Do not use it to implement higher-level logic or methods."""
1570 if self._seekable:
1570 if self._seekable:
1571 try:
1571 try:
1572 return self._fp.tell()
1572 return self._fp.tell()
1573 except IOError as e:
1573 except IOError as e:
1574 if e.errno == errno.ESPIPE:
1574 if e.errno == errno.ESPIPE:
1575 self._seekable = False
1575 self._seekable = False
1576 else:
1576 else:
1577 raise
1577 raise
1578 return None
1578 return None
1579
1579
1580
1580
1581 # These are only the static capabilities.
1581 # These are only the static capabilities.
1582 # Check the 'getrepocaps' function for the rest.
1582 # Check the 'getrepocaps' function for the rest.
1583 capabilities = {
1583 capabilities = {
1584 b'HG20': (),
1584 b'HG20': (),
1585 b'bookmarks': (),
1585 b'bookmarks': (),
1586 b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
1586 b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
1587 b'listkeys': (),
1587 b'listkeys': (),
1588 b'pushkey': (),
1588 b'pushkey': (),
1589 b'digests': tuple(sorted(util.DIGESTS.keys())),
1589 b'digests': tuple(sorted(util.DIGESTS.keys())),
1590 b'remote-changegroup': (b'http', b'https'),
1590 b'remote-changegroup': (b'http', b'https'),
1591 b'hgtagsfnodes': (),
1591 b'hgtagsfnodes': (),
1592 b'rev-branch-cache': (),
1592 b'rev-branch-cache': (),
1593 b'phases': (b'heads',),
1593 b'phases': (b'heads',),
1594 b'stream': (b'v2',),
1594 b'stream': (b'v2',),
1595 }
1595 }
1596
1596
1597
1597
1598 def getrepocaps(repo, allowpushback=False, role=None):
1598 def getrepocaps(repo, allowpushback=False, role=None):
1599 """return the bundle2 capabilities for a given repo
1599 """return the bundle2 capabilities for a given repo
1600
1600
1601 Exists to allow extensions (like evolution) to mutate the capabilities.
1601 Exists to allow extensions (like evolution) to mutate the capabilities.
1602
1602
1603 The returned value is used for servers advertising their capabilities as
1603 The returned value is used for servers advertising their capabilities as
1604 well as clients advertising their capabilities to servers as part of
1604 well as clients advertising their capabilities to servers as part of
1605 bundle2 requests. The ``role`` argument specifies which is which.
1605 bundle2 requests. The ``role`` argument specifies which is which.
1606 """
1606 """
1607 if role not in (b'client', b'server'):
1607 if role not in (b'client', b'server'):
1608 raise error.ProgrammingError(b'role argument must be client or server')
1608 raise error.ProgrammingError(b'role argument must be client or server')
1609
1609
1610 caps = capabilities.copy()
1610 caps = capabilities.copy()
1611 caps[b'changegroup'] = tuple(
1611 caps[b'changegroup'] = tuple(
1612 sorted(changegroup.supportedincomingversions(repo))
1612 sorted(changegroup.supportedincomingversions(repo))
1613 )
1613 )
1614 if obsolete.isenabled(repo, obsolete.exchangeopt):
1614 if obsolete.isenabled(repo, obsolete.exchangeopt):
1615 supportedformat = tuple(b'V%i' % v for v in obsolete.formats)
1615 supportedformat = tuple(b'V%i' % v for v in obsolete.formats)
1616 caps[b'obsmarkers'] = supportedformat
1616 caps[b'obsmarkers'] = supportedformat
1617 if allowpushback:
1617 if allowpushback:
1618 caps[b'pushback'] = ()
1618 caps[b'pushback'] = ()
1619 cpmode = repo.ui.config(b'server', b'concurrent-push-mode')
1619 cpmode = repo.ui.config(b'server', b'concurrent-push-mode')
1620 if cpmode == b'check-related':
1620 if cpmode == b'check-related':
1621 caps[b'checkheads'] = (b'related',)
1621 caps[b'checkheads'] = (b'related',)
1622 if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
1622 if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
1623 caps.pop(b'phases')
1623 caps.pop(b'phases')
1624
1624
1625 # Don't advertise stream clone support in server mode if not configured.
1625 # Don't advertise stream clone support in server mode if not configured.
1626 if role == b'server':
1626 if role == b'server':
1627 streamsupported = repo.ui.configbool(
1627 streamsupported = repo.ui.configbool(
1628 b'server', b'uncompressed', untrusted=True
1628 b'server', b'uncompressed', untrusted=True
1629 )
1629 )
1630 featuresupported = repo.ui.configbool(b'server', b'bundle2.stream')
1630 featuresupported = repo.ui.configbool(b'server', b'bundle2.stream')
1631
1631
1632 if not streamsupported or not featuresupported:
1632 if not streamsupported or not featuresupported:
1633 caps.pop(b'stream')
1633 caps.pop(b'stream')
1634 # Else always advertise support on client, because payload support
1634 # Else always advertise support on client, because payload support
1635 # should always be advertised.
1635 # should always be advertised.
1636
1636
1637 return caps
1637 return caps
1638
1638
1639
1639
1640 def bundle2caps(remote):
1640 def bundle2caps(remote):
1641 """return the bundle capabilities of a peer as dict"""
1641 """return the bundle capabilities of a peer as dict"""
1642 raw = remote.capable(b'bundle2')
1642 raw = remote.capable(b'bundle2')
1643 if not raw and raw != b'':
1643 if not raw and raw != b'':
1644 return {}
1644 return {}
1645 capsblob = urlreq.unquote(remote.capable(b'bundle2'))
1645 capsblob = urlreq.unquote(remote.capable(b'bundle2'))
1646 return decodecaps(capsblob)
1646 return decodecaps(capsblob)
1647
1647
1648
1648
1649 def obsmarkersversion(caps):
1649 def obsmarkersversion(caps):
1650 """extract the list of supported obsmarkers versions from a bundle2caps dict
1650 """extract the list of supported obsmarkers versions from a bundle2caps dict
1651 """
1651 """
1652 obscaps = caps.get(b'obsmarkers', ())
1652 obscaps = caps.get(b'obsmarkers', ())
1653 return [int(c[1:]) for c in obscaps if c.startswith(b'V')]
1653 return [int(c[1:]) for c in obscaps if c.startswith(b'V')]
1654
1654
1655
1655
1656 def writenewbundle(
1656 def writenewbundle(
1657 ui,
1657 ui,
1658 repo,
1658 repo,
1659 source,
1659 source,
1660 filename,
1660 filename,
1661 bundletype,
1661 bundletype,
1662 outgoing,
1662 outgoing,
1663 opts,
1663 opts,
1664 vfs=None,
1664 vfs=None,
1665 compression=None,
1665 compression=None,
1666 compopts=None,
1666 compopts=None,
1667 ):
1667 ):
1668 if bundletype.startswith(b'HG10'):
1668 if bundletype.startswith(b'HG10'):
1669 cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
1669 cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
1670 return writebundle(
1670 return writebundle(
1671 ui,
1671 ui,
1672 cg,
1672 cg,
1673 filename,
1673 filename,
1674 bundletype,
1674 bundletype,
1675 vfs=vfs,
1675 vfs=vfs,
1676 compression=compression,
1676 compression=compression,
1677 compopts=compopts,
1677 compopts=compopts,
1678 )
1678 )
1679 elif not bundletype.startswith(b'HG20'):
1679 elif not bundletype.startswith(b'HG20'):
1680 raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)
1680 raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)
1681
1681
1682 caps = {}
1682 caps = {}
1683 if b'obsolescence' in opts:
1683 if b'obsolescence' in opts:
1684 caps[b'obsmarkers'] = (b'V1',)
1684 caps[b'obsmarkers'] = (b'V1',)
1685 bundle = bundle20(ui, caps)
1685 bundle = bundle20(ui, caps)
1686 bundle.setcompression(compression, compopts)
1686 bundle.setcompression(compression, compopts)
1687 _addpartsfromopts(ui, repo, bundle, source, outgoing, opts)
1687 _addpartsfromopts(ui, repo, bundle, source, outgoing, opts)
1688 chunkiter = bundle.getchunks()
1688 chunkiter = bundle.getchunks()
1689
1689
1690 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1690 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1691
1691
1692
1692
1693 def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
1693 def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
1694 # We should eventually reconcile this logic with the one behind
1694 # We should eventually reconcile this logic with the one behind
1695 # 'exchange.getbundle2partsgenerator'.
1695 # 'exchange.getbundle2partsgenerator'.
1696 #
1696 #
1697 # The types of input from 'getbundle' and 'writenewbundle' are a bit
1697 # The types of input from 'getbundle' and 'writenewbundle' are a bit
1698 # different right now. So we keep them separated for now for the sake of
1698 # different right now. So we keep them separated for now for the sake of
1699 # simplicity.
1699 # simplicity.
1700
1700
1701 # we might not always want a changegroup in such bundle, for example in
1701 # we might not always want a changegroup in such bundle, for example in
1702 # stream bundles
1702 # stream bundles
1703 if opts.get(b'changegroup', True):
1703 if opts.get(b'changegroup', True):
1704 cgversion = opts.get(b'cg.version')
1704 cgversion = opts.get(b'cg.version')
1705 if cgversion is None:
1705 if cgversion is None:
1706 cgversion = changegroup.safeversion(repo)
1706 cgversion = changegroup.safeversion(repo)
1707 cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
1707 cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
1708 part = bundler.newpart(b'changegroup', data=cg.getchunks())
1708 part = bundler.newpart(b'changegroup', data=cg.getchunks())
1709 part.addparam(b'version', cg.version)
1709 part.addparam(b'version', cg.version)
1710 if b'clcount' in cg.extras:
1710 if b'clcount' in cg.extras:
1711 part.addparam(
1711 part.addparam(
1712 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1712 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1713 )
1713 )
1714 if opts.get(b'phases') and repo.revs(
1714 if opts.get(b'phases') and repo.revs(
1715 b'%ln and secret()', outgoing.ancestorsof
1715 b'%ln and secret()', outgoing.ancestorsof
1716 ):
1716 ):
1717 part.addparam(
1717 part.addparam(
1718 b'targetphase', b'%d' % phases.secret, mandatory=False
1718 b'targetphase', b'%d' % phases.secret, mandatory=False
1719 )
1719 )
1720 if b'exp-sidedata-flag' in repo.requirements:
1720 if b'exp-sidedata-flag' in repo.requirements:
1721 part.addparam(b'exp-sidedata', b'1')
1721 part.addparam(b'exp-sidedata', b'1')
1722
1722
1723 if opts.get(b'streamv2', False):
1723 if opts.get(b'streamv2', False):
1724 addpartbundlestream2(bundler, repo, stream=True)
1724 addpartbundlestream2(bundler, repo, stream=True)
1725
1725
1726 if opts.get(b'tagsfnodescache', True):
1726 if opts.get(b'tagsfnodescache', True):
1727 addparttagsfnodescache(repo, bundler, outgoing)
1727 addparttagsfnodescache(repo, bundler, outgoing)
1728
1728
1729 if opts.get(b'revbranchcache', True):
1729 if opts.get(b'revbranchcache', True):
1730 addpartrevbranchcache(repo, bundler, outgoing)
1730 addpartrevbranchcache(repo, bundler, outgoing)
1731
1731
1732 if opts.get(b'obsolescence', False):
1732 if opts.get(b'obsolescence', False):
1733 obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
1733 obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
1734 buildobsmarkerspart(bundler, obsmarkers)
1734 buildobsmarkerspart(bundler, obsmarkers)
1735
1735
1736 if opts.get(b'phases', False):
1736 if opts.get(b'phases', False):
1737 headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
1737 headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
1738 phasedata = phases.binaryencode(headsbyphase)
1738 phasedata = phases.binaryencode(headsbyphase)
1739 bundler.newpart(b'phase-heads', data=phasedata)
1739 bundler.newpart(b'phase-heads', data=phasedata)
1740
1740
1741
1741
1742 def addparttagsfnodescache(repo, bundler, outgoing):
1742 def addparttagsfnodescache(repo, bundler, outgoing):
1743 # we include the tags fnode cache for the bundle changeset
1743 # we include the tags fnode cache for the bundle changeset
1744 # (as an optional part)
1744 # (as an optional part)
1745 cache = tags.hgtagsfnodescache(repo.unfiltered())
1745 cache = tags.hgtagsfnodescache(repo.unfiltered())
1746 chunks = []
1746 chunks = []
1747
1747
1748 # .hgtags fnodes are only relevant for head changesets. While we could
1748 # .hgtags fnodes are only relevant for head changesets. While we could
1749 # transfer values for all known nodes, there will likely be little to
1749 # transfer values for all known nodes, there will likely be little to
1750 # no benefit.
1750 # no benefit.
1751 #
1751 #
1752 # We don't bother using a generator to produce output data because
1752 # We don't bother using a generator to produce output data because
1753 # a) we only have 40 bytes per head and even esoteric numbers of heads
1753 # a) we only have 40 bytes per head and even esoteric numbers of heads
1754 # consume little memory (1M heads is 40MB) b) we don't want to send the
1754 # consume little memory (1M heads is 40MB) b) we don't want to send the
1755 # part if we don't have entries and knowing if we have entries requires
1755 # part if we don't have entries and knowing if we have entries requires
1756 # cache lookups.
1756 # cache lookups.
1757 for node in outgoing.ancestorsof:
1757 for node in outgoing.ancestorsof:
1758 # Don't compute missing, as this may slow down serving.
1758 # Don't compute missing, as this may slow down serving.
1759 fnode = cache.getfnode(node, computemissing=False)
1759 fnode = cache.getfnode(node, computemissing=False)
1760 if fnode is not None:
1760 if fnode is not None:
1761 chunks.extend([node, fnode])
1761 chunks.extend([node, fnode])
1762
1762
1763 if chunks:
1763 if chunks:
1764 bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks))
1764 bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks))
1765
1765
1766
1766
1767 def addpartrevbranchcache(repo, bundler, outgoing):
1767 def addpartrevbranchcache(repo, bundler, outgoing):
1768 # we include the rev branch cache for the bundle changeset
1768 # we include the rev branch cache for the bundle changeset
1769 # (as an optional part)
1769 # (as an optional part)
1770 cache = repo.revbranchcache()
1770 cache = repo.revbranchcache()
1771 cl = repo.unfiltered().changelog
1771 cl = repo.unfiltered().changelog
1772 branchesdata = collections.defaultdict(lambda: (set(), set()))
1772 branchesdata = collections.defaultdict(lambda: (set(), set()))
1773 for node in outgoing.missing:
1773 for node in outgoing.missing:
1774 branch, close = cache.branchinfo(cl.rev(node))
1774 branch, close = cache.branchinfo(cl.rev(node))
1775 branchesdata[branch][close].add(node)
1775 branchesdata[branch][close].add(node)
1776
1776
1777 def generate():
1777 def generate():
1778 for branch, (nodes, closed) in sorted(branchesdata.items()):
1778 for branch, (nodes, closed) in sorted(branchesdata.items()):
1779 utf8branch = encoding.fromlocal(branch)
1779 utf8branch = encoding.fromlocal(branch)
1780 yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
1780 yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
1781 yield utf8branch
1781 yield utf8branch
1782 for n in sorted(nodes):
1782 for n in sorted(nodes):
1783 yield n
1783 yield n
1784 for n in sorted(closed):
1784 for n in sorted(closed):
1785 yield n
1785 yield n
1786
1786
1787 bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False)
1787 bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False)
1788
1788
1789
1789
1790 def _formatrequirementsspec(requirements):
1790 def _formatrequirementsspec(requirements):
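# Illustrative example (added for clarity, not part of the original source):
# for requirements {b'shared', b'store', b'revlogv1'} this returns
# b'revlogv1%2Cstore': the "shared" requirement is dropped and the rest is
# sorted, comma-joined and percent-encoded.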
1791 requirements = [req for req in requirements if req != b"shared"]
1791 requirements = [req for req in requirements if req != b"shared"]
1792 return urlreq.quote(b','.join(sorted(requirements)))
1792 return urlreq.quote(b','.join(sorted(requirements)))
1793
1793
1794
1794
1795 def _formatrequirementsparams(requirements):
1795 def _formatrequirementsparams(requirements):
1796 requirements = _formatrequirementsspec(requirements)
1796 requirements = _formatrequirementsspec(requirements)
1797 params = b"%s%s" % (urlreq.quote(b"requirements="), requirements)
1797 params = b"%s%s" % (urlreq.quote(b"requirements="), requirements)
1798 return params
1798 return params
1799
1799
1800
1800
1801 def addpartbundlestream2(bundler, repo, **kwargs):
1801 def addpartbundlestream2(bundler, repo, **kwargs):
1802 if not kwargs.get('stream', False):
1802 if not kwargs.get('stream', False):
1803 return
1803 return
1804
1804
1805 if not streamclone.allowservergeneration(repo):
1805 if not streamclone.allowservergeneration(repo):
1806 raise error.Abort(
1806 raise error.Abort(
1807 _(
1807 _(
1808 b'stream data requested but server does not allow '
1808 b'stream data requested but server does not allow '
1809 b'this feature'
1809 b'this feature'
1810 ),
1810 ),
1811 hint=_(
1811 hint=_(
1812 b'well-behaved clients should not be '
1812 b'well-behaved clients should not be '
1813 b'requesting stream data from servers not '
1813 b'requesting stream data from servers not '
1814 b'advertising it; the client may be buggy'
1814 b'advertising it; the client may be buggy'
1815 ),
1815 ),
1816 )
1816 )
1817
1817
1818 # Stream clones don't compress well. And compression undermines a
1818 # Stream clones don't compress well. And compression undermines a
1819 # goal of stream clones, which is to be fast. Communicate the desire
1819 # goal of stream clones, which is to be fast. Communicate the desire
1820 # to avoid compression to consumers of the bundle.
1820 # to avoid compression to consumers of the bundle.
1821 bundler.prefercompressed = False
1821 bundler.prefercompressed = False
1822
1822
1823 # get the includes and excludes
1823 # get the includes and excludes
1824 includepats = kwargs.get('includepats')
1824 includepats = kwargs.get('includepats')
1825 excludepats = kwargs.get('excludepats')
1825 excludepats = kwargs.get('excludepats')
1826
1826
1827 narrowstream = repo.ui.configbool(
1827 narrowstream = repo.ui.configbool(
1828 b'experimental', b'server.stream-narrow-clones'
1828 b'experimental', b'server.stream-narrow-clones'
1829 )
1829 )
1830
1830
1831 if (includepats or excludepats) and not narrowstream:
1831 if (includepats or excludepats) and not narrowstream:
1832 raise error.Abort(_(b'server does not support narrow stream clones'))
1832 raise error.Abort(_(b'server does not support narrow stream clones'))
1833
1833
1834 includeobsmarkers = False
1834 includeobsmarkers = False
1835 if repo.obsstore:
1835 if repo.obsstore:
1836 remoteversions = obsmarkersversion(bundler.capabilities)
1836 remoteversions = obsmarkersversion(bundler.capabilities)
1837 if not remoteversions:
1837 if not remoteversions:
1838 raise error.Abort(
1838 raise error.Abort(
1839 _(
1839 _(
1840 b'server has obsolescence markers, but client '
1840 b'server has obsolescence markers, but client '
1841 b'cannot receive them via stream clone'
1841 b'cannot receive them via stream clone'
1842 )
1842 )
1843 )
1843 )
1844 elif repo.obsstore._version in remoteversions:
1844 elif repo.obsstore._version in remoteversions:
1845 includeobsmarkers = True
1845 includeobsmarkers = True
1846
1846
1847 filecount, bytecount, it = streamclone.generatev2(
1847 filecount, bytecount, it = streamclone.generatev2(
1848 repo, includepats, excludepats, includeobsmarkers
1848 repo, includepats, excludepats, includeobsmarkers
1849 )
1849 )
1850 requirements = _formatrequirementsspec(repo.requirements)
1850 requirements = _formatrequirementsspec(repo.requirements)
1851 part = bundler.newpart(b'stream2', data=it)
1851 part = bundler.newpart(b'stream2', data=it)
1852 part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
1852 part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
1853 part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
1853 part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
1854 part.addparam(b'requirements', requirements, mandatory=True)
1854 part.addparam(b'requirements', requirements, mandatory=True)
1855
1855
1856
1856
1857 def buildobsmarkerspart(bundler, markers):
1857 def buildobsmarkerspart(bundler, markers):
1858 """add an obsmarker part to the bundler with <markers>
1858 """add an obsmarker part to the bundler with <markers>
1859
1859
1860 No part is created if markers is empty.
1860 No part is created if markers is empty.
1861 Raises ValueError if the bundler doesn't support any known obsmarker format.
1861 Raises ValueError if the bundler doesn't support any known obsmarker format.
1862 """
1862 """
1863 if not markers:
1863 if not markers:
1864 return None
1864 return None
1865
1865
1866 remoteversions = obsmarkersversion(bundler.capabilities)
1866 remoteversions = obsmarkersversion(bundler.capabilities)
1867 version = obsolete.commonversion(remoteversions)
1867 version = obsolete.commonversion(remoteversions)
1868 if version is None:
1868 if version is None:
1869 raise ValueError(b'bundler does not support common obsmarker format')
1869 raise ValueError(b'bundler does not support common obsmarker format')
1870 stream = obsolete.encodemarkers(markers, True, version=version)
1870 stream = obsolete.encodemarkers(markers, True, version=version)
1871 return bundler.newpart(b'obsmarkers', data=stream)
1871 return bundler.newpart(b'obsmarkers', data=stream)
1872
1872
1873
1873
1874 def writebundle(
1874 def writebundle(
1875 ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None
1875 ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None
1876 ):
1876 ):
1877 """Write a bundle file and return its filename.
1877 """Write a bundle file and return its filename.
1878
1878
1879 Existing files will not be overwritten.
1879 Existing files will not be overwritten.
1880 If no filename is specified, a temporary file is created.
1880 If no filename is specified, a temporary file is created.
1881 bz2 compression can be turned off.
1881 bz2 compression can be turned off.
1882 The bundle file will be deleted in case of errors.
1882 The bundle file will be deleted in case of errors.
1883 """
1883 """
1884
1884
1885 if bundletype == b"HG20":
1885 if bundletype == b"HG20":
1886 bundle = bundle20(ui)
1886 bundle = bundle20(ui)
1887 bundle.setcompression(compression, compopts)
1887 bundle.setcompression(compression, compopts)
1888 part = bundle.newpart(b'changegroup', data=cg.getchunks())
1888 part = bundle.newpart(b'changegroup', data=cg.getchunks())
1889 part.addparam(b'version', cg.version)
1889 part.addparam(b'version', cg.version)
1890 if b'clcount' in cg.extras:
1890 if b'clcount' in cg.extras:
1891 part.addparam(
1891 part.addparam(
1892 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1892 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1893 )
1893 )
1894 chunkiter = bundle.getchunks()
1894 chunkiter = bundle.getchunks()
1895 else:
1895 else:
1896 # compression argument is only for the bundle2 case
1896 # compression argument is only for the bundle2 case
1897 assert compression is None
1897 assert compression is None
1898 if cg.version != b'01':
1898 if cg.version != b'01':
1899 raise error.Abort(
1899 raise error.Abort(
1900 _(b'old bundle types only support v1 changegroups')
1900 _(b'old bundle types only support v1 changegroups')
1901 )
1901 )
1902 header, comp = bundletypes[bundletype]
1902 header, comp = bundletypes[bundletype]
1903 if comp not in util.compengines.supportedbundletypes:
1903 if comp not in util.compengines.supportedbundletypes:
1904 raise error.Abort(_(b'unknown stream compression type: %s') % comp)
1904 raise error.Abort(_(b'unknown stream compression type: %s') % comp)
1905 compengine = util.compengines.forbundletype(comp)
1905 compengine = util.compengines.forbundletype(comp)
1906
1906
1907 def chunkiter():
1907 def chunkiter():
1908 yield header
1908 yield header
1909 for chunk in compengine.compressstream(cg.getchunks(), compopts):
1909 for chunk in compengine.compressstream(cg.getchunks(), compopts):
1910 yield chunk
1910 yield chunk
1911
1911
1912 chunkiter = chunkiter()
1912 chunkiter = chunkiter()
1913
1913
1914 # parse the changegroup data, otherwise we will block
1914 # parse the changegroup data, otherwise we will block
1915 # in case of sshrepo because we don't know the end of the stream
1915 # in case of sshrepo because we don't know the end of the stream
1916 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1916 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1917
1917
1918
1918
1919 def combinechangegroupresults(op):
1919 def combinechangegroupresults(op):
1920 """logic to combine 0 or more addchangegroup results into one"""
1920 """logic to combine 0 or more addchangegroup results into one"""
1921 results = [r.get(b'return', 0) for r in op.records[b'changegroup']]
1921 results = [r.get(b'return', 0) for r in op.records[b'changegroup']]
1922 changedheads = 0
1922 changedheads = 0
1923 result = 1
1923 result = 1
1924 for ret in results:
1924 for ret in results:
1925 # If any changegroup result is 0, return 0
1925 # If any changegroup result is 0, return 0
1926 if ret == 0:
1926 if ret == 0:
1927 result = 0
1927 result = 0
1928 break
1928 break
1929 if ret < -1:
1929 if ret < -1:
1930 changedheads += ret + 1
1930 changedheads += ret + 1
1931 elif ret > 1:
1931 elif ret > 1:
1932 changedheads += ret - 1
1932 changedheads += ret - 1
1933 if changedheads > 0:
1933 if changedheads > 0:
1934 result = 1 + changedheads
1934 result = 1 + changedheads
1935 elif changedheads < 0:
1935 elif changedheads < 0:
1936 result = -1 + changedheads
1936 result = -1 + changedheads
1937 return result
1937 return result
1938
1938
1939
1939
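The folding above encodes head changes in the return value: 0 means some changegroup failed, a value above 1 means heads were added, and a value below -1 means heads were removed. A standalone sketch (illustration only, not the Mercurial implementation) replays that encoding on plain integers:

```python
# Standalone sketch of the return-code folding above (illustration only).
def combine_results(results):
    changedheads = 0
    for ret in results:
        if ret == 0:            # any failed changegroup dominates
            return 0
        if ret < -1:            # -1 - n  =>  n heads removed
            changedheads += ret + 1
        elif ret > 1:           # 1 + n   =>  n heads added
            changedheads += ret - 1
    if changedheads > 0:
        return 1 + changedheads
    if changedheads < 0:
        return -1 + changedheads
    return 1

assert combine_results([1, 1]) == 1      # no change in head count
assert combine_results([3, 2]) == 4      # 2 + 1 heads added overall
assert combine_results([1, 0, 5]) == 0   # a failure wins outright
```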
1940 @parthandler(
1940 @parthandler(
1941 b'changegroup',
1941 b'changegroup',
1942 (
1942 (
1943 b'version',
1943 b'version',
1944 b'nbchanges',
1944 b'nbchanges',
1945 b'exp-sidedata',
1945 b'exp-sidedata',
1946 b'treemanifest',
1946 b'treemanifest',
1947 b'targetphase',
1947 b'targetphase',
1948 ),
1948 ),
1949 )
1949 )
1950 def handlechangegroup(op, inpart):
1950 def handlechangegroup(op, inpart):
1951 """apply a changegroup part on the repo
1951 """apply a changegroup part on the repo
1952
1952
1953 This is a very early implementation that will see massive rework before being
1953 This is a very early implementation that will see massive rework before being
1954 inflicted on any end-user.
1954 inflicted on any end-user.
1955 """
1955 """
1956 from . import localrepo
1956 from . import localrepo
1957
1957
1958 tr = op.gettransaction()
1958 tr = op.gettransaction()
1959 unpackerversion = inpart.params.get(b'version', b'01')
1959 unpackerversion = inpart.params.get(b'version', b'01')
1960 # We should raise an appropriate exception here
1960 # We should raise an appropriate exception here
1961 cg = changegroup.getunbundler(unpackerversion, inpart, None)
1961 cg = changegroup.getunbundler(unpackerversion, inpart, None)
1962 # the source and url passed here are overwritten by the one contained in
1962 # the source and url passed here are overwritten by the one contained in
1963 # the transaction.hookargs argument. So 'bundle2' is a placeholder
1963 # the transaction.hookargs argument. So 'bundle2' is a placeholder
1964 nbchangesets = None
1964 nbchangesets = None
1965 if b'nbchanges' in inpart.params:
1965 if b'nbchanges' in inpart.params:
1966 nbchangesets = int(inpart.params.get(b'nbchanges'))
1966 nbchangesets = int(inpart.params.get(b'nbchanges'))
1967 if (
1967 if (
1968 b'treemanifest' in inpart.params
1968 b'treemanifest' in inpart.params
1969 and repository.TREEMANIFEST_REQUIREMENT not in op.repo.requirements
1969 and requirements.TREEMANIFEST_REQUIREMENT not in op.repo.requirements
1970 ):
1970 ):
1971 if len(op.repo.changelog) != 0:
1971 if len(op.repo.changelog) != 0:
1972 raise error.Abort(
1972 raise error.Abort(
1973 _(
1973 _(
1974 b"bundle contains tree manifests, but local repo is "
1974 b"bundle contains tree manifests, but local repo is "
1975 b"non-empty and does not use tree manifests"
1975 b"non-empty and does not use tree manifests"
1976 )
1976 )
1977 )
1977 )
1978 op.repo.requirements.add(repository.TREEMANIFEST_REQUIREMENT)
1978 op.repo.requirements.add(requirements.TREEMANIFEST_REQUIREMENT)
1979 op.repo.svfs.options = localrepo.resolvestorevfsoptions(
1979 op.repo.svfs.options = localrepo.resolvestorevfsoptions(
1980 op.repo.ui, op.repo.requirements, op.repo.features
1980 op.repo.ui, op.repo.requirements, op.repo.features
1981 )
1981 )
1982 scmutil.writereporequirements(op.repo)
1982 scmutil.writereporequirements(op.repo)
1983
1983
1984 bundlesidedata = bool(b'exp-sidedata' in inpart.params)
1984 bundlesidedata = bool(b'exp-sidedata' in inpart.params)
1985 reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
1985 reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
1986 if reposidedata and not bundlesidedata:
1986 if reposidedata and not bundlesidedata:
1987 msg = b"repository is using sidedata but the bundle source do not"
1987 msg = b"repository is using sidedata but the bundle source do not"
1988 hint = b'this is currently unsupported'
1988 hint = b'this is currently unsupported'
1989 raise error.Abort(msg, hint=hint)
1989 raise error.Abort(msg, hint=hint)
1990
1990
1991 extrakwargs = {}
1991 extrakwargs = {}
1992 targetphase = inpart.params.get(b'targetphase')
1992 targetphase = inpart.params.get(b'targetphase')
1993 if targetphase is not None:
1993 if targetphase is not None:
1994 extrakwargs['targetphase'] = int(targetphase)
1994 extrakwargs['targetphase'] = int(targetphase)
1995 ret = _processchangegroup(
1995 ret = _processchangegroup(
1996 op,
1996 op,
1997 cg,
1997 cg,
1998 tr,
1998 tr,
1999 b'bundle2',
1999 b'bundle2',
2000 b'bundle2',
2000 b'bundle2',
2001 expectedtotal=nbchangesets,
2001 expectedtotal=nbchangesets,
2002 **extrakwargs
2002 **extrakwargs
2003 )
2003 )
2004 if op.reply is not None:
2004 if op.reply is not None:
2005 # This is definitely not the final form of this
2005 # This is definitely not the final form of this
2006 # return. But one need to start somewhere.
2006 # return. But one need to start somewhere.
2007 part = op.reply.newpart(b'reply:changegroup', mandatory=False)
2007 part = op.reply.newpart(b'reply:changegroup', mandatory=False)
2008 part.addparam(
2008 part.addparam(
2009 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2009 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2010 )
2010 )
2011 part.addparam(b'return', b'%i' % ret, mandatory=False)
2011 part.addparam(b'return', b'%i' % ret, mandatory=False)
2012 assert not inpart.read()
2012 assert not inpart.read()
2013
2013
2014
2014
2015 _remotechangegroupparams = tuple(
2015 _remotechangegroupparams = tuple(
2016 [b'url', b'size', b'digests']
2016 [b'url', b'size', b'digests']
2017 + [b'digest:%s' % k for k in util.DIGESTS.keys()]
2017 + [b'digest:%s' % k for k in util.DIGESTS.keys()]
2018 )
2018 )
2019
2019
2020
2020
2021 @parthandler(b'remote-changegroup', _remotechangegroupparams)
2021 @parthandler(b'remote-changegroup', _remotechangegroupparams)
2022 def handleremotechangegroup(op, inpart):
2022 def handleremotechangegroup(op, inpart):
2023 """apply a bundle10 on the repo, given an url and validation information
2023 """apply a bundle10 on the repo, given an url and validation information
2024
2024
2025 All the information about the remote bundle to import are given as
2025 All the information about the remote bundle to import are given as
2026 parameters. The parameters include:
2026 parameters. The parameters include:
2027 - url: the url to the bundle10.
2027 - url: the url to the bundle10.
2028 - size: the bundle10 file size. It is used to validate that what was
2028 - size: the bundle10 file size. It is used to validate that what was
2029 retrieved by the client matches the server's knowledge about the bundle.
2029 retrieved by the client matches the server's knowledge about the bundle.
2030 - digests: a space separated list of the digest types provided as
2030 - digests: a space separated list of the digest types provided as
2031 parameters.
2031 parameters.
2032 - digest:<digest-type>: the hexadecimal representation of the digest with
2032 - digest:<digest-type>: the hexadecimal representation of the digest with
2033 that name. Like the size, it is used to validate that what was retrieved by
2033 that name. Like the size, it is used to validate that what was retrieved by
2034 the client matches what the server knows about the bundle.
2034 the client matches what the server knows about the bundle.
2035
2035
2036 When multiple digest types are given, all of them are checked.
2036 When multiple digest types are given, all of them are checked.
2037 """
2037 """
2038 try:
2038 try:
2039 raw_url = inpart.params[b'url']
2039 raw_url = inpart.params[b'url']
2040 except KeyError:
2040 except KeyError:
2041 raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
2041 raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
2042 parsed_url = util.url(raw_url)
2042 parsed_url = util.url(raw_url)
2043 if parsed_url.scheme not in capabilities[b'remote-changegroup']:
2043 if parsed_url.scheme not in capabilities[b'remote-changegroup']:
2044 raise error.Abort(
2044 raise error.Abort(
2045 _(b'remote-changegroup does not support %s urls')
2045 _(b'remote-changegroup does not support %s urls')
2046 % parsed_url.scheme
2046 % parsed_url.scheme
2047 )
2047 )
2048
2048
2049 try:
2049 try:
2050 size = int(inpart.params[b'size'])
2050 size = int(inpart.params[b'size'])
2051 except ValueError:
2051 except ValueError:
2052 raise error.Abort(
2052 raise error.Abort(
2053 _(b'remote-changegroup: invalid value for param "%s"') % b'size'
2053 _(b'remote-changegroup: invalid value for param "%s"') % b'size'
2054 )
2054 )
2055 except KeyError:
2055 except KeyError:
2056 raise error.Abort(
2056 raise error.Abort(
2057 _(b'remote-changegroup: missing "%s" param') % b'size'
2057 _(b'remote-changegroup: missing "%s" param') % b'size'
2058 )
2058 )
2059
2059
2060 digests = {}
2060 digests = {}
2061 for typ in inpart.params.get(b'digests', b'').split():
2061 for typ in inpart.params.get(b'digests', b'').split():
2062 param = b'digest:%s' % typ
2062 param = b'digest:%s' % typ
2063 try:
2063 try:
2064 value = inpart.params[param]
2064 value = inpart.params[param]
2065 except KeyError:
2065 except KeyError:
2066 raise error.Abort(
2066 raise error.Abort(
2067 _(b'remote-changegroup: missing "%s" param') % param
2067 _(b'remote-changegroup: missing "%s" param') % param
2068 )
2068 )
2069 digests[typ] = value
2069 digests[typ] = value
2070
2070
2071 real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)
2071 real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)
2072
2072
2073 tr = op.gettransaction()
2073 tr = op.gettransaction()
2074 from . import exchange
2074 from . import exchange
2075
2075
2076 cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
2076 cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
2077 if not isinstance(cg, changegroup.cg1unpacker):
2077 if not isinstance(cg, changegroup.cg1unpacker):
2078 raise error.Abort(
2078 raise error.Abort(
2079 _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
2079 _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
2080 )
2080 )
2081 ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
2081 ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
2082 if op.reply is not None:
2082 if op.reply is not None:
2083 # This is definitely not the final form of this
2083 # This is definitely not the final form of this
2084 # return. But one need to start somewhere.
2084 # return. But one need to start somewhere.
2085 part = op.reply.newpart(b'reply:changegroup')
2085 part = op.reply.newpart(b'reply:changegroup')
2086 part.addparam(
2086 part.addparam(
2087 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2087 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2088 )
2088 )
2089 part.addparam(b'return', b'%i' % ret, mandatory=False)
2089 part.addparam(b'return', b'%i' % ret, mandatory=False)
2090 try:
2090 try:
2091 real_part.validate()
2091 real_part.validate()
2092 except error.Abort as e:
2092 except error.Abort as e:
2093 raise error.Abort(
2093 raise error.Abort(
2094 _(b'bundle at %s is corrupted:\n%s')
2094 _(b'bundle at %s is corrupted:\n%s')
2095 % (util.hidepassword(raw_url), bytes(e))
2095 % (util.hidepassword(raw_url), bytes(e))
2096 )
2096 )
2097 assert not inpart.read()
2097 assert not inpart.read()
2098
2098
2099
2099
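The part above never embeds the bundle itself; the client fetches it from the given url and then checks the byte count and every advertised digest. A minimal stdlib sketch of that validation step, assuming the bundle has already been downloaded (the helper name is invented for illustration; Mercurial's real check lives in util.digestchecker):

```python
import hashlib

def validate_remote_bundle(data, expected_size, expected_digests):
    """data: fetched bundle10 bytes; expected_digests: {'sha1': hexdigest, ...}."""
    if len(data) != expected_size:
        raise ValueError('size mismatch: got %d, expected %d'
                         % (len(data), expected_size))
    for name, want in expected_digests.items():
        if hashlib.new(name, data).hexdigest() != want:
            raise ValueError('%s digest mismatch' % name)

payload = b'HG10UN...'  # placeholder for the downloaded bundle10 stream
validate_remote_bundle(payload, len(payload),
                       {'sha1': hashlib.sha1(payload).hexdigest()})
```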
2100 @parthandler(b'reply:changegroup', (b'return', b'in-reply-to'))
2100 @parthandler(b'reply:changegroup', (b'return', b'in-reply-to'))
2101 def handlereplychangegroup(op, inpart):
2101 def handlereplychangegroup(op, inpart):
2102 ret = int(inpart.params[b'return'])
2102 ret = int(inpart.params[b'return'])
2103 replyto = int(inpart.params[b'in-reply-to'])
2103 replyto = int(inpart.params[b'in-reply-to'])
2104 op.records.add(b'changegroup', {b'return': ret}, replyto)
2104 op.records.add(b'changegroup', {b'return': ret}, replyto)
2105
2105
2106
2106
2107 @parthandler(b'check:bookmarks')
2107 @parthandler(b'check:bookmarks')
2108 def handlecheckbookmarks(op, inpart):
2108 def handlecheckbookmarks(op, inpart):
2109 """check location of bookmarks
2109 """check location of bookmarks
2110
2110
2111 This part is used to detect push races regarding bookmarks: it
2111 This part is used to detect push races regarding bookmarks: it
2112 contains binary encoded (bookmark, node) tuples. If the local state does
2112 contains binary encoded (bookmark, node) tuples. If the local state does
2113 not match the one in the part, a PushRaced exception is raised
2113 not match the one in the part, a PushRaced exception is raised
2114 """
2114 """
2115 bookdata = bookmarks.binarydecode(inpart)
2115 bookdata = bookmarks.binarydecode(inpart)
2116
2116
2117 msgstandard = (
2117 msgstandard = (
2118 b'remote repository changed while pushing - please try again '
2118 b'remote repository changed while pushing - please try again '
2119 b'(bookmark "%s" move from %s to %s)'
2119 b'(bookmark "%s" move from %s to %s)'
2120 )
2120 )
2121 msgmissing = (
2121 msgmissing = (
2122 b'remote repository changed while pushing - please try again '
2122 b'remote repository changed while pushing - please try again '
2123 b'(bookmark "%s" is missing, expected %s)'
2123 b'(bookmark "%s" is missing, expected %s)'
2124 )
2124 )
2125 msgexist = (
2125 msgexist = (
2126 b'remote repository changed while pushing - please try again '
2126 b'remote repository changed while pushing - please try again '
2127 b'(bookmark "%s" set on %s, expected missing)'
2127 b'(bookmark "%s" set on %s, expected missing)'
2128 )
2128 )
2129 for book, node in bookdata:
2129 for book, node in bookdata:
2130 currentnode = op.repo._bookmarks.get(book)
2130 currentnode = op.repo._bookmarks.get(book)
2131 if currentnode != node:
2131 if currentnode != node:
2132 if node is None:
2132 if node is None:
2133 finalmsg = msgexist % (book, nodemod.short(currentnode))
2133 finalmsg = msgexist % (book, nodemod.short(currentnode))
2134 elif currentnode is None:
2134 elif currentnode is None:
2135 finalmsg = msgmissing % (book, nodemod.short(node))
2135 finalmsg = msgmissing % (book, nodemod.short(node))
2136 else:
2136 else:
2137 finalmsg = msgstandard % (
2137 finalmsg = msgstandard % (
2138 book,
2138 book,
2139 nodemod.short(node),
2139 nodemod.short(node),
2140 nodemod.short(currentnode),
2140 nodemod.short(currentnode),
2141 )
2141 )
2142 raise error.PushRaced(finalmsg)
2142 raise error.PushRaced(finalmsg)
2143
2143
2144
2144
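The comparison above distinguishes three race cases per bookmark: moved, unexpectedly missing, and unexpectedly present. A plain-dict sketch of the same check (hypothetical helper; the real code works against op.repo._bookmarks):

```python
def check_bookmarks(expected, current):
    """Both arguments map bookmark name -> node, with None meaning 'absent'."""
    for book, node in expected.items():
        seen = current.get(book)
        if seen == node:
            continue
        if node is None:
            raise RuntimeError('bookmark %r set on %s, expected missing' % (book, seen))
        if seen is None:
            raise RuntimeError('bookmark %r is missing, expected %s' % (book, node))
        raise RuntimeError('bookmark %r moved from %s to %s' % (book, node, seen))

check_bookmarks({'@': 'abc123'}, {'@': 'abc123'})  # matching state: no exception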
2145 @parthandler(b'check:heads')
2145 @parthandler(b'check:heads')
2146 def handlecheckheads(op, inpart):
2146 def handlecheckheads(op, inpart):
2147 """check that head of the repo did not change
2147 """check that head of the repo did not change
2148
2148
2149 This is used to detect a push race when using unbundle.
2149 This is used to detect a push race when using unbundle.
2150 This replaces the "heads" argument of unbundle."""
2150 This replaces the "heads" argument of unbundle."""
2151 h = inpart.read(20)
2151 h = inpart.read(20)
2152 heads = []
2152 heads = []
2153 while len(h) == 20:
2153 while len(h) == 20:
2154 heads.append(h)
2154 heads.append(h)
2155 h = inpart.read(20)
2155 h = inpart.read(20)
2156 assert not h
2156 assert not h
2157 # Trigger a transaction so that we are guaranteed to have the lock now.
2157 # Trigger a transaction so that we are guaranteed to have the lock now.
2158 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2158 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2159 op.gettransaction()
2159 op.gettransaction()
2160 if sorted(heads) != sorted(op.repo.heads()):
2160 if sorted(heads) != sorted(op.repo.heads()):
2161 raise error.PushRaced(
2161 raise error.PushRaced(
2162 b'remote repository changed while pushing - please try again'
2162 b'remote repository changed while pushing - please try again'
2163 )
2163 )
2164
2164
2165
2165
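Both check:heads above and check:updated-heads below use the same wire framing: the payload is a bare concatenation of 20-byte binary nodes, read until the stream runs dry, with any trailing partial record treated as a protocol error. A stdlib sketch of that read loop (illustration only):

```python
import io

def read_nodes(stream, width=20):
    # Collect fixed-width binary nodes until the payload is exhausted.
    nodes = []
    chunk = stream.read(width)
    while len(chunk) == width:
        nodes.append(chunk)
        chunk = stream.read(width)
    assert not chunk, 'trailing partial node in part payload'
    return nodes

payload = io.BytesIO(b'\x11' * 20 + b'\x22' * 20)
assert len(read_nodes(payload)) == 2
```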
2166 @parthandler(b'check:updated-heads')
2166 @parthandler(b'check:updated-heads')
2167 def handlecheckupdatedheads(op, inpart):
2167 def handlecheckupdatedheads(op, inpart):
2168 """check for race on the heads touched by a push
2168 """check for race on the heads touched by a push
2169
2169
2170 This is similar to 'check:heads' but focuses on the heads actually updated
2170 This is similar to 'check:heads' but focuses on the heads actually updated
2171 during the push. If other activity happens on unrelated heads, it is
2171 during the push. If other activity happens on unrelated heads, it is
2172 ignored.
2172 ignored.
2173
2173
2174 This allows servers with high traffic to avoid push contention as long as
2174 This allows servers with high traffic to avoid push contention as long as
2175 only unrelated parts of the graph are involved."""
2175 only unrelated parts of the graph are involved."""
2176 h = inpart.read(20)
2176 h = inpart.read(20)
2177 heads = []
2177 heads = []
2178 while len(h) == 20:
2178 while len(h) == 20:
2179 heads.append(h)
2179 heads.append(h)
2180 h = inpart.read(20)
2180 h = inpart.read(20)
2181 assert not h
2181 assert not h
2182 # trigger a transaction so that we are guaranteed to have the lock now.
2182 # trigger a transaction so that we are guaranteed to have the lock now.
2183 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2183 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2184 op.gettransaction()
2184 op.gettransaction()
2185
2185
2186 currentheads = set()
2186 currentheads = set()
2187 for ls in op.repo.branchmap().iterheads():
2187 for ls in op.repo.branchmap().iterheads():
2188 currentheads.update(ls)
2188 currentheads.update(ls)
2189
2189
2190 for h in heads:
2190 for h in heads:
2191 if h not in currentheads:
2191 if h not in currentheads:
2192 raise error.PushRaced(
2192 raise error.PushRaced(
2193 b'remote repository changed while pushing - '
2193 b'remote repository changed while pushing - '
2194 b'please try again'
2194 b'please try again'
2195 )
2195 )
2196
2196
2197
2197
2198 @parthandler(b'check:phases')
2198 @parthandler(b'check:phases')
2199 def handlecheckphases(op, inpart):
2199 def handlecheckphases(op, inpart):
2200 """check that phase boundaries of the repository did not change
2200 """check that phase boundaries of the repository did not change
2201
2201
2202 This is used to detect a push race.
2202 This is used to detect a push race.
2203 """
2203 """
2204 phasetonodes = phases.binarydecode(inpart)
2204 phasetonodes = phases.binarydecode(inpart)
2205 unfi = op.repo.unfiltered()
2205 unfi = op.repo.unfiltered()
2206 cl = unfi.changelog
2206 cl = unfi.changelog
2207 phasecache = unfi._phasecache
2207 phasecache = unfi._phasecache
2208 msg = (
2208 msg = (
2209 b'remote repository changed while pushing - please try again '
2209 b'remote repository changed while pushing - please try again '
2210 b'(%s is %s expected %s)'
2210 b'(%s is %s expected %s)'
2211 )
2211 )
2212 for expectedphase, nodes in pycompat.iteritems(phasetonodes):
2212 for expectedphase, nodes in pycompat.iteritems(phasetonodes):
2213 for n in nodes:
2213 for n in nodes:
2214 actualphase = phasecache.phase(unfi, cl.rev(n))
2214 actualphase = phasecache.phase(unfi, cl.rev(n))
2215 if actualphase != expectedphase:
2215 if actualphase != expectedphase:
2216 finalmsg = msg % (
2216 finalmsg = msg % (
2217 nodemod.short(n),
2217 nodemod.short(n),
2218 phases.phasenames[actualphase],
2218 phases.phasenames[actualphase],
2219 phases.phasenames[expectedphase],
2219 phases.phasenames[expectedphase],
2220 )
2220 )
2221 raise error.PushRaced(finalmsg)
2221 raise error.PushRaced(finalmsg)
2222
2222
2223
2223
2224 @parthandler(b'output')
2224 @parthandler(b'output')
2225 def handleoutput(op, inpart):
2225 def handleoutput(op, inpart):
2226 """forward output captured on the server to the client"""
2226 """forward output captured on the server to the client"""
2227 for line in inpart.read().splitlines():
2227 for line in inpart.read().splitlines():
2228 op.ui.status(_(b'remote: %s\n') % line)
2228 op.ui.status(_(b'remote: %s\n') % line)
2229
2229
2230
2230
2231 @parthandler(b'replycaps')
2231 @parthandler(b'replycaps')
2232 def handlereplycaps(op, inpart):
2232 def handlereplycaps(op, inpart):
2233 """Notify that a reply bundle should be created
2233 """Notify that a reply bundle should be created
2234
2234
2235 The payload contains the capabilities information for the reply"""
2235 The payload contains the capabilities information for the reply"""
2236 caps = decodecaps(inpart.read())
2236 caps = decodecaps(inpart.read())
2237 if op.reply is None:
2237 if op.reply is None:
2238 op.reply = bundle20(op.ui, caps)
2238 op.reply = bundle20(op.ui, caps)
2239
2239
2240
2240
2241 class AbortFromPart(error.Abort):
2241 class AbortFromPart(error.Abort):
2242 """Sub-class of Abort that denotes an error from a bundle2 part."""
2242 """Sub-class of Abort that denotes an error from a bundle2 part."""
2243
2243
2244
2244
2245 @parthandler(b'error:abort', (b'message', b'hint'))
2245 @parthandler(b'error:abort', (b'message', b'hint'))
2246 def handleerrorabort(op, inpart):
2246 def handleerrorabort(op, inpart):
2247 """Used to transmit abort error over the wire"""
2247 """Used to transmit abort error over the wire"""
2248 raise AbortFromPart(
2248 raise AbortFromPart(
2249 inpart.params[b'message'], hint=inpart.params.get(b'hint')
2249 inpart.params[b'message'], hint=inpart.params.get(b'hint')
2250 )
2250 )
2251
2251
2252
2252
2253 @parthandler(
2253 @parthandler(
2254 b'error:pushkey',
2254 b'error:pushkey',
2255 (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'),
2255 (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'),
2256 )
2256 )
2257 def handleerrorpushkey(op, inpart):
2257 def handleerrorpushkey(op, inpart):
2258 """Used to transmit failure of a mandatory pushkey over the wire"""
2258 """Used to transmit failure of a mandatory pushkey over the wire"""
2259 kwargs = {}
2259 kwargs = {}
2260 for name in (b'namespace', b'key', b'new', b'old', b'ret'):
2260 for name in (b'namespace', b'key', b'new', b'old', b'ret'):
2261 value = inpart.params.get(name)
2261 value = inpart.params.get(name)
2262 if value is not None:
2262 if value is not None:
2263 kwargs[name] = value
2263 kwargs[name] = value
2264 raise error.PushkeyFailed(
2264 raise error.PushkeyFailed(
2265 inpart.params[b'in-reply-to'], **pycompat.strkwargs(kwargs)
2265 inpart.params[b'in-reply-to'], **pycompat.strkwargs(kwargs)
2266 )
2266 )
2267
2267
2268
2268
2269 @parthandler(b'error:unsupportedcontent', (b'parttype', b'params'))
2269 @parthandler(b'error:unsupportedcontent', (b'parttype', b'params'))
2270 def handleerrorunsupportedcontent(op, inpart):
2270 def handleerrorunsupportedcontent(op, inpart):
2271 """Used to transmit unknown content error over the wire"""
2271 """Used to transmit unknown content error over the wire"""
2272 kwargs = {}
2272 kwargs = {}
2273 parttype = inpart.params.get(b'parttype')
2273 parttype = inpart.params.get(b'parttype')
2274 if parttype is not None:
2274 if parttype is not None:
2275 kwargs[b'parttype'] = parttype
2275 kwargs[b'parttype'] = parttype
2276 params = inpart.params.get(b'params')
2276 params = inpart.params.get(b'params')
2277 if params is not None:
2277 if params is not None:
2278 kwargs[b'params'] = params.split(b'\0')
2278 kwargs[b'params'] = params.split(b'\0')
2279
2279
2280 raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs))
2280 raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs))
2281
2281
2282
2282
2283 @parthandler(b'error:pushraced', (b'message',))
2283 @parthandler(b'error:pushraced', (b'message',))
2284 def handleerrorpushraced(op, inpart):
2284 def handleerrorpushraced(op, inpart):
2285 """Used to transmit push race error over the wire"""
2285 """Used to transmit push race error over the wire"""
2286 raise error.ResponseError(_(b'push failed:'), inpart.params[b'message'])
2286 raise error.ResponseError(_(b'push failed:'), inpart.params[b'message'])
2287
2287
2288
2288
2289 @parthandler(b'listkeys', (b'namespace',))
2289 @parthandler(b'listkeys', (b'namespace',))
2290 def handlelistkeys(op, inpart):
2290 def handlelistkeys(op, inpart):
2291 """retrieve pushkey namespace content stored in a bundle2"""
2291 """retrieve pushkey namespace content stored in a bundle2"""
2292 namespace = inpart.params[b'namespace']
2292 namespace = inpart.params[b'namespace']
2293 r = pushkey.decodekeys(inpart.read())
2293 r = pushkey.decodekeys(inpart.read())
2294 op.records.add(b'listkeys', (namespace, r))
2294 op.records.add(b'listkeys', (namespace, r))
2295
2295
2296
2296
2297 @parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new'))
2297 @parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new'))
2298 def handlepushkey(op, inpart):
2298 def handlepushkey(op, inpart):
2299 """process a pushkey request"""
2299 """process a pushkey request"""
2300 dec = pushkey.decode
2300 dec = pushkey.decode
2301 namespace = dec(inpart.params[b'namespace'])
2301 namespace = dec(inpart.params[b'namespace'])
2302 key = dec(inpart.params[b'key'])
2302 key = dec(inpart.params[b'key'])
2303 old = dec(inpart.params[b'old'])
2303 old = dec(inpart.params[b'old'])
2304 new = dec(inpart.params[b'new'])
2304 new = dec(inpart.params[b'new'])
2305 # Grab the transaction to ensure that we have the lock before performing the
2305 # Grab the transaction to ensure that we have the lock before performing the
2306 # pushkey.
2306 # pushkey.
2307 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2307 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2308 op.gettransaction()
2308 op.gettransaction()
2309 ret = op.repo.pushkey(namespace, key, old, new)
2309 ret = op.repo.pushkey(namespace, key, old, new)
2310 record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new}
2310 record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new}
2311 op.records.add(b'pushkey', record)
2311 op.records.add(b'pushkey', record)
2312 if op.reply is not None:
2312 if op.reply is not None:
2313 rpart = op.reply.newpart(b'reply:pushkey')
2313 rpart = op.reply.newpart(b'reply:pushkey')
2314 rpart.addparam(
2314 rpart.addparam(
2315 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2315 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2316 )
2316 )
2317 rpart.addparam(b'return', b'%i' % ret, mandatory=False)
2317 rpart.addparam(b'return', b'%i' % ret, mandatory=False)
2318 if inpart.mandatory and not ret:
2318 if inpart.mandatory and not ret:
2319 kwargs = {}
2319 kwargs = {}
2320 for key in (b'namespace', b'key', b'new', b'old', b'ret'):
2320 for key in (b'namespace', b'key', b'new', b'old', b'ret'):
2321 if key in inpart.params:
2321 if key in inpart.params:
2322 kwargs[key] = inpart.params[key]
2322 kwargs[key] = inpart.params[key]
2323 raise error.PushkeyFailed(
2323 raise error.PushkeyFailed(
2324 partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs)
2324 partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs)
2325 )
2325 )
2326
2326
2327
2327
2328 @parthandler(b'bookmarks')
2328 @parthandler(b'bookmarks')
2329 def handlebookmark(op, inpart):
2329 def handlebookmark(op, inpart):
2330 """transmit bookmark information
2330 """transmit bookmark information
2331
2331
2332 The part contains binary encoded bookmark information.
2332 The part contains binary encoded bookmark information.
2333
2333
2334 The exact behavior of this part can be controlled by the 'bookmarks' mode
2334 The exact behavior of this part can be controlled by the 'bookmarks' mode
2335 on the bundle operation.
2335 on the bundle operation.
2336
2336
2337 When mode is 'apply' (the default) the bookmark information is applied as
2337 When mode is 'apply' (the default) the bookmark information is applied as
2338 is to the unbundling repository. Make sure a 'check:bookmarks' part is
2338 is to the unbundling repository. Make sure a 'check:bookmarks' part is
2339 issued earlier to check for push races in such an update. This behavior is
2339 issued earlier to check for push races in such an update. This behavior is
2340 suitable for pushing.
2340 suitable for pushing.
2341
2341
2342 When mode is 'records', the information is recorded into the 'bookmarks'
2342 When mode is 'records', the information is recorded into the 'bookmarks'
2343 records of the bundle operation. This behavior is suitable for pulling.
2343 records of the bundle operation. This behavior is suitable for pulling.
2344 """
2344 """
2345 changes = bookmarks.binarydecode(inpart)
2345 changes = bookmarks.binarydecode(inpart)
2346
2346
2347 pushkeycompat = op.repo.ui.configbool(
2347 pushkeycompat = op.repo.ui.configbool(
2348 b'server', b'bookmarks-pushkey-compat'
2348 b'server', b'bookmarks-pushkey-compat'
2349 )
2349 )
2350 bookmarksmode = op.modes.get(b'bookmarks', b'apply')
2350 bookmarksmode = op.modes.get(b'bookmarks', b'apply')
2351
2351
2352 if bookmarksmode == b'apply':
2352 if bookmarksmode == b'apply':
2353 tr = op.gettransaction()
2353 tr = op.gettransaction()
2354 bookstore = op.repo._bookmarks
2354 bookstore = op.repo._bookmarks
2355 if pushkeycompat:
2355 if pushkeycompat:
2356 allhooks = []
2356 allhooks = []
2357 for book, node in changes:
2357 for book, node in changes:
2358 hookargs = tr.hookargs.copy()
2358 hookargs = tr.hookargs.copy()
2359 hookargs[b'pushkeycompat'] = b'1'
2359 hookargs[b'pushkeycompat'] = b'1'
2360 hookargs[b'namespace'] = b'bookmarks'
2360 hookargs[b'namespace'] = b'bookmarks'
2361 hookargs[b'key'] = book
2361 hookargs[b'key'] = book
2362 hookargs[b'old'] = nodemod.hex(bookstore.get(book, b''))
2362 hookargs[b'old'] = nodemod.hex(bookstore.get(book, b''))
2363 hookargs[b'new'] = nodemod.hex(
2363 hookargs[b'new'] = nodemod.hex(
2364 node if node is not None else b''
2364 node if node is not None else b''
2365 )
2365 )
2366 allhooks.append(hookargs)
2366 allhooks.append(hookargs)
2367
2367
2368 for hookargs in allhooks:
2368 for hookargs in allhooks:
2369 op.repo.hook(
2369 op.repo.hook(
2370 b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
2370 b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
2371 )
2371 )
2372
2372
2373 for book, node in changes:
2373 for book, node in changes:
2374 if bookmarks.isdivergent(book):
2374 if bookmarks.isdivergent(book):
2375 msg = _(b'cannot accept divergent bookmark %s!') % book
2375 msg = _(b'cannot accept divergent bookmark %s!') % book
2376 raise error.Abort(msg)
2376 raise error.Abort(msg)
2377
2377
2378 bookstore.applychanges(op.repo, op.gettransaction(), changes)
2378 bookstore.applychanges(op.repo, op.gettransaction(), changes)
2379
2379
2380 if pushkeycompat:
2380 if pushkeycompat:
2381
2381
2382 def runhook(unused_success):
2382 def runhook(unused_success):
2383 for hookargs in allhooks:
2383 for hookargs in allhooks:
2384 op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))
2384 op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))
2385
2385
2386 op.repo._afterlock(runhook)
2386 op.repo._afterlock(runhook)
2387
2387
2388 elif bookmarksmode == b'records':
2388 elif bookmarksmode == b'records':
2389 for book, node in changes:
2389 for book, node in changes:
2390 record = {b'bookmark': book, b'node': node}
2390 record = {b'bookmark': book, b'node': node}
2391 op.records.add(b'bookmarks', record)
2391 op.records.add(b'bookmarks', record)
2392 else:
2392 else:
2393 raise error.ProgrammingError(
2393 raise error.ProgrammingError(
2394 b'unknown bookmark mode: %s' % bookmarksmode
2394 b'unknown bookmark mode: %s' % bookmarksmode
2395 )
2395 )
2396
2396
2397
2397
2398 @parthandler(b'phase-heads')
2398 @parthandler(b'phase-heads')
2399 def handlephases(op, inpart):
2399 def handlephases(op, inpart):
2400 """apply phases from bundle part to repo"""
2400 """apply phases from bundle part to repo"""
2401 headsbyphase = phases.binarydecode(inpart)
2401 headsbyphase = phases.binarydecode(inpart)
2402 phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase)
2402 phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase)
2403
2403
2404
2404
2405 @parthandler(b'reply:pushkey', (b'return', b'in-reply-to'))
2405 @parthandler(b'reply:pushkey', (b'return', b'in-reply-to'))
2406 def handlepushkeyreply(op, inpart):
2406 def handlepushkeyreply(op, inpart):
2407 """retrieve the result of a pushkey request"""
2407 """retrieve the result of a pushkey request"""
2408 ret = int(inpart.params[b'return'])
2408 ret = int(inpart.params[b'return'])
2409 partid = int(inpart.params[b'in-reply-to'])
2409 partid = int(inpart.params[b'in-reply-to'])
2410 op.records.add(b'pushkey', {b'return': ret}, partid)
2410 op.records.add(b'pushkey', {b'return': ret}, partid)
2411
2411
2412
2412
2413 @parthandler(b'obsmarkers')
2413 @parthandler(b'obsmarkers')
2414 def handleobsmarker(op, inpart):
2414 def handleobsmarker(op, inpart):
2415 """add a stream of obsmarkers to the repo"""
2415 """add a stream of obsmarkers to the repo"""
2416 tr = op.gettransaction()
2416 tr = op.gettransaction()
2417 markerdata = inpart.read()
2417 markerdata = inpart.read()
2418 if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'):
2418 if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'):
2419 op.ui.writenoi18n(
2419 op.ui.writenoi18n(
2420 b'obsmarker-exchange: %i bytes received\n' % len(markerdata)
2420 b'obsmarker-exchange: %i bytes received\n' % len(markerdata)
2421 )
2421 )
2422 # The mergemarkers call will crash if marker creation is not enabled.
2422 # The mergemarkers call will crash if marker creation is not enabled.
2423 # we want to avoid this if the part is advisory.
2423 # we want to avoid this if the part is advisory.
2424 if not inpart.mandatory and op.repo.obsstore.readonly:
2424 if not inpart.mandatory and op.repo.obsstore.readonly:
2425 op.repo.ui.debug(
2425 op.repo.ui.debug(
2426 b'ignoring obsolescence markers, feature not enabled\n'
2426 b'ignoring obsolescence markers, feature not enabled\n'
2427 )
2427 )
2428 return
2428 return
2429 new = op.repo.obsstore.mergemarkers(tr, markerdata)
2429 new = op.repo.obsstore.mergemarkers(tr, markerdata)
2430 op.repo.invalidatevolatilesets()
2430 op.repo.invalidatevolatilesets()
2431 op.records.add(b'obsmarkers', {b'new': new})
2431 op.records.add(b'obsmarkers', {b'new': new})
2432 if op.reply is not None:
2432 if op.reply is not None:
2433 rpart = op.reply.newpart(b'reply:obsmarkers')
2433 rpart = op.reply.newpart(b'reply:obsmarkers')
2434 rpart.addparam(
2434 rpart.addparam(
2435 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2435 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2436 )
2436 )
2437 rpart.addparam(b'new', b'%i' % new, mandatory=False)
2437 rpart.addparam(b'new', b'%i' % new, mandatory=False)
2438
2438
2439
2439
2440 @parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to'))
2440 @parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to'))
2441 def handleobsmarkerreply(op, inpart):
2441 def handleobsmarkerreply(op, inpart):
2442 """retrieve the result of a pushkey request"""
2442 """retrieve the result of a pushkey request"""
2443 ret = int(inpart.params[b'new'])
2443 ret = int(inpart.params[b'new'])
2444 partid = int(inpart.params[b'in-reply-to'])
2444 partid = int(inpart.params[b'in-reply-to'])
2445 op.records.add(b'obsmarkers', {b'new': ret}, partid)
2445 op.records.add(b'obsmarkers', {b'new': ret}, partid)
2446
2446
2447
2447
2448 @parthandler(b'hgtagsfnodes')
2448 @parthandler(b'hgtagsfnodes')
2449 def handlehgtagsfnodes(op, inpart):
2449 def handlehgtagsfnodes(op, inpart):
2450 """Applies .hgtags fnodes cache entries to the local repo.
2450 """Applies .hgtags fnodes cache entries to the local repo.
2451
2451
2452 Payload is pairs of 20 byte changeset nodes and filenodes.
2452 Payload is pairs of 20 byte changeset nodes and filenodes.
2453 """
2453 """
2454 # Grab the transaction so we ensure that we have the lock at this point.
2454 # Grab the transaction so we ensure that we have the lock at this point.
2455 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2455 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2456 op.gettransaction()
2456 op.gettransaction()
2457 cache = tags.hgtagsfnodescache(op.repo.unfiltered())
2457 cache = tags.hgtagsfnodescache(op.repo.unfiltered())
2458
2458
2459 count = 0
2459 count = 0
2460 while True:
2460 while True:
2461 node = inpart.read(20)
2461 node = inpart.read(20)
2462 fnode = inpart.read(20)
2462 fnode = inpart.read(20)
2463 if len(node) < 20 or len(fnode) < 20:
2463 if len(node) < 20 or len(fnode) < 20:
2464 op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n')
2464 op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n')
2465 break
2465 break
2466 cache.setfnode(node, fnode)
2466 cache.setfnode(node, fnode)
2467 count += 1
2467 count += 1
2468
2468
2469 cache.write()
2469 cache.write()
2470 op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count)
2470 op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count)
2471
2471
2472
2472
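The fnodes payload described above is a flat run of 40-byte records: a 20-byte changeset node immediately followed by the 20-byte .hgtags filenode, with an incomplete trailing record silently ignored. A self-contained sketch of that pairing (not Mercurial code):

```python
import io

def iter_fnode_pairs(stream):
    # Yield (changeset node, .hgtags filenode) pairs; drop an incomplete tail.
    while True:
        node, fnode = stream.read(20), stream.read(20)
        if len(node) < 20 or len(fnode) < 20:
            break
        yield node, fnode

payload = io.BytesIO(b'\xaa' * 20 + b'\xbb' * 20 + b'\xcc' * 5)
assert list(iter_fnode_pairs(payload)) == [(b'\xaa' * 20, b'\xbb' * 20)]
```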
2473 rbcstruct = struct.Struct(b'>III')
2473 rbcstruct = struct.Struct(b'>III')
2474
2474
2475
2475
2476 @parthandler(b'cache:rev-branch-cache')
2476 @parthandler(b'cache:rev-branch-cache')
2477 def handlerbc(op, inpart):
2477 def handlerbc(op, inpart):
2478 """receive a rev-branch-cache payload and update the local cache
2478 """receive a rev-branch-cache payload and update the local cache
2479
2479
2480 The payload is a series of records, one per branch, each containing:
2480 The payload is a series of records, one per branch, each containing:
2481
2481
2482 1) branch name length
2482 1) branch name length
2483 2) number of open heads
2483 2) number of open heads
2484 3) number of closed heads
2484 3) number of closed heads
2485 4) open heads nodes
2485 4) open heads nodes
2486 5) closed heads nodes
2486 5) closed heads nodes
2487 """
2487 """
2488 total = 0
2488 total = 0
2489 rawheader = inpart.read(rbcstruct.size)
2489 rawheader = inpart.read(rbcstruct.size)
2490 cache = op.repo.revbranchcache()
2490 cache = op.repo.revbranchcache()
2491 cl = op.repo.unfiltered().changelog
2491 cl = op.repo.unfiltered().changelog
2492 while rawheader:
2492 while rawheader:
2493 header = rbcstruct.unpack(rawheader)
2493 header = rbcstruct.unpack(rawheader)
2494 total += header[1] + header[2]
2494 total += header[1] + header[2]
2495 utf8branch = inpart.read(header[0])
2495 utf8branch = inpart.read(header[0])
2496 branch = encoding.tolocal(utf8branch)
2496 branch = encoding.tolocal(utf8branch)
2497 for x in pycompat.xrange(header[1]):
2497 for x in pycompat.xrange(header[1]):
2498 node = inpart.read(20)
2498 node = inpart.read(20)
2499 rev = cl.rev(node)
2499 rev = cl.rev(node)
2500 cache.setdata(branch, rev, node, False)
2500 cache.setdata(branch, rev, node, False)
2501 for x in pycompat.xrange(header[2]):
2501 for x in pycompat.xrange(header[2]):
2502 node = inpart.read(20)
2502 node = inpart.read(20)
2503 rev = cl.rev(node)
2503 rev = cl.rev(node)
2504 cache.setdata(branch, rev, node, True)
2504 cache.setdata(branch, rev, node, True)
2505 rawheader = inpart.read(rbcstruct.size)
2505 rawheader = inpart.read(rbcstruct.size)
2506 cache.write()
2506 cache.write()
2507
2507
2508
2508
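Each record parsed above starts with the `>III` header (branch name length, open head count, closed head count), followed by the UTF-8 branch name and then one 20-byte node per head, open heads first. A hypothetical encoder for one such record, matching the layout the handler reads (stdlib only, for illustration):

```python
import struct

rbcstruct = struct.Struct(b'>III')  # name length, #open heads, #closed heads

def encode_rbc_record(branch_utf8, open_nodes, closed_nodes):
    header = rbcstruct.pack(len(branch_utf8), len(open_nodes), len(closed_nodes))
    return header + branch_utf8 + b''.join(open_nodes) + b''.join(closed_nodes)

record = encode_rbc_record(b'default', [b'\x01' * 20], [b'\x02' * 20, b'\x03' * 20])
assert rbcstruct.unpack(record[:rbcstruct.size]) == (7, 1, 2)
```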
2509 @parthandler(b'pushvars')
2509 @parthandler(b'pushvars')
2510 def bundle2getvars(op, part):
2510 def bundle2getvars(op, part):
2511 '''unbundle a bundle2 containing shellvars on the server'''
2511 '''unbundle a bundle2 containing shellvars on the server'''
2512 # An option to disable unbundling on server-side for security reasons
2512 # An option to disable unbundling on server-side for security reasons
2513 if op.ui.configbool(b'push', b'pushvars.server'):
2513 if op.ui.configbool(b'push', b'pushvars.server'):
2514 hookargs = {}
2514 hookargs = {}
2515 for key, value in part.advisoryparams:
2515 for key, value in part.advisoryparams:
2516 key = key.upper()
2516 key = key.upper()
2517 # We want pushed variables to have USERVAR_ prepended so we know
2517 # We want pushed variables to have USERVAR_ prepended so we know
2518 # they came from the --pushvar flag.
2518 # they came from the --pushvar flag.
2519 key = b"USERVAR_" + key
2519 key = b"USERVAR_" + key
2520 hookargs[key] = value
2520 hookargs[key] = value
2521 op.addhookargs(hookargs)
2521 op.addhookargs(hookargs)
2522
2522
2523
2523
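The handler above simply uppercases each advisory key and prefixes it with USERVAR_ so hooks can tell client-pushed variables from server-defined hook arguments. A plain-dict sketch of that mapping (hypothetical helper):

```python
def pushvars_to_hookargs(advisoryparams):
    # advisoryparams: iterable of (key, value) byte-string pairs from --pushvars
    hookargs = {}
    for key, value in advisoryparams:
        hookargs[b'USERVAR_' + key.upper()] = value
    return hookargs

assert pushvars_to_hookargs([(b'debug', b'1')]) == {b'USERVAR_DEBUG': b'1'}
```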
2524 @parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
2524 @parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
2525 def handlestreamv2bundle(op, part):
2525 def handlestreamv2bundle(op, part):
2526
2526
2527 requirements = urlreq.unquote(part.params[b'requirements']).split(b',')
2527 requirements = urlreq.unquote(part.params[b'requirements']).split(b',')
2528 filecount = int(part.params[b'filecount'])
2528 filecount = int(part.params[b'filecount'])
2529 bytecount = int(part.params[b'bytecount'])
2529 bytecount = int(part.params[b'bytecount'])
2530
2530
2531 repo = op.repo
2531 repo = op.repo
2532 if len(repo):
2532 if len(repo):
2533 msg = _(b'cannot apply stream clone to non empty repository')
2533 msg = _(b'cannot apply stream clone to non empty repository')
2534 raise error.Abort(msg)
2534 raise error.Abort(msg)
2535
2535
2536 repo.ui.debug(b'applying stream bundle\n')
2536 repo.ui.debug(b'applying stream bundle\n')
2537 streamclone.applybundlev2(repo, part, filecount, bytecount, requirements)
2537 streamclone.applybundlev2(repo, part, filecount, bytecount, requirements)
2538
2538
2539
2539
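The requirements parameter read above is the URL-quoted, comma-separated spec produced on the sending side (_formatrequirementsspec in the generation code earlier) and split back apart here. A round-trip sketch under that assumption, with invented helper names:

```python
from urllib.parse import quote, unquote

def format_requirements(reqs):
    # Assumed convention: sort, join with commas, then URL-quote the whole spec.
    return quote(','.join(sorted(reqs)))

def parse_requirements(spec):
    return set(unquote(spec).split(','))

spec = format_requirements({'revlogv1', 'store', 'fncache'})
assert parse_requirements(spec) == {'revlogv1', 'store', 'fncache'}
```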
2540 def widen_bundle(
2540 def widen_bundle(
2541 bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses
2541 bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses
2542 ):
2542 ):
2543 """generates bundle2 for widening a narrow clone
2543 """generates bundle2 for widening a narrow clone
2544
2544
2545 bundler is the bundle to which data should be added
2545 bundler is the bundle to which data should be added
2546 repo is the localrepository instance
2546 repo is the localrepository instance
2547 oldmatcher matches what the client already has
2547 oldmatcher matches what the client already has
2548 newmatcher matches what the client needs (including what it already has)
2548 newmatcher matches what the client needs (including what it already has)
2549 common is the set of common heads between server and client
2549 common is the set of common heads between server and client
2550 known is a set of revs known on the client side (used in ellipses)
2550 known is a set of revs known on the client side (used in ellipses)
2551 cgversion is the changegroup version to send
2551 cgversion is the changegroup version to send
2552 ellipses is a boolean telling whether to send ellipses data or not
2552 ellipses is a boolean telling whether to send ellipses data or not
2553
2553
2554 returns a bundle2 of the data required for widening
2554 returns a bundle2 of the data required for widening
2555 """
2555 """
2556 commonnodes = set()
2556 commonnodes = set()
2557 cl = repo.changelog
2557 cl = repo.changelog
2558 for r in repo.revs(b"::%ln", common):
2558 for r in repo.revs(b"::%ln", common):
2559 commonnodes.add(cl.node(r))
2559 commonnodes.add(cl.node(r))
2560 if commonnodes:
2560 if commonnodes:
2561 # XXX: we should only send the filelogs (and treemanifest). user
2561 # XXX: we should only send the filelogs (and treemanifest). user
2562 # already has the changelog and manifest
2562 # already has the changelog and manifest
2563 packer = changegroup.getbundler(
2563 packer = changegroup.getbundler(
2564 cgversion,
2564 cgversion,
2565 repo,
2565 repo,
2566 oldmatcher=oldmatcher,
2566 oldmatcher=oldmatcher,
2567 matcher=newmatcher,
2567 matcher=newmatcher,
2568 fullnodes=commonnodes,
2568 fullnodes=commonnodes,
2569 )
2569 )
2570 cgdata = packer.generate(
2570 cgdata = packer.generate(
2571 {nodemod.nullid},
2571 {nodemod.nullid},
2572 list(commonnodes),
2572 list(commonnodes),
2573 False,
2573 False,
2574 b'narrow_widen',
2574 b'narrow_widen',
2575 changelog=False,
2575 changelog=False,
2576 )
2576 )
2577
2577
2578 part = bundler.newpart(b'changegroup', data=cgdata)
2578 part = bundler.newpart(b'changegroup', data=cgdata)
2579 part.addparam(b'version', cgversion)
2579 part.addparam(b'version', cgversion)
2580 if repository.TREEMANIFEST_REQUIREMENT in repo.requirements:
2580 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
2581 part.addparam(b'treemanifest', b'1')
2581 part.addparam(b'treemanifest', b'1')
2582 if b'exp-sidedata-flag' in repo.requirements:
2582 if b'exp-sidedata-flag' in repo.requirements:
2583 part.addparam(b'exp-sidedata', b'1')
2583 part.addparam(b'exp-sidedata', b'1')
2584
2584
2585 return bundler
2585 return bundler
@@ -1,1689 +1,1690 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21 from .pycompat import open
21 from .pycompat import open
22
22
23 from . import (
23 from . import (
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 requirements,
29 util,
30 util,
30 )
31 )
31
32
32 from .interfaces import repository
33 from .interfaces import repository
33
34
34 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
35 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
35 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
36 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
36 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
37 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
37
38
38 LFS_REQUIREMENT = b'lfs'
39 LFS_REQUIREMENT = b'lfs'
39
40
40 readexactly = util.readexactly
41 readexactly = util.readexactly
41
42
42
43
43 def getchunk(stream):
44 def getchunk(stream):
44 """return the next chunk from stream as a string"""
45 """return the next chunk from stream as a string"""
45 d = readexactly(stream, 4)
46 d = readexactly(stream, 4)
46 l = struct.unpack(b">l", d)[0]
47 l = struct.unpack(b">l", d)[0]
47 if l <= 4:
48 if l <= 4:
48 if l:
49 if l:
49 raise error.Abort(_(b"invalid chunk length %d") % l)
50 raise error.Abort(_(b"invalid chunk length %d") % l)
50 return b""
51 return b""
51 return readexactly(stream, l - 4)
52 return readexactly(stream, l - 4)
52
53
53
54
54 def chunkheader(length):
55 def chunkheader(length):
55 """return a changegroup chunk header (string)"""
56 """return a changegroup chunk header (string)"""
56 return struct.pack(b">l", length + 4)
57 return struct.pack(b">l", length + 4)
57
58
58
59
59 def closechunk():
60 def closechunk():
60 """return a changegroup chunk header (string) for a zero-length chunk"""
61 """return a changegroup chunk header (string) for a zero-length chunk"""
61 return struct.pack(b">l", 0)
62 return struct.pack(b">l", 0)
62
63
63
64
64 def _fileheader(path):
65 def _fileheader(path):
65 """Obtain a changegroup chunk header for a named path."""
66 """Obtain a changegroup chunk header for a named path."""
66 return chunkheader(len(path)) + path
67 return chunkheader(len(path)) + path
67
68
68
69
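The helpers above define the changegroup chunk framing: every chunk is prefixed by a 4-byte big-endian length that counts the prefix itself, and a literal zero length closes a group. A round-trip sketch of that framing (illustration only, not the Mercurial helpers themselves):

```python
import io
import struct

def frame(payload):
    # Length header includes its own four bytes, as in chunkheader() above.
    return struct.pack('>l', len(payload) + 4) + payload

def read_frames(stream):
    while True:
        l = struct.unpack('>l', stream.read(4))[0]
        if l <= 4:
            if l:
                raise ValueError('invalid chunk length %d' % l)
            return  # a bare zero length terminates the group
        yield stream.read(l - 4)

wire = frame(b'hello') + frame(b'world') + struct.pack('>l', 0)
assert list(read_frames(io.BytesIO(wire))) == [b'hello', b'world']
```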
69 def writechunks(ui, chunks, filename, vfs=None):
70 def writechunks(ui, chunks, filename, vfs=None):
70 """Write chunks to a file and return its filename.
71 """Write chunks to a file and return its filename.
71
72
72 The stream is assumed to be a bundle file.
73 The stream is assumed to be a bundle file.
73 Existing files will not be overwritten.
74 Existing files will not be overwritten.
74 If no filename is specified, a temporary file is created.
75 If no filename is specified, a temporary file is created.
75 """
76 """
76 fh = None
77 fh = None
77 cleanup = None
78 cleanup = None
78 try:
79 try:
79 if filename:
80 if filename:
80 if vfs:
81 if vfs:
81 fh = vfs.open(filename, b"wb")
82 fh = vfs.open(filename, b"wb")
82 else:
83 else:
83 # Increase default buffer size because default is usually
84 # Increase default buffer size because default is usually
84 # small (4k is common on Linux).
85 # small (4k is common on Linux).
85 fh = open(filename, b"wb", 131072)
86 fh = open(filename, b"wb", 131072)
86 else:
87 else:
87 fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
88 fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
88 fh = os.fdopen(fd, "wb")
89 fh = os.fdopen(fd, "wb")
89 cleanup = filename
90 cleanup = filename
90 for c in chunks:
91 for c in chunks:
91 fh.write(c)
92 fh.write(c)
92 cleanup = None
93 cleanup = None
93 return filename
94 return filename
94 finally:
95 finally:
95 if fh is not None:
96 if fh is not None:
96 fh.close()
97 fh.close()
97 if cleanup is not None:
98 if cleanup is not None:
98 if filename and vfs:
99 if filename and vfs:
99 vfs.unlink(cleanup)
100 vfs.unlink(cleanup)
100 else:
101 else:
101 os.unlink(cleanup)
102 os.unlink(cleanup)
102
103
103
104
104 class cg1unpacker(object):
105 class cg1unpacker(object):
105 """Unpacker for cg1 changegroup streams.
106 """Unpacker for cg1 changegroup streams.
106
107
107 A changegroup unpacker handles the framing of the revision data in
108 A changegroup unpacker handles the framing of the revision data in
108 the wire format. Most consumers will want to use the apply()
109 the wire format. Most consumers will want to use the apply()
109 method to add the changes from the changegroup to a repository.
110 method to add the changes from the changegroup to a repository.
110
111
111 If you're forwarding a changegroup unmodified to another consumer,
112 If you're forwarding a changegroup unmodified to another consumer,
112 use getchunks(), which returns an iterator of changegroup
113 use getchunks(), which returns an iterator of changegroup
113 chunks. This is mostly useful for cases where you need to know the
114 chunks. This is mostly useful for cases where you need to know the
114 data stream has ended by observing the end of the changegroup.
115 data stream has ended by observing the end of the changegroup.
115
116
116 deltachunk() is useful only if you're applying delta data. Most
117 deltachunk() is useful only if you're applying delta data. Most
117 consumers should prefer apply() instead.
118 consumers should prefer apply() instead.
118
119
119 A few other public methods exist. Those are used only for
120 A few other public methods exist. Those are used only for
120 bundlerepo and some debug commands - their use is discouraged.
121 bundlerepo and some debug commands - their use is discouraged.
121 """
122 """
122
123
123 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
124 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
124 deltaheadersize = deltaheader.size
125 deltaheadersize = deltaheader.size
125 version = b'01'
126 version = b'01'
126 _grouplistcount = 1 # One list of files after the manifests
127 _grouplistcount = 1 # One list of files after the manifests
127
128
128 def __init__(self, fh, alg, extras=None):
129 def __init__(self, fh, alg, extras=None):
129 if alg is None:
130 if alg is None:
130 alg = b'UN'
131 alg = b'UN'
131 if alg not in util.compengines.supportedbundletypes:
132 if alg not in util.compengines.supportedbundletypes:
132 raise error.Abort(_(b'unknown stream compression type: %s') % alg)
133 raise error.Abort(_(b'unknown stream compression type: %s') % alg)
133 if alg == b'BZ':
134 if alg == b'BZ':
134 alg = b'_truncatedBZ'
135 alg = b'_truncatedBZ'
135
136
136 compengine = util.compengines.forbundletype(alg)
137 compengine = util.compengines.forbundletype(alg)
137 self._stream = compengine.decompressorreader(fh)
138 self._stream = compengine.decompressorreader(fh)
138 self._type = alg
139 self._type = alg
139 self.extras = extras or {}
140 self.extras = extras or {}
140 self.callback = None
141 self.callback = None
141
142
142 # These methods (compressed, read, seek, tell) all appear to only
143 # These methods (compressed, read, seek, tell) all appear to only
143 # be used by bundlerepo, but it's a little hard to tell.
144 # be used by bundlerepo, but it's a little hard to tell.
144 def compressed(self):
145 def compressed(self):
145 return self._type is not None and self._type != b'UN'
146 return self._type is not None and self._type != b'UN'
146
147
147 def read(self, l):
148 def read(self, l):
148 return self._stream.read(l)
149 return self._stream.read(l)
149
150
150 def seek(self, pos):
151 def seek(self, pos):
151 return self._stream.seek(pos)
152 return self._stream.seek(pos)
152
153
153 def tell(self):
154 def tell(self):
154 return self._stream.tell()
155 return self._stream.tell()
155
156
156 def close(self):
157 def close(self):
157 return self._stream.close()
158 return self._stream.close()
158
159
159 def _chunklength(self):
160 def _chunklength(self):
160 d = readexactly(self._stream, 4)
161 d = readexactly(self._stream, 4)
161 l = struct.unpack(b">l", d)[0]
162 l = struct.unpack(b">l", d)[0]
162 if l <= 4:
163 if l <= 4:
163 if l:
164 if l:
164 raise error.Abort(_(b"invalid chunk length %d") % l)
165 raise error.Abort(_(b"invalid chunk length %d") % l)
165 return 0
166 return 0
166 if self.callback:
167 if self.callback:
167 self.callback()
168 self.callback()
168 return l - 4
169 return l - 4
169
170
170 def changelogheader(self):
171 def changelogheader(self):
171 """v10 does not have a changelog header chunk"""
172 """v10 does not have a changelog header chunk"""
172 return {}
173 return {}
173
174
174 def manifestheader(self):
175 def manifestheader(self):
175 """v10 does not have a manifest header chunk"""
176 """v10 does not have a manifest header chunk"""
176 return {}
177 return {}
177
178
178 def filelogheader(self):
179 def filelogheader(self):
179 """return the header of the filelogs chunk, v10 only has the filename"""
180 """return the header of the filelogs chunk, v10 only has the filename"""
180 l = self._chunklength()
181 l = self._chunklength()
181 if not l:
182 if not l:
182 return {}
183 return {}
183 fname = readexactly(self._stream, l)
184 fname = readexactly(self._stream, l)
184 return {b'filename': fname}
185 return {b'filename': fname}
185
186
186 def _deltaheader(self, headertuple, prevnode):
187 def _deltaheader(self, headertuple, prevnode):
187 node, p1, p2, cs = headertuple
188 node, p1, p2, cs = headertuple
188 if prevnode is None:
189 if prevnode is None:
189 deltabase = p1
190 deltabase = p1
190 else:
191 else:
191 deltabase = prevnode
192 deltabase = prevnode
192 flags = 0
193 flags = 0
193 return node, p1, p2, deltabase, cs, flags
194 return node, p1, p2, deltabase, cs, flags
194
195
195 def deltachunk(self, prevnode):
196 def deltachunk(self, prevnode):
196 l = self._chunklength()
197 l = self._chunklength()
197 if not l:
198 if not l:
198 return {}
199 return {}
199 headerdata = readexactly(self._stream, self.deltaheadersize)
200 headerdata = readexactly(self._stream, self.deltaheadersize)
200 header = self.deltaheader.unpack(headerdata)
201 header = self.deltaheader.unpack(headerdata)
201 delta = readexactly(self._stream, l - self.deltaheadersize)
202 delta = readexactly(self._stream, l - self.deltaheadersize)
202 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
203 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
203 return (node, p1, p2, cs, deltabase, delta, flags)
204 return (node, p1, p2, cs, deltabase, delta, flags)
204
205
205 def getchunks(self):
206 def getchunks(self):
206 """returns all the chunks contains in the bundle
207 """returns all the chunks contains in the bundle
207
208
208 Used when you need to forward the binary stream to a file or another
209 Used when you need to forward the binary stream to a file or another
209 network API. To do so, it parses the changegroup data; otherwise it would
210 network API. To do so, it parses the changegroup data; otherwise it would
210 block in the sshrepo case because it doesn't know where the stream ends.
211 block in the sshrepo case because it doesn't know where the stream ends.
211 """
212 """
212 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
213 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
213 # and a list of filelogs. For changegroup 3, we expect 4 parts:
214 # and a list of filelogs. For changegroup 3, we expect 4 parts:
214 # changelog, manifestlog, a list of tree manifestlogs, and a list of
215 # changelog, manifestlog, a list of tree manifestlogs, and a list of
215 # filelogs.
216 # filelogs.
216 #
217 #
217 # Changelog and manifestlog parts are terminated with empty chunks. The
218 # Changelog and manifestlog parts are terminated with empty chunks. The
218 # tree and file parts are a list of entry sections. Each entry section
219 # tree and file parts are a list of entry sections. Each entry section
219 # is a series of chunks terminating in an empty chunk. The list of these
220 # is a series of chunks terminating in an empty chunk. The list of these
220 # entry sections is terminated in yet another empty chunk, so we know
221 # entry sections is terminated in yet another empty chunk, so we know
221 # we've reached the end of the tree/file list when we reach an empty
222 # we've reached the end of the tree/file list when we reach an empty
222 # chunk that was preceded by no non-empty chunks.
223 # chunk that was preceded by no non-empty chunks.
223
224
224 parts = 0
225 parts = 0
225 while parts < 2 + self._grouplistcount:
226 while parts < 2 + self._grouplistcount:
226 noentries = True
227 noentries = True
227 while True:
228 while True:
228 chunk = getchunk(self)
229 chunk = getchunk(self)
229 if not chunk:
230 if not chunk:
230 # The first two empty chunks represent the end of the
231 # The first two empty chunks represent the end of the
231 # changelog and the manifestlog portions. The remaining
232 # changelog and the manifestlog portions. The remaining
232 # empty chunks represent either A) the end of individual
233 # empty chunks represent either A) the end of individual
233 # tree or file entries in the file list, or B) the end of
234 # tree or file entries in the file list, or B) the end of
234 # the entire list. It's the end of the entire list if there
235 # the entire list. It's the end of the entire list if there
235 # were no entries (i.e. noentries is True).
236 # were no entries (i.e. noentries is True).
236 if parts < 2:
237 if parts < 2:
237 parts += 1
238 parts += 1
238 elif noentries:
239 elif noentries:
239 parts += 1
240 parts += 1
240 break
241 break
241 noentries = False
242 noentries = False
242 yield chunkheader(len(chunk))
243 yield chunkheader(len(chunk))
243 pos = 0
244 pos = 0
244 while pos < len(chunk):
245 while pos < len(chunk):
245 next = pos + 2 ** 20
246 next = pos + 2 ** 20
246 yield chunk[pos:next]
247 yield chunk[pos:next]
247 pos = next
248 pos = next
248 yield closechunk()
249 yield closechunk()
249
250
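# A note on framing, summarizing the helpers used above: every chunk is
# length-prefixed with a 4-byte big-endian signed integer that counts the
# prefix itself, which is why _chunklength() subtracts 4 and why a
# zero-length chunk (closechunk()) serves as the terminator described in
# the comment inside getchunks().
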
250 def _unpackmanifests(self, repo, revmap, trp, prog):
251 def _unpackmanifests(self, repo, revmap, trp, prog):
251 self.callback = prog.increment
252 self.callback = prog.increment
252 # no need to check for empty manifest group here:
253 # no need to check for empty manifest group here:
253 # if the result of the merge of 1 and 2 is the same in 3 and 4,
254 # if the result of the merge of 1 and 2 is the same in 3 and 4,
254 # no new manifest will be created and the manifest group will
255 # no new manifest will be created and the manifest group will
255 # be empty during the pull
256 # be empty during the pull
256 self.manifestheader()
257 self.manifestheader()
257 deltas = self.deltaiter()
258 deltas = self.deltaiter()
258 repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
259 repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
259 prog.complete()
260 prog.complete()
260 self.callback = None
261 self.callback = None
261
262
262 def apply(
263 def apply(
263 self,
264 self,
264 repo,
265 repo,
265 tr,
266 tr,
266 srctype,
267 srctype,
267 url,
268 url,
268 targetphase=phases.draft,
269 targetphase=phases.draft,
269 expectedtotal=None,
270 expectedtotal=None,
270 ):
271 ):
271 """Add the changegroup returned by source.read() to this repo.
272 """Add the changegroup returned by source.read() to this repo.
272 srctype is a string like 'push', 'pull', or 'unbundle'. url is
273 srctype is a string like 'push', 'pull', or 'unbundle'. url is
273 the URL of the repo where this changegroup is coming from.
274 the URL of the repo where this changegroup is coming from.
274
275
275 Return an integer summarizing the change to this repo:
276 Return an integer summarizing the change to this repo:
276 - nothing changed or no source: 0
277 - nothing changed or no source: 0
277 - more heads than before: 1+added heads (2..n)
278 - more heads than before: 1+added heads (2..n)
278 - fewer heads than before: -1-removed heads (-2..-n)
279 - fewer heads than before: -1-removed heads (-2..-n)
279 - number of heads stays the same: 1
280 - number of heads stays the same: 1
280 """
281 """
281 repo = repo.unfiltered()
282 repo = repo.unfiltered()
282
283
283 def csmap(x):
284 def csmap(x):
284 repo.ui.debug(b"add changeset %s\n" % short(x))
285 repo.ui.debug(b"add changeset %s\n" % short(x))
285 return len(cl)
286 return len(cl)
286
287
287 def revmap(x):
288 def revmap(x):
288 return cl.rev(x)
289 return cl.rev(x)
289
290
290 try:
291 try:
291 # The transaction may already carry source information. In this
292 # The transaction may already carry source information. In this
292 # case we use the top level data. We overwrite the argument
293 # case we use the top level data. We overwrite the argument
293 # because we need to use the top level value (if it exists)
294 # because we need to use the top level value (if it exists)
294 # in this function.
295 # in this function.
295 srctype = tr.hookargs.setdefault(b'source', srctype)
296 srctype = tr.hookargs.setdefault(b'source', srctype)
296 tr.hookargs.setdefault(b'url', url)
297 tr.hookargs.setdefault(b'url', url)
297 repo.hook(
298 repo.hook(
298 b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
299 b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
299 )
300 )
300
301
301 # write changelog data to temp files so concurrent readers
302 # write changelog data to temp files so concurrent readers
302 # will not see an inconsistent view
303 # will not see an inconsistent view
303 cl = repo.changelog
304 cl = repo.changelog
304 cl.delayupdate(tr)
305 cl.delayupdate(tr)
305 oldheads = set(cl.heads())
306 oldheads = set(cl.heads())
306
307
307 trp = weakref.proxy(tr)
308 trp = weakref.proxy(tr)
308 # pull off the changeset group
309 # pull off the changeset group
309 repo.ui.status(_(b"adding changesets\n"))
310 repo.ui.status(_(b"adding changesets\n"))
310 clstart = len(cl)
311 clstart = len(cl)
311 progress = repo.ui.makeprogress(
312 progress = repo.ui.makeprogress(
312 _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
313 _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
313 )
314 )
314 self.callback = progress.increment
315 self.callback = progress.increment
315
316
316 efilesset = set()
317 efilesset = set()
317
318
318 def onchangelog(cl, node):
319 def onchangelog(cl, node):
319 efilesset.update(cl.readfiles(node))
320 efilesset.update(cl.readfiles(node))
320
321
321 self.changelogheader()
322 self.changelogheader()
322 deltas = self.deltaiter()
323 deltas = self.deltaiter()
323 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
324 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
324 efiles = len(efilesset)
325 efiles = len(efilesset)
325
326
326 if not cgnodes:
327 if not cgnodes:
327 repo.ui.develwarn(
328 repo.ui.develwarn(
328 b'applied empty changelog from changegroup',
329 b'applied empty changelog from changegroup',
329 config=b'warn-empty-changegroup',
330 config=b'warn-empty-changegroup',
330 )
331 )
331 clend = len(cl)
332 clend = len(cl)
332 changesets = clend - clstart
333 changesets = clend - clstart
333 progress.complete()
334 progress.complete()
334 self.callback = None
335 self.callback = None
335
336
336 # pull off the manifest group
337 # pull off the manifest group
337 repo.ui.status(_(b"adding manifests\n"))
338 repo.ui.status(_(b"adding manifests\n"))
338 # We know that we'll never have more manifests than we had
339 # We know that we'll never have more manifests than we had
339 # changesets.
340 # changesets.
340 progress = repo.ui.makeprogress(
341 progress = repo.ui.makeprogress(
341 _(b'manifests'), unit=_(b'chunks'), total=changesets
342 _(b'manifests'), unit=_(b'chunks'), total=changesets
342 )
343 )
343 self._unpackmanifests(repo, revmap, trp, progress)
344 self._unpackmanifests(repo, revmap, trp, progress)
344
345
345 needfiles = {}
346 needfiles = {}
346 if repo.ui.configbool(b'server', b'validate'):
347 if repo.ui.configbool(b'server', b'validate'):
347 cl = repo.changelog
348 cl = repo.changelog
348 ml = repo.manifestlog
349 ml = repo.manifestlog
349 # validate incoming csets have their manifests
350 # validate incoming csets have their manifests
350 for cset in pycompat.xrange(clstart, clend):
351 for cset in pycompat.xrange(clstart, clend):
351 mfnode = cl.changelogrevision(cset).manifest
352 mfnode = cl.changelogrevision(cset).manifest
352 mfest = ml[mfnode].readdelta()
353 mfest = ml[mfnode].readdelta()
353 # store file cgnodes we must see
354 # store file cgnodes we must see
354 for f, n in pycompat.iteritems(mfest):
355 for f, n in pycompat.iteritems(mfest):
355 needfiles.setdefault(f, set()).add(n)
356 needfiles.setdefault(f, set()).add(n)
356
357
357 # process the files
358 # process the files
358 repo.ui.status(_(b"adding file changes\n"))
359 repo.ui.status(_(b"adding file changes\n"))
359 newrevs, newfiles = _addchangegroupfiles(
360 newrevs, newfiles = _addchangegroupfiles(
360 repo, self, revmap, trp, efiles, needfiles
361 repo, self, revmap, trp, efiles, needfiles
361 )
362 )
362
363
363 # making sure the value exists
364 # making sure the value exists
364 tr.changes.setdefault(b'changegroup-count-changesets', 0)
365 tr.changes.setdefault(b'changegroup-count-changesets', 0)
365 tr.changes.setdefault(b'changegroup-count-revisions', 0)
366 tr.changes.setdefault(b'changegroup-count-revisions', 0)
366 tr.changes.setdefault(b'changegroup-count-files', 0)
367 tr.changes.setdefault(b'changegroup-count-files', 0)
367 tr.changes.setdefault(b'changegroup-count-heads', 0)
368 tr.changes.setdefault(b'changegroup-count-heads', 0)
368
369
369 # some code uses bundle operations for internal purposes and usually
370 # some code uses bundle operations for internal purposes and usually
370 # sets `ui.quiet` to keep this out of the user's sight. Since the report
371 # sets `ui.quiet` to keep this out of the user's sight. Since the report
371 # of such an operation now happens at the end of the transaction,
372 # of such an operation now happens at the end of the transaction,
372 # ui.quiet has no direct effect on the output.
373 # ui.quiet has no direct effect on the output.
373 #
374 #
374 # To preserve that intent we use an inelegant hack: we do not report
375 # To preserve that intent we use an inelegant hack: we do not report
375 # the change if `quiet` is set. We should probably move to
376 # the change if `quiet` is set. We should probably move to
376 # something better, but this is a good first step to allow the "end
377 # something better, but this is a good first step to allow the "end
377 # of transaction report" to pass tests.
378 # of transaction report" to pass tests.
378 if not repo.ui.quiet:
379 if not repo.ui.quiet:
379 tr.changes[b'changegroup-count-changesets'] += changesets
380 tr.changes[b'changegroup-count-changesets'] += changesets
380 tr.changes[b'changegroup-count-revisions'] += newrevs
381 tr.changes[b'changegroup-count-revisions'] += newrevs
381 tr.changes[b'changegroup-count-files'] += newfiles
382 tr.changes[b'changegroup-count-files'] += newfiles
382
383
383 deltaheads = 0
384 deltaheads = 0
384 if oldheads:
385 if oldheads:
385 heads = cl.heads()
386 heads = cl.heads()
386 deltaheads += len(heads) - len(oldheads)
387 deltaheads += len(heads) - len(oldheads)
387 for h in heads:
388 for h in heads:
388 if h not in oldheads and repo[h].closesbranch():
389 if h not in oldheads and repo[h].closesbranch():
389 deltaheads -= 1
390 deltaheads -= 1
390
391
391 # see previous comment about checking ui.quiet
392 # see previous comment about checking ui.quiet
392 if not repo.ui.quiet:
393 if not repo.ui.quiet:
393 tr.changes[b'changegroup-count-heads'] += deltaheads
394 tr.changes[b'changegroup-count-heads'] += deltaheads
394 repo.invalidatevolatilesets()
395 repo.invalidatevolatilesets()
395
396
396 if changesets > 0:
397 if changesets > 0:
397 if b'node' not in tr.hookargs:
398 if b'node' not in tr.hookargs:
398 tr.hookargs[b'node'] = hex(cl.node(clstart))
399 tr.hookargs[b'node'] = hex(cl.node(clstart))
399 tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
400 tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
400 hookargs = dict(tr.hookargs)
401 hookargs = dict(tr.hookargs)
401 else:
402 else:
402 hookargs = dict(tr.hookargs)
403 hookargs = dict(tr.hookargs)
403 hookargs[b'node'] = hex(cl.node(clstart))
404 hookargs[b'node'] = hex(cl.node(clstart))
404 hookargs[b'node_last'] = hex(cl.node(clend - 1))
405 hookargs[b'node_last'] = hex(cl.node(clend - 1))
405 repo.hook(
406 repo.hook(
406 b'pretxnchangegroup',
407 b'pretxnchangegroup',
407 throw=True,
408 throw=True,
408 **pycompat.strkwargs(hookargs)
409 **pycompat.strkwargs(hookargs)
409 )
410 )
410
411
411 added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
412 added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
412 phaseall = None
413 phaseall = None
413 if srctype in (b'push', b'serve'):
414 if srctype in (b'push', b'serve'):
414 # Old servers cannot push the boundary themselves.
415 # Old servers cannot push the boundary themselves.
415 # New servers won't push the boundary if changeset already
416 # New servers won't push the boundary if changeset already
416 # exists locally as secret
417 # exists locally as secret
417 #
418 #
418 # We should not use added here but the list of all changes in
419 # We should not use added here but the list of all changes in
419 # the bundle
420 # the bundle
420 if repo.publishing():
421 if repo.publishing():
421 targetphase = phaseall = phases.public
422 targetphase = phaseall = phases.public
422 else:
423 else:
423 # closer target phase computation
424 # closer target phase computation
424
425
425 # Those changesets have been pushed from the
426 # Those changesets have been pushed from the
426 # outside, their phases are going to be pushed
427 # outside, their phases are going to be pushed
427 # alongside. Therefore `targetphase` is
428 # alongside. Therefore `targetphase` is
428 # ignored.
429 # ignored.
429 targetphase = phaseall = phases.draft
430 targetphase = phaseall = phases.draft
430 if added:
431 if added:
431 phases.registernew(repo, tr, targetphase, added)
432 phases.registernew(repo, tr, targetphase, added)
432 if phaseall is not None:
433 if phaseall is not None:
433 phases.advanceboundary(repo, tr, phaseall, cgnodes)
434 phases.advanceboundary(repo, tr, phaseall, cgnodes)
434
435
435 if changesets > 0:
436 if changesets > 0:
436
437
437 def runhooks(unused_success):
438 def runhooks(unused_success):
438 # These hooks run when the lock releases, not when the
439 # These hooks run when the lock releases, not when the
439 # transaction closes. So it's possible for the changelog
440 # transaction closes. So it's possible for the changelog
440 # to have changed since we last saw it.
441 # to have changed since we last saw it.
441 if clstart >= len(repo):
442 if clstart >= len(repo):
442 return
443 return
443
444
444 repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))
445 repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))
445
446
446 for n in added:
447 for n in added:
447 args = hookargs.copy()
448 args = hookargs.copy()
448 args[b'node'] = hex(n)
449 args[b'node'] = hex(n)
449 del args[b'node_last']
450 del args[b'node_last']
450 repo.hook(b"incoming", **pycompat.strkwargs(args))
451 repo.hook(b"incoming", **pycompat.strkwargs(args))
451
452
452 newheads = [h for h in repo.heads() if h not in oldheads]
453 newheads = [h for h in repo.heads() if h not in oldheads]
453 repo.ui.log(
454 repo.ui.log(
454 b"incoming",
455 b"incoming",
455 b"%d incoming changes - new heads: %s\n",
456 b"%d incoming changes - new heads: %s\n",
456 len(added),
457 len(added),
457 b', '.join([hex(c[:6]) for c in newheads]),
458 b', '.join([hex(c[:6]) for c in newheads]),
458 )
459 )
459
460
460 tr.addpostclose(
461 tr.addpostclose(
461 b'changegroup-runhooks-%020i' % clstart,
462 b'changegroup-runhooks-%020i' % clstart,
462 lambda tr: repo._afterlock(runhooks),
463 lambda tr: repo._afterlock(runhooks),
463 )
464 )
464 finally:
465 finally:
465 repo.ui.flush()
466 repo.ui.flush()
466 # never return 0 here:
467 # never return 0 here:
467 if deltaheads < 0:
468 if deltaheads < 0:
468 ret = deltaheads - 1
469 ret = deltaheads - 1
469 else:
470 else:
470 ret = deltaheads + 1
471 ret = deltaheads + 1
471 return ret
472 return ret
472
473
473 def deltaiter(self):
474 def deltaiter(self):
474 """
475 """
475 returns an iterator of the deltas in this changegroup
476 returns an iterator of the deltas in this changegroup
476
477
477 Useful for passing to the underlying storage system to be stored.
478 Useful for passing to the underlying storage system to be stored.
478 """
479 """
479 chain = None
480 chain = None
480 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
481 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
481 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
482 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
482 yield chunkdata
483 yield chunkdata
483 chain = chunkdata[0]
484 chain = chunkdata[0]
484
485
485
486
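# A minimal sketch of driving the unpacker API documented above, assuming
# `repo` is a local repository, `tr` an already-open transaction, and `fh`
# an uncompressed cg1 payload; the helper name and url are illustrative.
def _applyplaincg1(repo, tr, fh, url=b'bundle:example'):
    cg = cg1unpacker(fh, b'UN')  # b'UN' means an uncompressed payload
    ret = cg.apply(repo, tr, b'unbundle', url)
    # Per apply()'s docstring: 0 means nothing changed, 1 means the head
    # count is unchanged, 2..n means ret - 1 heads were added, and a
    # negative value means -ret - 1 heads were removed.
    return ret
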
486 class cg2unpacker(cg1unpacker):
487 class cg2unpacker(cg1unpacker):
487 """Unpacker for cg2 streams.
488 """Unpacker for cg2 streams.
488
489
489 cg2 streams add support for generaldelta, so the delta header
490 cg2 streams add support for generaldelta, so the delta header
490 format is slightly different. All other features about the data
491 format is slightly different. All other features about the data
491 remain the same.
492 remain the same.
492 """
493 """
493
494
494 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
495 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
495 deltaheadersize = deltaheader.size
496 deltaheadersize = deltaheader.size
496 version = b'02'
497 version = b'02'
497
498
498 def _deltaheader(self, headertuple, prevnode):
499 def _deltaheader(self, headertuple, prevnode):
499 node, p1, p2, deltabase, cs = headertuple
500 node, p1, p2, deltabase, cs = headertuple
500 flags = 0
501 flags = 0
501 return node, p1, p2, deltabase, cs, flags
502 return node, p1, p2, deltabase, cs, flags
502
503
503
504
504 class cg3unpacker(cg2unpacker):
505 class cg3unpacker(cg2unpacker):
505 """Unpacker for cg3 streams.
506 """Unpacker for cg3 streams.
506
507
507 cg3 streams add support for exchanging treemanifests and revlog
508 cg3 streams add support for exchanging treemanifests and revlog
508 flags. It adds the revlog flags to the delta header and an empty chunk
509 flags. It adds the revlog flags to the delta header and an empty chunk
509 separating manifests and files.
510 separating manifests and files.
510 """
511 """
511
512
512 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
513 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
513 deltaheadersize = deltaheader.size
514 deltaheadersize = deltaheader.size
514 version = b'03'
515 version = b'03'
515 _grouplistcount = 2 # One list of manifests and one list of files
516 _grouplistcount = 2 # One list of manifests and one list of files
516
517
517 def _deltaheader(self, headertuple, prevnode):
518 def _deltaheader(self, headertuple, prevnode):
518 node, p1, p2, deltabase, cs, flags = headertuple
519 node, p1, p2, deltabase, cs, flags = headertuple
519 return node, p1, p2, deltabase, cs, flags
520 return node, p1, p2, deltabase, cs, flags
520
521
521 def _unpackmanifests(self, repo, revmap, trp, prog):
522 def _unpackmanifests(self, repo, revmap, trp, prog):
522 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
523 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
523 for chunkdata in iter(self.filelogheader, {}):
524 for chunkdata in iter(self.filelogheader, {}):
524 # If we get here, there are directory manifests in the changegroup
525 # If we get here, there are directory manifests in the changegroup
525 d = chunkdata[b"filename"]
526 d = chunkdata[b"filename"]
526 repo.ui.debug(b"adding %s revisions\n" % d)
527 repo.ui.debug(b"adding %s revisions\n" % d)
527 deltas = self.deltaiter()
528 deltas = self.deltaiter()
528 if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
529 if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
529 raise error.Abort(_(b"received dir revlog group is empty"))
530 raise error.Abort(_(b"received dir revlog group is empty"))
530
531
531
532
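# A sketch contrasting the delta headers the three unpackers expect,
# mirroring the _deltaheader() methods above; the concrete wire structs
# are the _CHANGEGROUPV*_DELTA_HEADER definitions earlier in this module.
def _normalizedeltaheader(version, headertuple, prevnode):
    if version == b'01':
        # cg1 carries no explicit delta base: deltas apply against p1 or
        # against the previously transmitted node.
        node, p1, p2, cs = headertuple
        deltabase = p1 if prevnode is None else prevnode
        flags = 0
    elif version == b'02':
        # cg2 (generaldelta) puts the delta base node on the wire.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
    else:
        # cg3 additionally carries revlog flags (e.g. the ellipsis flag).
        node, p1, p2, deltabase, cs, flags = headertuple
    return node, p1, p2, deltabase, cs, flags
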
532 class headerlessfixup(object):
533 class headerlessfixup(object):
533 def __init__(self, fh, h):
534 def __init__(self, fh, h):
534 self._h = h
535 self._h = h
535 self._fh = fh
536 self._fh = fh
536
537
537 def read(self, n):
538 def read(self, n):
538 if self._h:
539 if self._h:
539 d, self._h = self._h[:n], self._h[n:]
540 d, self._h = self._h[:n], self._h[n:]
540 if len(d) < n:
541 if len(d) < n:
541 d += readexactly(self._fh, n - len(d))
542 d += readexactly(self._fh, n - len(d))
542 return d
543 return d
543 return readexactly(self._fh, n)
544 return readexactly(self._fh, n)
544
545
545
546
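# A small usage sketch for headerlessfixup, assuming a caller has already
# pulled a few leading bytes off the stream while sniffing it (the values
# here are hypothetical). Wrapping the file object replays those bytes
# before reading from the stream again, so an unpacker constructed
# afterwards still sees an intact payload.
def _unpackerafterheader(fh, sniffed, alg=b'UN'):
    # `sniffed` holds the bytes that were already consumed from `fh`.
    return cg1unpacker(headerlessfixup(fh, sniffed), alg)
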
546 def _revisiondeltatochunks(delta, headerfn):
547 def _revisiondeltatochunks(delta, headerfn):
547 """Serialize a revisiondelta to changegroup chunks."""
548 """Serialize a revisiondelta to changegroup chunks."""
548
549
549 # The captured revision delta may be encoded as a delta against
550 # The captured revision delta may be encoded as a delta against
550 # a base revision or as a full revision. The changegroup format
551 # a base revision or as a full revision. The changegroup format
551 # requires that everything on the wire be deltas. So for full
552 # requires that everything on the wire be deltas. So for full
552 # revisions, we need to invent a header that says to rewrite
553 # revisions, we need to invent a header that says to rewrite
553 # data.
554 # data.
554
555
555 if delta.delta is not None:
556 if delta.delta is not None:
556 prefix, data = b'', delta.delta
557 prefix, data = b'', delta.delta
557 elif delta.basenode == nullid:
558 elif delta.basenode == nullid:
558 data = delta.revision
559 data = delta.revision
559 prefix = mdiff.trivialdiffheader(len(data))
560 prefix = mdiff.trivialdiffheader(len(data))
560 else:
561 else:
561 data = delta.revision
562 data = delta.revision
562 prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))
563 prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))
563
564
564 meta = headerfn(delta)
565 meta = headerfn(delta)
565
566
566 yield chunkheader(len(meta) + len(prefix) + len(data))
567 yield chunkheader(len(meta) + len(prefix) + len(data))
567 yield meta
568 yield meta
568 if prefix:
569 if prefix:
569 yield prefix
570 yield prefix
570 yield data
571 yield data
571
572
572
573
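# A minimal sketch, assuming only the helper above: the chunks produced
# for one revisiondelta form a single length-prefixed record (4-byte chunk
# header, the delta header built by `headerfn`, an optional synthetic diff
# prefix for full revisions, then the payload), so summing their lengths
# gives that record's on-wire size.
def _deltawiresize(delta, headerfn):
    return sum(len(chunk) for chunk in _revisiondeltatochunks(delta, headerfn))
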
573 def _sortnodesellipsis(store, nodes, cl, lookup):
574 def _sortnodesellipsis(store, nodes, cl, lookup):
574 """Sort nodes for changegroup generation."""
575 """Sort nodes for changegroup generation."""
575 # Ellipses serving mode.
576 # Ellipses serving mode.
576 #
577 #
577 # In a perfect world, we'd generate better ellipsis-ified graphs
578 # In a perfect world, we'd generate better ellipsis-ified graphs
578 # for non-changelog revlogs. In practice, we haven't started doing
579 # for non-changelog revlogs. In practice, we haven't started doing
579 # that yet, so the resulting DAGs for the manifestlog and filelogs
580 # that yet, so the resulting DAGs for the manifestlog and filelogs
580 # are actually full of bogus parentage on all the ellipsis
581 # are actually full of bogus parentage on all the ellipsis
581 # nodes. This has the side effect that, while the contents are
582 # nodes. This has the side effect that, while the contents are
582 # correct, the individual DAGs might be completely out of whack in
583 # correct, the individual DAGs might be completely out of whack in
583 # a case like 882681bc3166 and its ancestors (back about 10
584 # a case like 882681bc3166 and its ancestors (back about 10
584 # revisions or so) in the main hg repo.
585 # revisions or so) in the main hg repo.
585 #
586 #
586 # The one invariant we *know* holds is that the new (potentially
587 # The one invariant we *know* holds is that the new (potentially
587 # bogus) DAG shape will be valid if we order the nodes in the
588 # bogus) DAG shape will be valid if we order the nodes in the
588 # order that they're introduced in dramatis personae by the
589 # order that they're introduced in dramatis personae by the
589 # changelog, so what we do is we sort the non-changelog histories
590 # changelog, so what we do is we sort the non-changelog histories
590 # by the order in which they are used by the changelog.
591 # by the order in which they are used by the changelog.
591 key = lambda n: cl.rev(lookup(n))
592 key = lambda n: cl.rev(lookup(n))
592 return sorted(nodes, key=key)
593 return sorted(nodes, key=key)
593
594
594
595
595 def _resolvenarrowrevisioninfo(
596 def _resolvenarrowrevisioninfo(
596 cl,
597 cl,
597 store,
598 store,
598 ischangelog,
599 ischangelog,
599 rev,
600 rev,
600 linkrev,
601 linkrev,
601 linknode,
602 linknode,
602 clrevtolocalrev,
603 clrevtolocalrev,
603 fullclnodes,
604 fullclnodes,
604 precomputedellipsis,
605 precomputedellipsis,
605 ):
606 ):
606 linkparents = precomputedellipsis[linkrev]
607 linkparents = precomputedellipsis[linkrev]
607
608
608 def local(clrev):
609 def local(clrev):
609 """Turn a changelog revnum into a local revnum.
610 """Turn a changelog revnum into a local revnum.
610
611
611 The ellipsis dag is stored as revnums on the changelog,
612 The ellipsis dag is stored as revnums on the changelog,
612 but when we're producing ellipsis entries for
613 but when we're producing ellipsis entries for
613 non-changelog revlogs, we need to turn those numbers into
614 non-changelog revlogs, we need to turn those numbers into
614 something local. This does that for us, and during the
615 something local. This does that for us, and during the
615 changelog sending phase will also expand the stored
616 changelog sending phase will also expand the stored
616 mappings as needed.
617 mappings as needed.
617 """
618 """
618 if clrev == nullrev:
619 if clrev == nullrev:
619 return nullrev
620 return nullrev
620
621
621 if ischangelog:
622 if ischangelog:
622 return clrev
623 return clrev
623
624
624 # Walk the ellipsis-ized changelog breadth-first looking for a
625 # Walk the ellipsis-ized changelog breadth-first looking for a
625 # change that has been linked from the current revlog.
626 # change that has been linked from the current revlog.
626 #
627 #
627 # For a flat manifest revlog only a single step should be necessary
628 # For a flat manifest revlog only a single step should be necessary
628 # as all relevant changelog entries are relevant to the flat
629 # as all relevant changelog entries are relevant to the flat
629 # manifest.
630 # manifest.
630 #
631 #
631 # For a filelog or tree manifest dirlog however not every changelog
632 # For a filelog or tree manifest dirlog however not every changelog
632 # entry will have been relevant, so we need to skip some changelog
633 # entry will have been relevant, so we need to skip some changelog
633 # nodes even after ellipsis-izing.
634 # nodes even after ellipsis-izing.
634 walk = [clrev]
635 walk = [clrev]
635 while walk:
636 while walk:
636 p = walk[0]
637 p = walk[0]
637 walk = walk[1:]
638 walk = walk[1:]
638 if p in clrevtolocalrev:
639 if p in clrevtolocalrev:
639 return clrevtolocalrev[p]
640 return clrevtolocalrev[p]
640 elif p in fullclnodes:
641 elif p in fullclnodes:
641 walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
642 walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
642 elif p in precomputedellipsis:
643 elif p in precomputedellipsis:
643 walk.extend(
644 walk.extend(
644 [pp for pp in precomputedellipsis[p] if pp != nullrev]
645 [pp for pp in precomputedellipsis[p] if pp != nullrev]
645 )
646 )
646 else:
647 else:
647 # In this case, we've got an ellipsis with parents
648 # In this case, we've got an ellipsis with parents
648 # outside the current bundle (likely an
649 # outside the current bundle (likely an
649 # incremental pull). We "know" that we can use the
650 # incremental pull). We "know" that we can use the
650 # value of this same revlog at whatever revision
651 # value of this same revlog at whatever revision
651 # is pointed to by linknode. "Know" is in scare
652 # is pointed to by linknode. "Know" is in scare
652 # quotes because I haven't done enough examination
653 # quotes because I haven't done enough examination
653 # of edge cases to convince myself this is really
654 # of edge cases to convince myself this is really
654 # a fact - it works for all the (admittedly
655 # a fact - it works for all the (admittedly
655 # thorough) cases in our testsuite, but I would be
656 # thorough) cases in our testsuite, but I would be
656 # somewhat unsurprised to find a case in the wild
657 # somewhat unsurprised to find a case in the wild
657 # where this breaks down a bit. That said, I don't
658 # where this breaks down a bit. That said, I don't
658 # know if it would hurt anything.
659 # know if it would hurt anything.
659 for i in pycompat.xrange(rev, 0, -1):
660 for i in pycompat.xrange(rev, 0, -1):
660 if store.linkrev(i) == clrev:
661 if store.linkrev(i) == clrev:
661 return i
662 return i
662 # We failed to resolve a parent for this node, so
663 # We failed to resolve a parent for this node, so
663 # we crash the changegroup construction.
664 # we crash the changegroup construction.
664 raise error.Abort(
665 raise error.Abort(
665 b'unable to resolve parent while packing %r %r'
666 b'unable to resolve parent while packing %r %r'
666 b' for changeset %r' % (store.indexfile, rev, clrev)
667 b' for changeset %r' % (store.indexfile, rev, clrev)
667 )
668 )
668
669
669 return nullrev
670 return nullrev
670
671
671 if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
672 if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
672 p1, p2 = nullrev, nullrev
673 p1, p2 = nullrev, nullrev
673 elif len(linkparents) == 1:
674 elif len(linkparents) == 1:
674 (p1,) = sorted(local(p) for p in linkparents)
675 (p1,) = sorted(local(p) for p in linkparents)
675 p2 = nullrev
676 p2 = nullrev
676 else:
677 else:
677 p1, p2 = sorted(local(p) for p in linkparents)
678 p1, p2 = sorted(local(p) for p in linkparents)
678
679
679 p1node, p2node = store.node(p1), store.node(p2)
680 p1node, p2node = store.node(p1), store.node(p2)
680
681
681 return p1node, p2node, linknode
682 return p1node, p2node, linknode
682
683
683
684
684 def deltagroup(
685 def deltagroup(
685 repo,
686 repo,
686 store,
687 store,
687 nodes,
688 nodes,
688 ischangelog,
689 ischangelog,
689 lookup,
690 lookup,
690 forcedeltaparentprev,
691 forcedeltaparentprev,
691 topic=None,
692 topic=None,
692 ellipses=False,
693 ellipses=False,
693 clrevtolocalrev=None,
694 clrevtolocalrev=None,
694 fullclnodes=None,
695 fullclnodes=None,
695 precomputedellipsis=None,
696 precomputedellipsis=None,
696 ):
697 ):
697 """Calculate deltas for a set of revisions.
698 """Calculate deltas for a set of revisions.
698
699
699 Is a generator of ``revisiondelta`` instances.
700 Is a generator of ``revisiondelta`` instances.
700
701
701 If topic is not None, progress detail will be generated using this
702 If topic is not None, progress detail will be generated using this
702 topic name (e.g. changesets, manifests, etc).
703 topic name (e.g. changesets, manifests, etc).
703 """
704 """
704 if not nodes:
705 if not nodes:
705 return
706 return
706
707
707 cl = repo.changelog
708 cl = repo.changelog
708
709
709 if ischangelog:
710 if ischangelog:
710 # `hg log` shows changesets in storage order. To preserve order
711 # `hg log` shows changesets in storage order. To preserve order
711 # across clones, send out changesets in storage order.
712 # across clones, send out changesets in storage order.
712 nodesorder = b'storage'
713 nodesorder = b'storage'
713 elif ellipses:
714 elif ellipses:
714 nodes = _sortnodesellipsis(store, nodes, cl, lookup)
715 nodes = _sortnodesellipsis(store, nodes, cl, lookup)
715 nodesorder = b'nodes'
716 nodesorder = b'nodes'
716 else:
717 else:
717 nodesorder = None
718 nodesorder = None
718
719
719 # Perform ellipses filtering and revision massaging. We do this before
720 # Perform ellipses filtering and revision massaging. We do this before
720 # emitrevisions() because a) filtering out revisions creates less work
721 # emitrevisions() because a) filtering out revisions creates less work
721 # for emitrevisions() b) dropping revisions would break emitrevisions()'s
722 # for emitrevisions() b) dropping revisions would break emitrevisions()'s
722 # assumptions about delta choices and we would possibly send a delta
723 # assumptions about delta choices and we would possibly send a delta
723 # referencing a missing base revision.
724 # referencing a missing base revision.
724 #
725 #
725 # Also, calling lookup() has side-effects with regards to populating
726 # Also, calling lookup() has side-effects with regards to populating
726 # data structures. If we don't call lookup() for each node or if we call
727 # data structures. If we don't call lookup() for each node or if we call
727 # lookup() after the first pass through each node, things can break -
728 # lookup() after the first pass through each node, things can break -
728 # possibly intermittently depending on the python hash seed! For that
729 # possibly intermittently depending on the python hash seed! For that
729 # reason, we store a mapping of all linknodes during the initial node
730 # reason, we store a mapping of all linknodes during the initial node
730 # pass rather than use lookup() on the output side.
731 # pass rather than use lookup() on the output side.
731 if ellipses:
732 if ellipses:
732 filtered = []
733 filtered = []
733 adjustedparents = {}
734 adjustedparents = {}
734 linknodes = {}
735 linknodes = {}
735
736
736 for node in nodes:
737 for node in nodes:
737 rev = store.rev(node)
738 rev = store.rev(node)
738 linknode = lookup(node)
739 linknode = lookup(node)
739 linkrev = cl.rev(linknode)
740 linkrev = cl.rev(linknode)
740 clrevtolocalrev[linkrev] = rev
741 clrevtolocalrev[linkrev] = rev
741
742
742 # If linknode is in fullclnodes, it means the corresponding
743 # If linknode is in fullclnodes, it means the corresponding
743 # changeset was a full changeset and is being sent unaltered.
744 # changeset was a full changeset and is being sent unaltered.
744 if linknode in fullclnodes:
745 if linknode in fullclnodes:
745 linknodes[node] = linknode
746 linknodes[node] = linknode
746
747
747 # If the corresponding changeset wasn't in the set computed
748 # If the corresponding changeset wasn't in the set computed
748 # as relevant to us, it should be dropped outright.
749 # as relevant to us, it should be dropped outright.
749 elif linkrev not in precomputedellipsis:
750 elif linkrev not in precomputedellipsis:
750 continue
751 continue
751
752
752 else:
753 else:
753 # We could probably do this later and avoid the dict
754 # We could probably do this later and avoid the dict
754 # holding state. But it likely doesn't matter.
755 # holding state. But it likely doesn't matter.
755 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
756 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
756 cl,
757 cl,
757 store,
758 store,
758 ischangelog,
759 ischangelog,
759 rev,
760 rev,
760 linkrev,
761 linkrev,
761 linknode,
762 linknode,
762 clrevtolocalrev,
763 clrevtolocalrev,
763 fullclnodes,
764 fullclnodes,
764 precomputedellipsis,
765 precomputedellipsis,
765 )
766 )
766
767
767 adjustedparents[node] = (p1node, p2node)
768 adjustedparents[node] = (p1node, p2node)
768 linknodes[node] = linknode
769 linknodes[node] = linknode
769
770
770 filtered.append(node)
771 filtered.append(node)
771
772
772 nodes = filtered
773 nodes = filtered
773
774
774 # We expect the first pass to be fast, so we only engage the progress
775 # We expect the first pass to be fast, so we only engage the progress
775 # meter for constructing the revision deltas.
776 # meter for constructing the revision deltas.
776 progress = None
777 progress = None
777 if topic is not None:
778 if topic is not None:
778 progress = repo.ui.makeprogress(
779 progress = repo.ui.makeprogress(
779 topic, unit=_(b'chunks'), total=len(nodes)
780 topic, unit=_(b'chunks'), total=len(nodes)
780 )
781 )
781
782
782 configtarget = repo.ui.config(b'devel', b'bundle.delta')
783 configtarget = repo.ui.config(b'devel', b'bundle.delta')
783 if configtarget not in (b'', b'p1', b'full'):
784 if configtarget not in (b'', b'p1', b'full'):
784 msg = _("""config "devel.bundle.delta" has unknown value: %s""")
785 msg = _("""config "devel.bundle.delta" has unknown value: %s""")
785 repo.ui.warn(msg % configtarget)
786 repo.ui.warn(msg % configtarget)
786
787
787 deltamode = repository.CG_DELTAMODE_STD
788 deltamode = repository.CG_DELTAMODE_STD
788 if forcedeltaparentprev:
789 if forcedeltaparentprev:
789 deltamode = repository.CG_DELTAMODE_PREV
790 deltamode = repository.CG_DELTAMODE_PREV
790 elif configtarget == b'p1':
791 elif configtarget == b'p1':
791 deltamode = repository.CG_DELTAMODE_P1
792 deltamode = repository.CG_DELTAMODE_P1
792 elif configtarget == b'full':
793 elif configtarget == b'full':
793 deltamode = repository.CG_DELTAMODE_FULL
794 deltamode = repository.CG_DELTAMODE_FULL
794
795
795 revisions = store.emitrevisions(
796 revisions = store.emitrevisions(
796 nodes,
797 nodes,
797 nodesorder=nodesorder,
798 nodesorder=nodesorder,
798 revisiondata=True,
799 revisiondata=True,
799 assumehaveparentrevisions=not ellipses,
800 assumehaveparentrevisions=not ellipses,
800 deltamode=deltamode,
801 deltamode=deltamode,
801 )
802 )
802
803
803 for i, revision in enumerate(revisions):
804 for i, revision in enumerate(revisions):
804 if progress:
805 if progress:
805 progress.update(i + 1)
806 progress.update(i + 1)
806
807
807 if ellipses:
808 if ellipses:
808 linknode = linknodes[revision.node]
809 linknode = linknodes[revision.node]
809
810
810 if revision.node in adjustedparents:
811 if revision.node in adjustedparents:
811 p1node, p2node = adjustedparents[revision.node]
812 p1node, p2node = adjustedparents[revision.node]
812 revision.p1node = p1node
813 revision.p1node = p1node
813 revision.p2node = p2node
814 revision.p2node = p2node
814 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
815 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
815
816
816 else:
817 else:
817 linknode = lookup(revision.node)
818 linknode = lookup(revision.node)
818
819
819 revision.linknode = linknode
820 revision.linknode = linknode
820 yield revision
821 yield revision
821
822
822 if progress:
823 if progress:
823 progress.complete()
824 progress.complete()
824
825
825
826
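# The `devel.bundle.delta` knob read in deltagroup() above is a debugging
# aid; a hedged example of forcing deltas against p1 while generating a
# bundle might look like this in a test hgrc. Values other than 'p1' or
# 'full' trigger a warning and the standard delta selection is used, and
# forcedeltaparentprev takes precedence over this setting either way:
#
#   [devel]
#   bundle.delta = p1
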
826 class cgpacker(object):
827 class cgpacker(object):
827 def __init__(
828 def __init__(
828 self,
829 self,
829 repo,
830 repo,
830 oldmatcher,
831 oldmatcher,
831 matcher,
832 matcher,
832 version,
833 version,
833 builddeltaheader,
834 builddeltaheader,
834 manifestsend,
835 manifestsend,
835 forcedeltaparentprev=False,
836 forcedeltaparentprev=False,
836 bundlecaps=None,
837 bundlecaps=None,
837 ellipses=False,
838 ellipses=False,
838 shallow=False,
839 shallow=False,
839 ellipsisroots=None,
840 ellipsisroots=None,
840 fullnodes=None,
841 fullnodes=None,
841 ):
842 ):
842 """Given a source repo, construct a bundler.
843 """Given a source repo, construct a bundler.
843
844
844 oldmatcher is a matcher that matches on files the client already has.
845 oldmatcher is a matcher that matches on files the client already has.
845 These will not be included in the changegroup.
846 These will not be included in the changegroup.
846
847
847 matcher is a matcher that matches on files to include in the
848 matcher is a matcher that matches on files to include in the
848 changegroup. Used to facilitate sparse changegroups.
849 changegroup. Used to facilitate sparse changegroups.
849
850
850 forcedeltaparentprev indicates whether delta parents must be against
851 forcedeltaparentprev indicates whether delta parents must be against
851 the previous revision in a delta group. This should only be used for
852 the previous revision in a delta group. This should only be used for
852 compatibility with changegroup version 1.
853 compatibility with changegroup version 1.
853
854
854 builddeltaheader is a callable that constructs the header for a group
855 builddeltaheader is a callable that constructs the header for a group
855 delta.
856 delta.
856
857
857 manifestsend is a chunk to send after manifests have been fully emitted.
858 manifestsend is a chunk to send after manifests have been fully emitted.
858
859
859 ellipses indicates whether ellipsis serving mode is enabled.
860 ellipses indicates whether ellipsis serving mode is enabled.
860
861
861 bundlecaps is optional and can be used to specify the set of
862 bundlecaps is optional and can be used to specify the set of
862 capabilities which can be used to build the bundle. While bundlecaps is
863 capabilities which can be used to build the bundle. While bundlecaps is
863 unused in core Mercurial, extensions rely on this feature to communicate
864 unused in core Mercurial, extensions rely on this feature to communicate
864 capabilities to customize the changegroup packer.
865 capabilities to customize the changegroup packer.
865
866
866 shallow indicates whether shallow data might be sent. The packer may
867 shallow indicates whether shallow data might be sent. The packer may
867 need to pack file contents not introduced by the changes being packed.
868 need to pack file contents not introduced by the changes being packed.
868
869
869 fullnodes is the set of changelog nodes which should not be ellipsis
870 fullnodes is the set of changelog nodes which should not be ellipsis
870 nodes. We store this rather than the set of nodes that should be
871 nodes. We store this rather than the set of nodes that should be
871 ellipsis because for very large histories we expect this to be
872 ellipsis because for very large histories we expect this to be
872 significantly smaller.
873 significantly smaller.
873 """
874 """
874 assert oldmatcher
875 assert oldmatcher
875 assert matcher
876 assert matcher
876 self._oldmatcher = oldmatcher
877 self._oldmatcher = oldmatcher
877 self._matcher = matcher
878 self._matcher = matcher
878
879
879 self.version = version
880 self.version = version
880 self._forcedeltaparentprev = forcedeltaparentprev
881 self._forcedeltaparentprev = forcedeltaparentprev
881 self._builddeltaheader = builddeltaheader
882 self._builddeltaheader = builddeltaheader
882 self._manifestsend = manifestsend
883 self._manifestsend = manifestsend
883 self._ellipses = ellipses
884 self._ellipses = ellipses
884
885
885 # Set of capabilities we can use to build the bundle.
886 # Set of capabilities we can use to build the bundle.
886 if bundlecaps is None:
887 if bundlecaps is None:
887 bundlecaps = set()
888 bundlecaps = set()
888 self._bundlecaps = bundlecaps
889 self._bundlecaps = bundlecaps
889 self._isshallow = shallow
890 self._isshallow = shallow
890 self._fullclnodes = fullnodes
891 self._fullclnodes = fullnodes
891
892
892 # Maps ellipsis revs to their roots at the changelog level.
893 # Maps ellipsis revs to their roots at the changelog level.
893 self._precomputedellipsis = ellipsisroots
894 self._precomputedellipsis = ellipsisroots
894
895
895 self._repo = repo
896 self._repo = repo
896
897
897 if self._repo.ui.verbose and not self._repo.ui.debugflag:
898 if self._repo.ui.verbose and not self._repo.ui.debugflag:
898 self._verbosenote = self._repo.ui.note
899 self._verbosenote = self._repo.ui.note
899 else:
900 else:
900 self._verbosenote = lambda s: None
901 self._verbosenote = lambda s: None
901
902
902 def generate(
903 def generate(
903 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
904 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
904 ):
905 ):
905 """Yield a sequence of changegroup byte chunks.
906 """Yield a sequence of changegroup byte chunks.
906 If changelog is False, changelog data won't be added to changegroup
907 If changelog is False, changelog data won't be added to changegroup
907 """
908 """
908
909
909 repo = self._repo
910 repo = self._repo
910 cl = repo.changelog
911 cl = repo.changelog
911
912
912 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
913 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
913 size = 0
914 size = 0
914
915
915 clstate, deltas = self._generatechangelog(
916 clstate, deltas = self._generatechangelog(
916 cl, clnodes, generate=changelog
917 cl, clnodes, generate=changelog
917 )
918 )
918 for delta in deltas:
919 for delta in deltas:
919 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
920 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
920 size += len(chunk)
921 size += len(chunk)
921 yield chunk
922 yield chunk
922
923
923 close = closechunk()
924 close = closechunk()
924 size += len(close)
925 size += len(close)
925 yield closechunk()
926 yield closechunk()
926
927
927 self._verbosenote(_(b'%8.i (changelog)\n') % size)
928 self._verbosenote(_(b'%8.i (changelog)\n') % size)
928
929
929 clrevorder = clstate[b'clrevorder']
930 clrevorder = clstate[b'clrevorder']
930 manifests = clstate[b'manifests']
931 manifests = clstate[b'manifests']
931 changedfiles = clstate[b'changedfiles']
932 changedfiles = clstate[b'changedfiles']
932
933
933 # We need to make sure that the linkrev in the changegroup refers to
934 # We need to make sure that the linkrev in the changegroup refers to
934 # the first changeset that introduced the manifest or file revision.
935 # the first changeset that introduced the manifest or file revision.
935 # The fastpath is usually safer than the slowpath, because the filelogs
936 # The fastpath is usually safer than the slowpath, because the filelogs
936 # are walked in revlog order.
937 # are walked in revlog order.
937 #
938 #
938 # When taking the slowpath when the manifest revlog uses generaldelta,
939 # When taking the slowpath when the manifest revlog uses generaldelta,
939 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
940 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
940 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
941 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
941 #
942 #
942 # When taking the fastpath, we are only vulnerable to reordering
943 # When taking the fastpath, we are only vulnerable to reordering
943 # of the changelog itself. The changelog never uses generaldelta and is
944 # of the changelog itself. The changelog never uses generaldelta and is
944 # never reordered. To handle this case, we simply take the slowpath,
945 # never reordered. To handle this case, we simply take the slowpath,
945 # which already has the 'clrevorder' logic. This was also fixed in
946 # which already has the 'clrevorder' logic. This was also fixed in
946 # cc0ff93d0c0c.
947 # cc0ff93d0c0c.
947
948
948 # Treemanifests don't work correctly with fastpathlinkrev
949 # Treemanifests don't work correctly with fastpathlinkrev
949 # either, because we don't discover which directory nodes to
950 # either, because we don't discover which directory nodes to
950 # send along with files. This could probably be fixed.
951 # send along with files. This could probably be fixed.
951 fastpathlinkrev = fastpathlinkrev and (
952 fastpathlinkrev = fastpathlinkrev and (
952 repository.TREEMANIFEST_REQUIREMENT not in repo.requirements
953 requirements.TREEMANIFEST_REQUIREMENT not in repo.requirements
953 )
954 )
954
955
955 fnodes = {} # needed file nodes
956 fnodes = {} # needed file nodes
956
957
957 size = 0
958 size = 0
958 it = self.generatemanifests(
959 it = self.generatemanifests(
959 commonrevs,
960 commonrevs,
960 clrevorder,
961 clrevorder,
961 fastpathlinkrev,
962 fastpathlinkrev,
962 manifests,
963 manifests,
963 fnodes,
964 fnodes,
964 source,
965 source,
965 clstate[b'clrevtomanifestrev'],
966 clstate[b'clrevtomanifestrev'],
966 )
967 )
967
968
968 for tree, deltas in it:
969 for tree, deltas in it:
969 if tree:
970 if tree:
970 assert self.version == b'03'
971 assert self.version == b'03'
971 chunk = _fileheader(tree)
972 chunk = _fileheader(tree)
972 size += len(chunk)
973 size += len(chunk)
973 yield chunk
974 yield chunk
974
975
975 for delta in deltas:
976 for delta in deltas:
976 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
977 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
977 for chunk in chunks:
978 for chunk in chunks:
978 size += len(chunk)
979 size += len(chunk)
979 yield chunk
980 yield chunk
980
981
981 close = closechunk()
982 close = closechunk()
982 size += len(close)
983 size += len(close)
983 yield close
984 yield close
984
985
985 self._verbosenote(_(b'%8.i (manifests)\n') % size)
986 self._verbosenote(_(b'%8.i (manifests)\n') % size)
986 yield self._manifestsend
987 yield self._manifestsend
987
988
988 mfdicts = None
989 mfdicts = None
989 if self._ellipses and self._isshallow:
990 if self._ellipses and self._isshallow:
990 mfdicts = [
991 mfdicts = [
991 (self._repo.manifestlog[n].read(), lr)
992 (self._repo.manifestlog[n].read(), lr)
992 for (n, lr) in pycompat.iteritems(manifests)
993 for (n, lr) in pycompat.iteritems(manifests)
993 ]
994 ]
994
995
995 manifests.clear()
996 manifests.clear()
996 clrevs = {cl.rev(x) for x in clnodes}
997 clrevs = {cl.rev(x) for x in clnodes}
997
998
998 it = self.generatefiles(
999 it = self.generatefiles(
999 changedfiles,
1000 changedfiles,
1000 commonrevs,
1001 commonrevs,
1001 source,
1002 source,
1002 mfdicts,
1003 mfdicts,
1003 fastpathlinkrev,
1004 fastpathlinkrev,
1004 fnodes,
1005 fnodes,
1005 clrevs,
1006 clrevs,
1006 )
1007 )
1007
1008
1008 for path, deltas in it:
1009 for path, deltas in it:
1009 h = _fileheader(path)
1010 h = _fileheader(path)
1010 size = len(h)
1011 size = len(h)
1011 yield h
1012 yield h
1012
1013
1013 for delta in deltas:
1014 for delta in deltas:
1014 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1015 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1015 for chunk in chunks:
1016 for chunk in chunks:
1016 size += len(chunk)
1017 size += len(chunk)
1017 yield chunk
1018 yield chunk
1018
1019
1019 close = closechunk()
1020 close = closechunk()
1020 size += len(close)
1021 size += len(close)
1021 yield close
1022 yield close
1022
1023
1023 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1024 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1024
1025
1025 yield closechunk()
1026 yield closechunk()
1026
1027
1027 if clnodes:
1028 if clnodes:
1028 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1029 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1029
1030
1030 def _generatechangelog(self, cl, nodes, generate=True):
1031 def _generatechangelog(self, cl, nodes, generate=True):
1031 """Generate data for changelog chunks.
1032 """Generate data for changelog chunks.
1032
1033
1033 Returns a 2-tuple of a dict containing state and an iterable of
1034 Returns a 2-tuple of a dict containing state and an iterable of
1034 byte chunks. The state will not be fully populated until the
1035 byte chunks. The state will not be fully populated until the
1035 chunk stream has been fully consumed.
1036 chunk stream has been fully consumed.
1036
1037
1037 if generate is False, the state will be fully populated and no chunk
1038 if generate is False, the state will be fully populated and no chunk
1038 stream will be yielded
1039 stream will be yielded
1039 """
1040 """
1040 clrevorder = {}
1041 clrevorder = {}
1041 manifests = {}
1042 manifests = {}
1042 mfl = self._repo.manifestlog
1043 mfl = self._repo.manifestlog
1043 changedfiles = set()
1044 changedfiles = set()
1044 clrevtomanifestrev = {}
1045 clrevtomanifestrev = {}
1045
1046
1046 state = {
1047 state = {
1047 b'clrevorder': clrevorder,
1048 b'clrevorder': clrevorder,
1048 b'manifests': manifests,
1049 b'manifests': manifests,
1049 b'changedfiles': changedfiles,
1050 b'changedfiles': changedfiles,
1050 b'clrevtomanifestrev': clrevtomanifestrev,
1051 b'clrevtomanifestrev': clrevtomanifestrev,
1051 }
1052 }
1052
1053
1053 if not (generate or self._ellipses):
1054 if not (generate or self._ellipses):
1054 # sort the nodes in storage order
1055 # sort the nodes in storage order
1055 nodes = sorted(nodes, key=cl.rev)
1056 nodes = sorted(nodes, key=cl.rev)
1056 for node in nodes:
1057 for node in nodes:
1057 c = cl.changelogrevision(node)
1058 c = cl.changelogrevision(node)
1058 clrevorder[node] = len(clrevorder)
1059 clrevorder[node] = len(clrevorder)
1059 # record the first changeset introducing this manifest version
1060 # record the first changeset introducing this manifest version
1060 manifests.setdefault(c.manifest, node)
1061 manifests.setdefault(c.manifest, node)
1061 # Record a complete list of potentially-changed files in
1062 # Record a complete list of potentially-changed files in
1062 # this manifest.
1063 # this manifest.
1063 changedfiles.update(c.files)
1064 changedfiles.update(c.files)
1064
1065
1065 return state, ()
1066 return state, ()
1066
1067
1067 # Callback for the changelog, used to collect changed files and
1068 # Callback for the changelog, used to collect changed files and
1068 # manifest nodes.
1069 # manifest nodes.
1069 # Returns the linkrev node (identity in the changelog case).
1070 # Returns the linkrev node (identity in the changelog case).
1070 def lookupcl(x):
1071 def lookupcl(x):
1071 c = cl.changelogrevision(x)
1072 c = cl.changelogrevision(x)
1072 clrevorder[x] = len(clrevorder)
1073 clrevorder[x] = len(clrevorder)
1073
1074
1074 if self._ellipses:
1075 if self._ellipses:
1075 # Only update manifests if x is going to be sent. Otherwise we
1076 # Only update manifests if x is going to be sent. Otherwise we
1076 # end up with bogus linkrevs specified for manifests and
1077 # end up with bogus linkrevs specified for manifests and
1077 # we skip some manifest nodes that we should otherwise
1078 # we skip some manifest nodes that we should otherwise
1078 # have sent.
1079 # have sent.
1079 if (
1080 if (
1080 x in self._fullclnodes
1081 x in self._fullclnodes
1081 or cl.rev(x) in self._precomputedellipsis
1082 or cl.rev(x) in self._precomputedellipsis
1082 ):
1083 ):
1083
1084
1084 manifestnode = c.manifest
1085 manifestnode = c.manifest
1085 # Record the first changeset introducing this manifest
1086 # Record the first changeset introducing this manifest
1086 # version.
1087 # version.
1087 manifests.setdefault(manifestnode, x)
1088 manifests.setdefault(manifestnode, x)
1088 # Set this narrow-specific dict so we have the lowest
1089 # Set this narrow-specific dict so we have the lowest
1089 # manifest revnum to look up for this cl revnum. (Part of
1090 # manifest revnum to look up for this cl revnum. (Part of
1090 # mapping changelog ellipsis parents to manifest ellipsis
1091 # mapping changelog ellipsis parents to manifest ellipsis
1091 # parents)
1092 # parents)
1092 clrevtomanifestrev.setdefault(
1093 clrevtomanifestrev.setdefault(
1093 cl.rev(x), mfl.rev(manifestnode)
1094 cl.rev(x), mfl.rev(manifestnode)
1094 )
1095 )
1095 # We can't trust the changed files list in the changeset if the
1096 # We can't trust the changed files list in the changeset if the
1096 # client requested a shallow clone.
1097 # client requested a shallow clone.
1097 if self._isshallow:
1098 if self._isshallow:
1098 changedfiles.update(mfl[c.manifest].read().keys())
1099 changedfiles.update(mfl[c.manifest].read().keys())
1099 else:
1100 else:
1100 changedfiles.update(c.files)
1101 changedfiles.update(c.files)
1101 else:
1102 else:
1102 # record the first changeset introducing this manifest version
1103 # record the first changeset introducing this manifest version
1103 manifests.setdefault(c.manifest, x)
1104 manifests.setdefault(c.manifest, x)
1104 # Record a complete list of potentially-changed files in
1105 # Record a complete list of potentially-changed files in
1105 # this manifest.
1106 # this manifest.
1106 changedfiles.update(c.files)
1107 changedfiles.update(c.files)
1107
1108
1108 return x
1109 return x
1109
1110
1110 gen = deltagroup(
1111 gen = deltagroup(
1111 self._repo,
1112 self._repo,
1112 cl,
1113 cl,
1113 nodes,
1114 nodes,
1114 True,
1115 True,
1115 lookupcl,
1116 lookupcl,
1116 self._forcedeltaparentprev,
1117 self._forcedeltaparentprev,
1117 ellipses=self._ellipses,
1118 ellipses=self._ellipses,
1118 topic=_(b'changesets'),
1119 topic=_(b'changesets'),
1119 clrevtolocalrev={},
1120 clrevtolocalrev={},
1120 fullclnodes=self._fullclnodes,
1121 fullclnodes=self._fullclnodes,
1121 precomputedellipsis=self._precomputedellipsis,
1122 precomputedellipsis=self._precomputedellipsis,
1122 )
1123 )
1123
1124
1124 return state, gen
1125 return state, gen
1125
1126
1126 def generatemanifests(
1127 def generatemanifests(
1127 self,
1128 self,
1128 commonrevs,
1129 commonrevs,
1129 clrevorder,
1130 clrevorder,
1130 fastpathlinkrev,
1131 fastpathlinkrev,
1131 manifests,
1132 manifests,
1132 fnodes,
1133 fnodes,
1133 source,
1134 source,
1134 clrevtolocalrev,
1135 clrevtolocalrev,
1135 ):
1136 ):
1136 """Returns an iterator of changegroup chunks containing manifests.
1137 """Returns an iterator of changegroup chunks containing manifests.
1137
1138
1138 `source` is unused here, but is used by extensions like remotefilelog to
1139 `source` is unused here, but is used by extensions like remotefilelog to
1139 change what is sent based on pulls vs. pushes, etc.
1140 change what is sent based on pulls vs. pushes, etc.
1140 """
1141 """
1141 repo = self._repo
1142 repo = self._repo
1142 mfl = repo.manifestlog
1143 mfl = repo.manifestlog
1143 tmfnodes = {b'': manifests}
1144 tmfnodes = {b'': manifests}
1144
1145
1145 # Callback for the manifest, used to collect linkrevs for filelog
1146 # Callback for the manifest, used to collect linkrevs for filelog
1146 # revisions.
1147 # revisions.
1147 # Returns the linkrev node (collected in lookupcl).
1148 # Returns the linkrev node (collected in lookupcl).
1148 def makelookupmflinknode(tree, nodes):
1149 def makelookupmflinknode(tree, nodes):
1149 if fastpathlinkrev:
1150 if fastpathlinkrev:
1150 assert not tree
1151 assert not tree
1151 return (
1152 return (
1152 manifests.__getitem__
1153 manifests.__getitem__
1153 ) # pytype: disable=unsupported-operands
1154 ) # pytype: disable=unsupported-operands
1154
1155
1155 def lookupmflinknode(x):
1156 def lookupmflinknode(x):
1156 """Callback for looking up the linknode for manifests.
1157 """Callback for looking up the linknode for manifests.
1157
1158
1158 Returns the linkrev node for the specified manifest.
1159 Returns the linkrev node for the specified manifest.
1159
1160
1160 SIDE EFFECT:
1161 SIDE EFFECT:
1161
1162
1162 1) fclnodes gets populated with the list of relevant
1163 1) fclnodes gets populated with the list of relevant
1163 file nodes if we're not using fastpathlinkrev
1164 file nodes if we're not using fastpathlinkrev
1164 2) When treemanifests are in use, collects treemanifest nodes
1165 2) When treemanifests are in use, collects treemanifest nodes
1165 to send
1166 to send
1166
1167
1167 Note that this means manifests must be completely sent to
1168 Note that this means manifests must be completely sent to
1168 the client before you can trust the list of files and
1169 the client before you can trust the list of files and
1169 treemanifests to send.
1170 treemanifests to send.
1170 """
1171 """
1171 clnode = nodes[x]
1172 clnode = nodes[x]
1172 mdata = mfl.get(tree, x).readfast(shallow=True)
1173 mdata = mfl.get(tree, x).readfast(shallow=True)
1173 for p, n, fl in mdata.iterentries():
1174 for p, n, fl in mdata.iterentries():
1174 if fl == b't': # subdirectory manifest
1175 if fl == b't': # subdirectory manifest
1175 subtree = tree + p + b'/'
1176 subtree = tree + p + b'/'
1176 tmfclnodes = tmfnodes.setdefault(subtree, {})
1177 tmfclnodes = tmfnodes.setdefault(subtree, {})
1177 tmfclnode = tmfclnodes.setdefault(n, clnode)
1178 tmfclnode = tmfclnodes.setdefault(n, clnode)
1178 if clrevorder[clnode] < clrevorder[tmfclnode]:
1179 if clrevorder[clnode] < clrevorder[tmfclnode]:
1179 tmfclnodes[n] = clnode
1180 tmfclnodes[n] = clnode
1180 else:
1181 else:
1181 f = tree + p
1182 f = tree + p
1182 fclnodes = fnodes.setdefault(f, {})
1183 fclnodes = fnodes.setdefault(f, {})
1183 fclnode = fclnodes.setdefault(n, clnode)
1184 fclnode = fclnodes.setdefault(n, clnode)
1184 if clrevorder[clnode] < clrevorder[fclnode]:
1185 if clrevorder[clnode] < clrevorder[fclnode]:
1185 fclnodes[n] = clnode
1186 fclnodes[n] = clnode
1186 return clnode
1187 return clnode
1187
1188
1188 return lookupmflinknode
1189 return lookupmflinknode
1189
1190
1190 while tmfnodes:
1191 while tmfnodes:
1191 tree, nodes = tmfnodes.popitem()
1192 tree, nodes = tmfnodes.popitem()
1192
1193
1193 should_visit = self._matcher.visitdir(tree[:-1])
1194 should_visit = self._matcher.visitdir(tree[:-1])
1194 if tree and not should_visit:
1195 if tree and not should_visit:
1195 continue
1196 continue
1196
1197
1197 store = mfl.getstorage(tree)
1198 store = mfl.getstorage(tree)
1198
1199
1199 if not should_visit:
1200 if not should_visit:
1200 # No nodes to send because this directory is out of
1201 # No nodes to send because this directory is out of
1201 # the client's view of the repository (probably
1202 # the client's view of the repository (probably
1202 # because of narrow clones). Do this even for the root
1203 # because of narrow clones). Do this even for the root
1203 # directory (tree=='')
1204 # directory (tree=='')
1204 prunednodes = []
1205 prunednodes = []
1205 else:
1206 else:
1206 # Avoid sending any manifest nodes we can prove the
1207 # Avoid sending any manifest nodes we can prove the
1207 # client already has by checking linkrevs. See the
1208 # client already has by checking linkrevs. See the
1208 # related comment in generatefiles().
1209 # related comment in generatefiles().
1209 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1210 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1210
1211
1211 if tree and not prunednodes:
1212 if tree and not prunednodes:
1212 continue
1213 continue
1213
1214
1214 lookupfn = makelookupmflinknode(tree, nodes)
1215 lookupfn = makelookupmflinknode(tree, nodes)
1215
1216
1216 deltas = deltagroup(
1217 deltas = deltagroup(
1217 self._repo,
1218 self._repo,
1218 store,
1219 store,
1219 prunednodes,
1220 prunednodes,
1220 False,
1221 False,
1221 lookupfn,
1222 lookupfn,
1222 self._forcedeltaparentprev,
1223 self._forcedeltaparentprev,
1223 ellipses=self._ellipses,
1224 ellipses=self._ellipses,
1224 topic=_(b'manifests'),
1225 topic=_(b'manifests'),
1225 clrevtolocalrev=clrevtolocalrev,
1226 clrevtolocalrev=clrevtolocalrev,
1226 fullclnodes=self._fullclnodes,
1227 fullclnodes=self._fullclnodes,
1227 precomputedellipsis=self._precomputedellipsis,
1228 precomputedellipsis=self._precomputedellipsis,
1228 )
1229 )
1229
1230
1230 if not self._oldmatcher.visitdir(store.tree[:-1]):
1231 if not self._oldmatcher.visitdir(store.tree[:-1]):
1231 yield tree, deltas
1232 yield tree, deltas
1232 else:
1233 else:
1233 # 'deltas' is a generator and we need to consume it even if
1234 # 'deltas' is a generator and we need to consume it even if
1234 # we are not going to send it because a side-effect is that
1235 # we are not going to send it because a side-effect is that
1235 # it updates tmfnodes (via lookupfn)
1236 # it updates tmfnodes (via lookupfn)
1236 for d in deltas:
1237 for d in deltas:
1237 pass
1238 pass
1238 if not tree:
1239 if not tree:
1239 yield tree, []
1240 yield tree, []
1240
1241
1241 def _prunemanifests(self, store, nodes, commonrevs):
1242 def _prunemanifests(self, store, nodes, commonrevs):
1242 if not self._ellipses:
1243 if not self._ellipses:
1243 # In the non-ellipses case, for large repositories it is better to
1244 # In the non-ellipses case, for large repositories it is better to
1244 # avoid calling store.rev and store.linkrev on a lot of nodes,
1245 # avoid calling store.rev and store.linkrev on a lot of nodes,
1245 # even if that means sending some extra data
1246 # even if that means sending some extra data
1246 return nodes.copy()
1247 return nodes.copy()
1247 # This is split out as a separate method to allow filtering
1248 # This is split out as a separate method to allow filtering
1248 # commonrevs in extension code.
1249 # commonrevs in extension code.
1249 #
1250 #
1250 # TODO(augie): this shouldn't be required; instead, we should
1251 # TODO(augie): this shouldn't be required; instead, we should
1251 # delegate filtering of the revisions to send to the store
1252 # delegate filtering of the revisions to send to the store
1252 # layer.
1253 # layer.
1253 frev, flr = store.rev, store.linkrev
1254 frev, flr = store.rev, store.linkrev
1254 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1255 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1255
1256
1256 # The 'source' parameter is useful for extensions
1257 # The 'source' parameter is useful for extensions
1257 def generatefiles(
1258 def generatefiles(
1258 self,
1259 self,
1259 changedfiles,
1260 changedfiles,
1260 commonrevs,
1261 commonrevs,
1261 source,
1262 source,
1262 mfdicts,
1263 mfdicts,
1263 fastpathlinkrev,
1264 fastpathlinkrev,
1264 fnodes,
1265 fnodes,
1265 clrevs,
1266 clrevs,
1266 ):
1267 ):
1267 changedfiles = [
1268 changedfiles = [
1268 f
1269 f
1269 for f in changedfiles
1270 for f in changedfiles
1270 if self._matcher(f) and not self._oldmatcher(f)
1271 if self._matcher(f) and not self._oldmatcher(f)
1271 ]
1272 ]
1272
1273
1273 if not fastpathlinkrev:
1274 if not fastpathlinkrev:
1274
1275
1275 def normallinknodes(unused, fname):
1276 def normallinknodes(unused, fname):
1276 return fnodes.get(fname, {})
1277 return fnodes.get(fname, {})
1277
1278
1278 else:
1279 else:
1279 cln = self._repo.changelog.node
1280 cln = self._repo.changelog.node
1280
1281
1281 def normallinknodes(store, fname):
1282 def normallinknodes(store, fname):
1282 flinkrev = store.linkrev
1283 flinkrev = store.linkrev
1283 fnode = store.node
1284 fnode = store.node
1284 revs = ((r, flinkrev(r)) for r in store)
1285 revs = ((r, flinkrev(r)) for r in store)
1285 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1286 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1286
1287
1287 clrevtolocalrev = {}
1288 clrevtolocalrev = {}
1288
1289
1289 if self._isshallow:
1290 if self._isshallow:
1290 # In a shallow clone, the linknodes callback needs to also include
1291 # In a shallow clone, the linknodes callback needs to also include
1291 # those file nodes that are in the manifests we sent but weren't
1292 # those file nodes that are in the manifests we sent but weren't
1292 # introduced by those manifests.
1293 # introduced by those manifests.
1293 commonctxs = [self._repo[c] for c in commonrevs]
1294 commonctxs = [self._repo[c] for c in commonrevs]
1294 clrev = self._repo.changelog.rev
1295 clrev = self._repo.changelog.rev
1295
1296
1296 def linknodes(flog, fname):
1297 def linknodes(flog, fname):
1297 for c in commonctxs:
1298 for c in commonctxs:
1298 try:
1299 try:
1299 fnode = c.filenode(fname)
1300 fnode = c.filenode(fname)
1300 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1301 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1301 except error.ManifestLookupError:
1302 except error.ManifestLookupError:
1302 pass
1303 pass
1303 links = normallinknodes(flog, fname)
1304 links = normallinknodes(flog, fname)
1304 if len(links) != len(mfdicts):
1305 if len(links) != len(mfdicts):
1305 for mf, lr in mfdicts:
1306 for mf, lr in mfdicts:
1306 fnode = mf.get(fname, None)
1307 fnode = mf.get(fname, None)
1307 if fnode in links:
1308 if fnode in links:
1308 links[fnode] = min(links[fnode], lr, key=clrev)
1309 links[fnode] = min(links[fnode], lr, key=clrev)
1309 elif fnode:
1310 elif fnode:
1310 links[fnode] = lr
1311 links[fnode] = lr
1311 return links
1312 return links
1312
1313
1313 else:
1314 else:
1314 linknodes = normallinknodes
1315 linknodes = normallinknodes
1315
1316
1316 repo = self._repo
1317 repo = self._repo
1317 progress = repo.ui.makeprogress(
1318 progress = repo.ui.makeprogress(
1318 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1319 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1319 )
1320 )
1320 for i, fname in enumerate(sorted(changedfiles)):
1321 for i, fname in enumerate(sorted(changedfiles)):
1321 filerevlog = repo.file(fname)
1322 filerevlog = repo.file(fname)
1322 if not filerevlog:
1323 if not filerevlog:
1323 raise error.Abort(
1324 raise error.Abort(
1324 _(b"empty or missing file data for %s") % fname
1325 _(b"empty or missing file data for %s") % fname
1325 )
1326 )
1326
1327
1327 clrevtolocalrev.clear()
1328 clrevtolocalrev.clear()
1328
1329
1329 linkrevnodes = linknodes(filerevlog, fname)
1330 linkrevnodes = linknodes(filerevlog, fname)
1330 # Lookup for filenodes; we collected the linkrev nodes above in the
1331 # Lookup for filenodes; we collected the linkrev nodes above in the
1331 # fastpath case and with lookupmf in the slowpath case.
1332 # fastpath case and with lookupmf in the slowpath case.
1332 def lookupfilelog(x):
1333 def lookupfilelog(x):
1333 return linkrevnodes[x]
1334 return linkrevnodes[x]
1334
1335
1335 frev, flr = filerevlog.rev, filerevlog.linkrev
1336 frev, flr = filerevlog.rev, filerevlog.linkrev
1336 # Skip sending any filenode we know the client already
1337 # Skip sending any filenode we know the client already
1337 # has. This avoids over-sending files relatively
1338 # has. This avoids over-sending files relatively
1338 # inexpensively, so it's not a problem if we under-filter
1339 # inexpensively, so it's not a problem if we under-filter
1339 # here.
1340 # here.
1340 filenodes = [
1341 filenodes = [
1341 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1342 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1342 ]
1343 ]
1343
1344
1344 if not filenodes:
1345 if not filenodes:
1345 continue
1346 continue
1346
1347
1347 progress.update(i + 1, item=fname)
1348 progress.update(i + 1, item=fname)
1348
1349
1349 deltas = deltagroup(
1350 deltas = deltagroup(
1350 self._repo,
1351 self._repo,
1351 filerevlog,
1352 filerevlog,
1352 filenodes,
1353 filenodes,
1353 False,
1354 False,
1354 lookupfilelog,
1355 lookupfilelog,
1355 self._forcedeltaparentprev,
1356 self._forcedeltaparentprev,
1356 ellipses=self._ellipses,
1357 ellipses=self._ellipses,
1357 clrevtolocalrev=clrevtolocalrev,
1358 clrevtolocalrev=clrevtolocalrev,
1358 fullclnodes=self._fullclnodes,
1359 fullclnodes=self._fullclnodes,
1359 precomputedellipsis=self._precomputedellipsis,
1360 precomputedellipsis=self._precomputedellipsis,
1360 )
1361 )
1361
1362
1362 yield fname, deltas
1363 yield fname, deltas
1363
1364
1364 progress.complete()
1365 progress.complete()
1365
1366
1366
1367
1367 def _makecg1packer(
1368 def _makecg1packer(
1368 repo,
1369 repo,
1369 oldmatcher,
1370 oldmatcher,
1370 matcher,
1371 matcher,
1371 bundlecaps,
1372 bundlecaps,
1372 ellipses=False,
1373 ellipses=False,
1373 shallow=False,
1374 shallow=False,
1374 ellipsisroots=None,
1375 ellipsisroots=None,
1375 fullnodes=None,
1376 fullnodes=None,
1376 ):
1377 ):
1377 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1378 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1378 d.node, d.p1node, d.p2node, d.linknode
1379 d.node, d.p1node, d.p2node, d.linknode
1379 )
1380 )
1380
1381
1381 return cgpacker(
1382 return cgpacker(
1382 repo,
1383 repo,
1383 oldmatcher,
1384 oldmatcher,
1384 matcher,
1385 matcher,
1385 b'01',
1386 b'01',
1386 builddeltaheader=builddeltaheader,
1387 builddeltaheader=builddeltaheader,
1387 manifestsend=b'',
1388 manifestsend=b'',
1388 forcedeltaparentprev=True,
1389 forcedeltaparentprev=True,
1389 bundlecaps=bundlecaps,
1390 bundlecaps=bundlecaps,
1390 ellipses=ellipses,
1391 ellipses=ellipses,
1391 shallow=shallow,
1392 shallow=shallow,
1392 ellipsisroots=ellipsisroots,
1393 ellipsisroots=ellipsisroots,
1393 fullnodes=fullnodes,
1394 fullnodes=fullnodes,
1394 )
1395 )
1395
1396
1396
1397
1397 def _makecg2packer(
1398 def _makecg2packer(
1398 repo,
1399 repo,
1399 oldmatcher,
1400 oldmatcher,
1400 matcher,
1401 matcher,
1401 bundlecaps,
1402 bundlecaps,
1402 ellipses=False,
1403 ellipses=False,
1403 shallow=False,
1404 shallow=False,
1404 ellipsisroots=None,
1405 ellipsisroots=None,
1405 fullnodes=None,
1406 fullnodes=None,
1406 ):
1407 ):
1407 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1408 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1408 d.node, d.p1node, d.p2node, d.basenode, d.linknode
1409 d.node, d.p1node, d.p2node, d.basenode, d.linknode
1409 )
1410 )
1410
1411
1411 return cgpacker(
1412 return cgpacker(
1412 repo,
1413 repo,
1413 oldmatcher,
1414 oldmatcher,
1414 matcher,
1415 matcher,
1415 b'02',
1416 b'02',
1416 builddeltaheader=builddeltaheader,
1417 builddeltaheader=builddeltaheader,
1417 manifestsend=b'',
1418 manifestsend=b'',
1418 bundlecaps=bundlecaps,
1419 bundlecaps=bundlecaps,
1419 ellipses=ellipses,
1420 ellipses=ellipses,
1420 shallow=shallow,
1421 shallow=shallow,
1421 ellipsisroots=ellipsisroots,
1422 ellipsisroots=ellipsisroots,
1422 fullnodes=fullnodes,
1423 fullnodes=fullnodes,
1423 )
1424 )
1424
1425
1425
1426
1426 def _makecg3packer(
1427 def _makecg3packer(
1427 repo,
1428 repo,
1428 oldmatcher,
1429 oldmatcher,
1429 matcher,
1430 matcher,
1430 bundlecaps,
1431 bundlecaps,
1431 ellipses=False,
1432 ellipses=False,
1432 shallow=False,
1433 shallow=False,
1433 ellipsisroots=None,
1434 ellipsisroots=None,
1434 fullnodes=None,
1435 fullnodes=None,
1435 ):
1436 ):
1436 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1437 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1437 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1438 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1438 )
1439 )
1439
1440
1440 return cgpacker(
1441 return cgpacker(
1441 repo,
1442 repo,
1442 oldmatcher,
1443 oldmatcher,
1443 matcher,
1444 matcher,
1444 b'03',
1445 b'03',
1445 builddeltaheader=builddeltaheader,
1446 builddeltaheader=builddeltaheader,
1446 manifestsend=closechunk(),
1447 manifestsend=closechunk(),
1447 bundlecaps=bundlecaps,
1448 bundlecaps=bundlecaps,
1448 ellipses=ellipses,
1449 ellipses=ellipses,
1449 shallow=shallow,
1450 shallow=shallow,
1450 ellipsisroots=ellipsisroots,
1451 ellipsisroots=ellipsisroots,
1451 fullnodes=fullnodes,
1452 fullnodes=fullnodes,
1452 )
1453 )
1453
1454
1454
1455
1455 _packermap = {
1456 _packermap = {
1456 b'01': (_makecg1packer, cg1unpacker),
1457 b'01': (_makecg1packer, cg1unpacker),
1457 # cg2 adds support for exchanging generaldelta
1458 # cg2 adds support for exchanging generaldelta
1458 b'02': (_makecg2packer, cg2unpacker),
1459 b'02': (_makecg2packer, cg2unpacker),
1459 # cg3 adds support for exchanging revlog flags and treemanifests
1460 # cg3 adds support for exchanging revlog flags and treemanifests
1460 b'03': (_makecg3packer, cg3unpacker),
1461 b'03': (_makecg3packer, cg3unpacker),
1461 }
1462 }
1462
1463
1463
1464
1464 def allsupportedversions(repo):
1465 def allsupportedversions(repo):
1465 versions = set(_packermap.keys())
1466 versions = set(_packermap.keys())
1466 needv03 = False
1467 needv03 = False
1467 if (
1468 if (
1468 repo.ui.configbool(b'experimental', b'changegroup3')
1469 repo.ui.configbool(b'experimental', b'changegroup3')
1469 or repo.ui.configbool(b'experimental', b'treemanifest')
1470 or repo.ui.configbool(b'experimental', b'treemanifest')
1470 or repository.TREEMANIFEST_REQUIREMENT in repo.requirements
1471 or requirements.TREEMANIFEST_REQUIREMENT in repo.requirements
1471 ):
1472 ):
1472 # we keep version 03 because we need to exchange treemanifest data
1473 # we keep version 03 because we need to exchange treemanifest data
1473 #
1474 #
1474 # we also keep versions 01 and 02, because it is possible for a repo to
1475 # we also keep versions 01 and 02, because it is possible for a repo to
1475 # contain both normal and tree manifests at the same time, so using an
1476 # contain both normal and tree manifests at the same time, so using an
1476 # older version to pull data is viable
1477 # older version to pull data is viable
1477 #
1478 #
1478 # (or even to push subset of history)
1479 # (or even to push subset of history)
1479 needv03 = True
1480 needv03 = True
1480 if b'exp-sidedata-flag' in repo.requirements:
1481 if b'exp-sidedata-flag' in repo.requirements:
1481 needv03 = True
1482 needv03 = True
1482 # don't attempt to use 01/02 until we do sidedata cleaning
1483 # don't attempt to use 01/02 until we do sidedata cleaning
1483 versions.discard(b'01')
1484 versions.discard(b'01')
1484 versions.discard(b'02')
1485 versions.discard(b'02')
1485 if not needv03:
1486 if not needv03:
1486 versions.discard(b'03')
1487 versions.discard(b'03')
1487 return versions
1488 return versions
1488
1489
1489
1490
1490 # Changegroup versions that can be applied to the repo
1491 # Changegroup versions that can be applied to the repo
1491 def supportedincomingversions(repo):
1492 def supportedincomingversions(repo):
1492 return allsupportedversions(repo)
1493 return allsupportedversions(repo)
1493
1494
1494
1495
1495 # Changegroup versions that can be created from the repo
1496 # Changegroup versions that can be created from the repo
1496 def supportedoutgoingversions(repo):
1497 def supportedoutgoingversions(repo):
1497 versions = allsupportedversions(repo)
1498 versions = allsupportedversions(repo)
1498 if repository.TREEMANIFEST_REQUIREMENT in repo.requirements:
1499 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
1499 # Versions 01 and 02 support only flat manifests and it's just too
1500 # Versions 01 and 02 support only flat manifests and it's just too
1500 # expensive to convert between the flat manifest and tree manifest on
1501 # expensive to convert between the flat manifest and tree manifest on
1501 # the fly. Since tree manifests are hashed differently, all of history
1502 # the fly. Since tree manifests are hashed differently, all of history
1502 # would have to be converted. Instead, we simply don't even pretend to
1503 # would have to be converted. Instead, we simply don't even pretend to
1503 # support versions 01 and 02.
1504 # support versions 01 and 02.
1504 versions.discard(b'01')
1505 versions.discard(b'01')
1505 versions.discard(b'02')
1506 versions.discard(b'02')
1506 if repository.NARROW_REQUIREMENT in repo.requirements:
1507 if requirements.NARROW_REQUIREMENT in repo.requirements:
1507 # Versions 01 and 02 don't support revlog flags, and we need to
1508 # Versions 01 and 02 don't support revlog flags, and we need to
1508 # support that for stripping and unbundling to work.
1509 # support that for stripping and unbundling to work.
1509 versions.discard(b'01')
1510 versions.discard(b'01')
1510 versions.discard(b'02')
1511 versions.discard(b'02')
1511 if LFS_REQUIREMENT in repo.requirements:
1512 if LFS_REQUIREMENT in repo.requirements:
1512 # Versions 01 and 02 don't support revlog flags, and we need to
1513 # Versions 01 and 02 don't support revlog flags, and we need to
1513 # mark LFS entries with REVIDX_EXTSTORED.
1514 # mark LFS entries with REVIDX_EXTSTORED.
1514 versions.discard(b'01')
1515 versions.discard(b'01')
1515 versions.discard(b'02')
1516 versions.discard(b'02')
1516
1517
1517 return versions
1518 return versions
1518
1519
1519
1520
1520 def localversion(repo):
1521 def localversion(repo):
1521 # Finds the best version to use for bundles that are meant to be used
1522 # Finds the best version to use for bundles that are meant to be used
1522 # locally, such as those from strip and shelve, and temporary bundles.
1523 # locally, such as those from strip and shelve, and temporary bundles.
1523 return max(supportedoutgoingversions(repo))
1524 return max(supportedoutgoingversions(repo))
1524
1525
1525
1526
1526 def safeversion(repo):
1527 def safeversion(repo):
1527 # Finds the smallest version that it's safe to assume clients of the repo
1528 # Finds the smallest version that it's safe to assume clients of the repo
1528 # will support. For example, all hg versions that support generaldelta also
1529 # will support. For example, all hg versions that support generaldelta also
1529 # support changegroup 02.
1530 # support changegroup 02.
1530 versions = supportedoutgoingversions(repo)
1531 versions = supportedoutgoingversions(repo)
1531 if b'generaldelta' in repo.requirements:
1532 if b'generaldelta' in repo.requirements:
1532 versions.discard(b'01')
1533 versions.discard(b'01')
1533 assert versions
1534 assert versions
1534 return min(versions)
1535 return min(versions)
1535
1536
1536
1537
1537 def getbundler(
1538 def getbundler(
1538 version,
1539 version,
1539 repo,
1540 repo,
1540 bundlecaps=None,
1541 bundlecaps=None,
1541 oldmatcher=None,
1542 oldmatcher=None,
1542 matcher=None,
1543 matcher=None,
1543 ellipses=False,
1544 ellipses=False,
1544 shallow=False,
1545 shallow=False,
1545 ellipsisroots=None,
1546 ellipsisroots=None,
1546 fullnodes=None,
1547 fullnodes=None,
1547 ):
1548 ):
1548 assert version in supportedoutgoingversions(repo)
1549 assert version in supportedoutgoingversions(repo)
1549
1550
1550 if matcher is None:
1551 if matcher is None:
1551 matcher = matchmod.always()
1552 matcher = matchmod.always()
1552 if oldmatcher is None:
1553 if oldmatcher is None:
1553 oldmatcher = matchmod.never()
1554 oldmatcher = matchmod.never()
1554
1555
1555 if version == b'01' and not matcher.always():
1556 if version == b'01' and not matcher.always():
1556 raise error.ProgrammingError(
1557 raise error.ProgrammingError(
1557 b'version 01 changegroups do not support sparse file matchers'
1558 b'version 01 changegroups do not support sparse file matchers'
1558 )
1559 )
1559
1560
1560 if ellipses and version in (b'01', b'02'):
1561 if ellipses and version in (b'01', b'02'):
1561 raise error.Abort(
1562 raise error.Abort(
1562 _(
1563 _(
1563 b'ellipsis nodes require at least cg3 on client and server, '
1564 b'ellipsis nodes require at least cg3 on client and server, '
1564 b'but negotiated version %s'
1565 b'but negotiated version %s'
1565 )
1566 )
1566 % version
1567 % version
1567 )
1568 )
1568
1569
1569 # Requested files could include files not in the local store. So
1570 # Requested files could include files not in the local store. So
1570 # filter those out.
1571 # filter those out.
1571 matcher = repo.narrowmatch(matcher)
1572 matcher = repo.narrowmatch(matcher)
1572
1573
1573 fn = _packermap[version][0]
1574 fn = _packermap[version][0]
1574 return fn(
1575 return fn(
1575 repo,
1576 repo,
1576 oldmatcher,
1577 oldmatcher,
1577 matcher,
1578 matcher,
1578 bundlecaps,
1579 bundlecaps,
1579 ellipses=ellipses,
1580 ellipses=ellipses,
1580 shallow=shallow,
1581 shallow=shallow,
1581 ellipsisroots=ellipsisroots,
1582 ellipsisroots=ellipsisroots,
1582 fullnodes=fullnodes,
1583 fullnodes=fullnodes,
1583 )
1584 )
1584
1585
1585
1586
1586 def getunbundler(version, fh, alg, extras=None):
1587 def getunbundler(version, fh, alg, extras=None):
1587 return _packermap[version][1](fh, alg, extras=extras)
1588 return _packermap[version][1](fh, alg, extras=extras)
1588
1589
1589
1590
1590 def _changegroupinfo(repo, nodes, source):
1591 def _changegroupinfo(repo, nodes, source):
1591 if repo.ui.verbose or source == b'bundle':
1592 if repo.ui.verbose or source == b'bundle':
1592 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1593 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1593 if repo.ui.debugflag:
1594 if repo.ui.debugflag:
1594 repo.ui.debug(b"list of changesets:\n")
1595 repo.ui.debug(b"list of changesets:\n")
1595 for node in nodes:
1596 for node in nodes:
1596 repo.ui.debug(b"%s\n" % hex(node))
1597 repo.ui.debug(b"%s\n" % hex(node))
1597
1598
1598
1599
1599 def makechangegroup(
1600 def makechangegroup(
1600 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1601 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1601 ):
1602 ):
1602 cgstream = makestream(
1603 cgstream = makestream(
1603 repo,
1604 repo,
1604 outgoing,
1605 outgoing,
1605 version,
1606 version,
1606 source,
1607 source,
1607 fastpath=fastpath,
1608 fastpath=fastpath,
1608 bundlecaps=bundlecaps,
1609 bundlecaps=bundlecaps,
1609 )
1610 )
1610 return getunbundler(
1611 return getunbundler(
1611 version,
1612 version,
1612 util.chunkbuffer(cgstream),
1613 util.chunkbuffer(cgstream),
1613 None,
1614 None,
1614 {b'clcount': len(outgoing.missing)},
1615 {b'clcount': len(outgoing.missing)},
1615 )
1616 )
1616
1617
1617
1618
1618 def makestream(
1619 def makestream(
1619 repo,
1620 repo,
1620 outgoing,
1621 outgoing,
1621 version,
1622 version,
1622 source,
1623 source,
1623 fastpath=False,
1624 fastpath=False,
1624 bundlecaps=None,
1625 bundlecaps=None,
1625 matcher=None,
1626 matcher=None,
1626 ):
1627 ):
1627 bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
1628 bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
1628
1629
1629 repo = repo.unfiltered()
1630 repo = repo.unfiltered()
1630 commonrevs = outgoing.common
1631 commonrevs = outgoing.common
1631 csets = outgoing.missing
1632 csets = outgoing.missing
1632 heads = outgoing.ancestorsof
1633 heads = outgoing.ancestorsof
1633 # We go through the fast path if we get told to, or if all (unfiltered)
1634 # We go through the fast path if we get told to, or if all (unfiltered)
1634 # heads have been requested (since we then know that all linkrevs will
1635 # heads have been requested (since we then know that all linkrevs will
1635 # be pulled by the client).
1636 # be pulled by the client).
1636 heads.sort()
1637 heads.sort()
1637 fastpathlinkrev = fastpath or (
1638 fastpathlinkrev = fastpath or (
1638 repo.filtername is None and heads == sorted(repo.heads())
1639 repo.filtername is None and heads == sorted(repo.heads())
1639 )
1640 )
1640
1641
1641 repo.hook(b'preoutgoing', throw=True, source=source)
1642 repo.hook(b'preoutgoing', throw=True, source=source)
1642 _changegroupinfo(repo, csets, source)
1643 _changegroupinfo(repo, csets, source)
1643 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1644 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1644
1645
1645
1646
1646 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1647 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1647 revisions = 0
1648 revisions = 0
1648 files = 0
1649 files = 0
1649 progress = repo.ui.makeprogress(
1650 progress = repo.ui.makeprogress(
1650 _(b'files'), unit=_(b'files'), total=expectedfiles
1651 _(b'files'), unit=_(b'files'), total=expectedfiles
1651 )
1652 )
1652 for chunkdata in iter(source.filelogheader, {}):
1653 for chunkdata in iter(source.filelogheader, {}):
1653 files += 1
1654 files += 1
1654 f = chunkdata[b"filename"]
1655 f = chunkdata[b"filename"]
1655 repo.ui.debug(b"adding %s revisions\n" % f)
1656 repo.ui.debug(b"adding %s revisions\n" % f)
1656 progress.increment()
1657 progress.increment()
1657 fl = repo.file(f)
1658 fl = repo.file(f)
1658 o = len(fl)
1659 o = len(fl)
1659 try:
1660 try:
1660 deltas = source.deltaiter()
1661 deltas = source.deltaiter()
1661 if not fl.addgroup(deltas, revmap, trp):
1662 if not fl.addgroup(deltas, revmap, trp):
1662 raise error.Abort(_(b"received file revlog group is empty"))
1663 raise error.Abort(_(b"received file revlog group is empty"))
1663 except error.CensoredBaseError as e:
1664 except error.CensoredBaseError as e:
1664 raise error.Abort(_(b"received delta base is censored: %s") % e)
1665 raise error.Abort(_(b"received delta base is censored: %s") % e)
1665 revisions += len(fl) - o
1666 revisions += len(fl) - o
1666 if f in needfiles:
1667 if f in needfiles:
1667 needs = needfiles[f]
1668 needs = needfiles[f]
1668 for new in pycompat.xrange(o, len(fl)):
1669 for new in pycompat.xrange(o, len(fl)):
1669 n = fl.node(new)
1670 n = fl.node(new)
1670 if n in needs:
1671 if n in needs:
1671 needs.remove(n)
1672 needs.remove(n)
1672 else:
1673 else:
1673 raise error.Abort(_(b"received spurious file revlog entry"))
1674 raise error.Abort(_(b"received spurious file revlog entry"))
1674 if not needs:
1675 if not needs:
1675 del needfiles[f]
1676 del needfiles[f]
1676 progress.complete()
1677 progress.complete()
1677
1678
1678 for f, needs in pycompat.iteritems(needfiles):
1679 for f, needs in pycompat.iteritems(needfiles):
1679 fl = repo.file(f)
1680 fl = repo.file(f)
1680 for n in needs:
1681 for n in needs:
1681 try:
1682 try:
1682 fl.rev(n)
1683 fl.rev(n)
1683 except error.LookupError:
1684 except error.LookupError:
1684 raise error.Abort(
1685 raise error.Abort(
1685 _(b'missing file data for %s:%s - run hg verify')
1686 _(b'missing file data for %s:%s - run hg verify')
1686 % (f, hex(n))
1687 % (f, hex(n))
1687 )
1688 )
1688
1689
1689 return revisions, files
1690 return revisions, files
@@ -1,4217 +1,4216 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import copy as copymod
10 import copy as copymod
11 import errno
11 import errno
12 import os
12 import os
13 import re
13 import re
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 short,
20 short,
21 )
21 )
22 from .pycompat import (
22 from .pycompat import (
23 getattr,
23 getattr,
24 open,
24 open,
25 setattr,
25 setattr,
26 )
26 )
27 from .thirdparty import attr
27 from .thirdparty import attr
28
28
29 from . import (
29 from . import (
30 bookmarks,
30 bookmarks,
31 changelog,
31 changelog,
32 copies,
32 copies,
33 crecord as crecordmod,
33 crecord as crecordmod,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 formatter,
37 formatter,
38 logcmdutil,
38 logcmdutil,
39 match as matchmod,
39 match as matchmod,
40 merge as mergemod,
40 merge as mergemod,
41 mergestate as mergestatemod,
41 mergestate as mergestatemod,
42 mergeutil,
42 mergeutil,
43 obsolete,
43 obsolete,
44 patch,
44 patch,
45 pathutil,
45 pathutil,
46 phases,
46 phases,
47 pycompat,
47 pycompat,
48 repair,
48 repair,
49 requirements,
49 revlog,
50 revlog,
50 rewriteutil,
51 rewriteutil,
51 scmutil,
52 scmutil,
52 smartset,
53 smartset,
53 state as statemod,
54 state as statemod,
54 subrepoutil,
55 subrepoutil,
55 templatekw,
56 templatekw,
56 templater,
57 templater,
57 util,
58 util,
58 vfs as vfsmod,
59 vfs as vfsmod,
59 )
60 )
60
61
61 from .interfaces import repository
62
63 from .utils import (
62 from .utils import (
64 dateutil,
63 dateutil,
65 stringutil,
64 stringutil,
66 )
65 )
67
66
68 if pycompat.TYPE_CHECKING:
67 if pycompat.TYPE_CHECKING:
69 from typing import (
68 from typing import (
70 Any,
69 Any,
71 Dict,
70 Dict,
72 )
71 )
73
72
74 for t in (Any, Dict):
73 for t in (Any, Dict):
75 assert t
74 assert t
76
75
77 stringio = util.stringio
76 stringio = util.stringio
78
77
79 # templates of common command options
78 # templates of common command options
80
79
81 dryrunopts = [
80 dryrunopts = [
82 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
81 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
83 ]
82 ]
84
83
85 confirmopts = [
84 confirmopts = [
86 (b'', b'confirm', None, _(b'ask before applying actions')),
85 (b'', b'confirm', None, _(b'ask before applying actions')),
87 ]
86 ]
88
87
89 remoteopts = [
88 remoteopts = [
90 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
89 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
91 (
90 (
92 b'',
91 b'',
93 b'remotecmd',
92 b'remotecmd',
94 b'',
93 b'',
95 _(b'specify hg command to run on the remote side'),
94 _(b'specify hg command to run on the remote side'),
96 _(b'CMD'),
95 _(b'CMD'),
97 ),
96 ),
98 (
97 (
99 b'',
98 b'',
100 b'insecure',
99 b'insecure',
101 None,
100 None,
102 _(b'do not verify server certificate (ignoring web.cacerts config)'),
101 _(b'do not verify server certificate (ignoring web.cacerts config)'),
103 ),
102 ),
104 ]
103 ]
105
104
106 walkopts = [
105 walkopts = [
107 (
106 (
108 b'I',
107 b'I',
109 b'include',
108 b'include',
110 [],
109 [],
111 _(b'include names matching the given patterns'),
110 _(b'include names matching the given patterns'),
112 _(b'PATTERN'),
111 _(b'PATTERN'),
113 ),
112 ),
114 (
113 (
115 b'X',
114 b'X',
116 b'exclude',
115 b'exclude',
117 [],
116 [],
118 _(b'exclude names matching the given patterns'),
117 _(b'exclude names matching the given patterns'),
119 _(b'PATTERN'),
118 _(b'PATTERN'),
120 ),
119 ),
121 ]
120 ]
122
121
123 commitopts = [
122 commitopts = [
124 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
123 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
125 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
124 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
126 ]
125 ]
127
126
128 commitopts2 = [
127 commitopts2 = [
129 (
128 (
130 b'd',
129 b'd',
131 b'date',
130 b'date',
132 b'',
131 b'',
133 _(b'record the specified date as commit date'),
132 _(b'record the specified date as commit date'),
134 _(b'DATE'),
133 _(b'DATE'),
135 ),
134 ),
136 (
135 (
137 b'u',
136 b'u',
138 b'user',
137 b'user',
139 b'',
138 b'',
140 _(b'record the specified user as committer'),
139 _(b'record the specified user as committer'),
141 _(b'USER'),
140 _(b'USER'),
142 ),
141 ),
143 ]
142 ]
144
143
145 commitopts3 = [
144 commitopts3 = [
146 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
145 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
147 (b'U', b'currentuser', None, _(b'record the current user as committer')),
146 (b'U', b'currentuser', None, _(b'record the current user as committer')),
148 ]
147 ]
149
148
150 formatteropts = [
149 formatteropts = [
151 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
150 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
152 ]
151 ]
153
152
154 templateopts = [
153 templateopts = [
155 (
154 (
156 b'',
155 b'',
157 b'style',
156 b'style',
158 b'',
157 b'',
159 _(b'display using template map file (DEPRECATED)'),
158 _(b'display using template map file (DEPRECATED)'),
160 _(b'STYLE'),
159 _(b'STYLE'),
161 ),
160 ),
162 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
161 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
163 ]
162 ]
164
163
165 logopts = [
164 logopts = [
166 (b'p', b'patch', None, _(b'show patch')),
165 (b'p', b'patch', None, _(b'show patch')),
167 (b'g', b'git', None, _(b'use git extended diff format')),
166 (b'g', b'git', None, _(b'use git extended diff format')),
168 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
167 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
169 (b'M', b'no-merges', None, _(b'do not show merges')),
168 (b'M', b'no-merges', None, _(b'do not show merges')),
170 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
169 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
171 (b'G', b'graph', None, _(b"show the revision DAG")),
170 (b'G', b'graph', None, _(b"show the revision DAG")),
172 ] + templateopts
171 ] + templateopts
173
172
174 diffopts = [
173 diffopts = [
175 (b'a', b'text', None, _(b'treat all files as text')),
174 (b'a', b'text', None, _(b'treat all files as text')),
176 (
175 (
177 b'g',
176 b'g',
178 b'git',
177 b'git',
179 None,
178 None,
180 _(b'use git extended diff format (DEFAULT: diff.git)'),
179 _(b'use git extended diff format (DEFAULT: diff.git)'),
181 ),
180 ),
182 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
181 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
183 (b'', b'nodates', None, _(b'omit dates from diff headers')),
182 (b'', b'nodates', None, _(b'omit dates from diff headers')),
184 ]
183 ]
185
184
186 diffwsopts = [
185 diffwsopts = [
187 (
186 (
188 b'w',
187 b'w',
189 b'ignore-all-space',
188 b'ignore-all-space',
190 None,
189 None,
191 _(b'ignore white space when comparing lines'),
190 _(b'ignore white space when comparing lines'),
192 ),
191 ),
193 (
192 (
194 b'b',
193 b'b',
195 b'ignore-space-change',
194 b'ignore-space-change',
196 None,
195 None,
197 _(b'ignore changes in the amount of white space'),
196 _(b'ignore changes in the amount of white space'),
198 ),
197 ),
199 (
198 (
200 b'B',
199 b'B',
201 b'ignore-blank-lines',
200 b'ignore-blank-lines',
202 None,
201 None,
203 _(b'ignore changes whose lines are all blank'),
202 _(b'ignore changes whose lines are all blank'),
204 ),
203 ),
205 (
204 (
206 b'Z',
205 b'Z',
207 b'ignore-space-at-eol',
206 b'ignore-space-at-eol',
208 None,
207 None,
209 _(b'ignore changes in whitespace at EOL'),
208 _(b'ignore changes in whitespace at EOL'),
210 ),
209 ),
211 ]
210 ]
212
211
213 diffopts2 = (
212 diffopts2 = (
214 [
213 [
215 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
214 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
216 (
215 (
217 b'p',
216 b'p',
218 b'show-function',
217 b'show-function',
219 None,
218 None,
220 _(
219 _(
221 b'show which function each change is in (DEFAULT: diff.showfunc)'
220 b'show which function each change is in (DEFAULT: diff.showfunc)'
222 ),
221 ),
223 ),
222 ),
224 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
223 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
225 ]
224 ]
226 + diffwsopts
225 + diffwsopts
227 + [
226 + [
228 (
227 (
229 b'U',
228 b'U',
230 b'unified',
229 b'unified',
231 b'',
230 b'',
232 _(b'number of lines of context to show'),
231 _(b'number of lines of context to show'),
233 _(b'NUM'),
232 _(b'NUM'),
234 ),
233 ),
235 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
234 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
236 (
235 (
237 b'',
236 b'',
238 b'root',
237 b'root',
239 b'',
238 b'',
240 _(b'produce diffs relative to subdirectory'),
239 _(b'produce diffs relative to subdirectory'),
241 _(b'DIR'),
240 _(b'DIR'),
242 ),
241 ),
243 ]
242 ]
244 )
243 )
245
244
246 mergetoolopts = [
245 mergetoolopts = [
247 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
246 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
248 ]
247 ]
249
248
250 similarityopts = [
249 similarityopts = [
251 (
250 (
252 b's',
251 b's',
253 b'similarity',
252 b'similarity',
254 b'',
253 b'',
255 _(b'guess renamed files by similarity (0<=s<=100)'),
254 _(b'guess renamed files by similarity (0<=s<=100)'),
256 _(b'SIMILARITY'),
255 _(b'SIMILARITY'),
257 )
256 )
258 ]
257 ]
259
258
260 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
259 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
261
260
262 debugrevlogopts = [
261 debugrevlogopts = [
263 (b'c', b'changelog', False, _(b'open changelog')),
262 (b'c', b'changelog', False, _(b'open changelog')),
264 (b'm', b'manifest', False, _(b'open manifest')),
263 (b'm', b'manifest', False, _(b'open manifest')),
265 (b'', b'dir', b'', _(b'open directory manifest')),
264 (b'', b'dir', b'', _(b'open directory manifest')),
266 ]
265 ]
267
266
268 # special string such that everything below this line will be ignored in the
267 # special string such that everything below this line will be ignored in the
269 # editor text
268 # editor text
270 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
269 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
271
270
272
271
273 def check_at_most_one_arg(opts, *args):
272 def check_at_most_one_arg(opts, *args):
274 """abort if more than one of the arguments are in opts
273 """abort if more than one of the arguments are in opts
275
274
276 Returns the unique argument or None if none of them were specified.
275 Returns the unique argument or None if none of them were specified.
277 """
276 """
278
277
279 def to_display(name):
278 def to_display(name):
280 return pycompat.sysbytes(name).replace(b'_', b'-')
279 return pycompat.sysbytes(name).replace(b'_', b'-')
281
280
282 previous = None
281 previous = None
283 for x in args:
282 for x in args:
284 if opts.get(x):
283 if opts.get(x):
285 if previous:
284 if previous:
286 raise error.Abort(
285 raise error.Abort(
287 _(b'cannot specify both --%s and --%s')
286 _(b'cannot specify both --%s and --%s')
288 % (to_display(previous), to_display(x))
287 % (to_display(previous), to_display(x))
289 )
288 )
290 previous = x
289 previous = x
291 return previous
290 return previous
292
291
293
292
294 def check_incompatible_arguments(opts, first, others):
293 def check_incompatible_arguments(opts, first, others):
295 """abort if the first argument is given along with any of the others
294 """abort if the first argument is given along with any of the others
296
295
297 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
296 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
298 among themselves, and they're passed as a single collection.
297 among themselves, and they're passed as a single collection.
299 """
298 """
300 for other in others:
299 for other in others:
301 check_at_most_one_arg(opts, first, other)
300 check_at_most_one_arg(opts, first, other)
302
301
303
302
304 def resolvecommitoptions(ui, opts):
303 def resolvecommitoptions(ui, opts):
305 """modify commit options dict to handle related options
304 """modify commit options dict to handle related options
306
305
307 The return value indicates that ``rewrite.update-timestamp`` is the reason
306 The return value indicates that ``rewrite.update-timestamp`` is the reason
308 the ``date`` option is set.
307 the ``date`` option is set.
309 """
308 """
310 check_at_most_one_arg(opts, b'date', b'currentdate')
309 check_at_most_one_arg(opts, b'date', b'currentdate')
311 check_at_most_one_arg(opts, b'user', b'currentuser')
310 check_at_most_one_arg(opts, b'user', b'currentuser')
312
311
313 datemaydiffer = False # date-only change should be ignored?
312 datemaydiffer = False # date-only change should be ignored?
314
313
315 if opts.get(b'currentdate'):
314 if opts.get(b'currentdate'):
316 opts[b'date'] = b'%d %d' % dateutil.makedate()
315 opts[b'date'] = b'%d %d' % dateutil.makedate()
317 elif (
316 elif (
318 not opts.get(b'date')
317 not opts.get(b'date')
319 and ui.configbool(b'rewrite', b'update-timestamp')
318 and ui.configbool(b'rewrite', b'update-timestamp')
320 and opts.get(b'currentdate') is None
319 and opts.get(b'currentdate') is None
321 ):
320 ):
322 opts[b'date'] = b'%d %d' % dateutil.makedate()
321 opts[b'date'] = b'%d %d' % dateutil.makedate()
323 datemaydiffer = True
322 datemaydiffer = True
324
323
325 if opts.get(b'currentuser'):
324 if opts.get(b'currentuser'):
326 opts[b'user'] = ui.username()
325 opts[b'user'] = ui.username()
327
326
328 return datemaydiffer
327 return datemaydiffer
329
328
330
329
331 def checknotesize(ui, opts):
330 def checknotesize(ui, opts):
332 """ make sure note is of valid format """
331 """ make sure note is of valid format """
333
332
334 note = opts.get(b'note')
333 note = opts.get(b'note')
335 if not note:
334 if not note:
336 return
335 return
337
336
338 if len(note) > 255:
337 if len(note) > 255:
339 raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
338 raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
340 if b'\n' in note:
339 if b'\n' in note:
341 raise error.Abort(_(b"note cannot contain a newline"))
340 raise error.Abort(_(b"note cannot contain a newline"))
342
341
343
342
344 def ishunk(x):
343 def ishunk(x):
345 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
344 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
346 return isinstance(x, hunkclasses)
345 return isinstance(x, hunkclasses)
347
346
348
347
349 def newandmodified(chunks, originalchunks):
348 def newandmodified(chunks, originalchunks):
350 newlyaddedandmodifiedfiles = set()
349 newlyaddedandmodifiedfiles = set()
351 alsorestore = set()
350 alsorestore = set()
352 for chunk in chunks:
351 for chunk in chunks:
353 if (
352 if (
354 ishunk(chunk)
353 ishunk(chunk)
355 and chunk.header.isnewfile()
354 and chunk.header.isnewfile()
356 and chunk not in originalchunks
355 and chunk not in originalchunks
357 ):
356 ):
358 newlyaddedandmodifiedfiles.add(chunk.header.filename())
357 newlyaddedandmodifiedfiles.add(chunk.header.filename())
359 alsorestore.update(
358 alsorestore.update(
360 set(chunk.header.files()) - {chunk.header.filename()}
359 set(chunk.header.files()) - {chunk.header.filename()}
361 )
360 )
362 return newlyaddedandmodifiedfiles, alsorestore
361 return newlyaddedandmodifiedfiles, alsorestore
363
362
364
363
365 def parsealiases(cmd):
364 def parsealiases(cmd):
366 return cmd.split(b"|")
365 return cmd.split(b"|")
367
366
368
367
369 def setupwrapcolorwrite(ui):
368 def setupwrapcolorwrite(ui):
370 # wrap ui.write so diff output can be labeled/colorized
369 # wrap ui.write so diff output can be labeled/colorized
371 def wrapwrite(orig, *args, **kw):
370 def wrapwrite(orig, *args, **kw):
372 label = kw.pop('label', b'')
371 label = kw.pop('label', b'')
373 for chunk, l in patch.difflabel(lambda: args):
372 for chunk, l in patch.difflabel(lambda: args):
374 orig(chunk, label=label + l)
373 orig(chunk, label=label + l)
375
374
376 oldwrite = ui.write
375 oldwrite = ui.write
377
376
378 def wrap(*args, **kwargs):
377 def wrap(*args, **kwargs):
379 return wrapwrite(oldwrite, *args, **kwargs)
378 return wrapwrite(oldwrite, *args, **kwargs)
380
379
381 setattr(ui, 'write', wrap)
380 setattr(ui, 'write', wrap)
382 return oldwrite
381 return oldwrite
383
382
384
383
385 def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
384 def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
386 try:
385 try:
387 if usecurses:
386 if usecurses:
388 if testfile:
387 if testfile:
389 recordfn = crecordmod.testdecorator(
388 recordfn = crecordmod.testdecorator(
390 testfile, crecordmod.testchunkselector
389 testfile, crecordmod.testchunkselector
391 )
390 )
392 else:
391 else:
393 recordfn = crecordmod.chunkselector
392 recordfn = crecordmod.chunkselector
394
393
395 return crecordmod.filterpatch(
394 return crecordmod.filterpatch(
396 ui, originalhunks, recordfn, operation
395 ui, originalhunks, recordfn, operation
397 )
396 )
398 except crecordmod.fallbackerror as e:
397 except crecordmod.fallbackerror as e:
399 ui.warn(b'%s\n' % e)
398 ui.warn(b'%s\n' % e)
400 ui.warn(_(b'falling back to text mode\n'))
399 ui.warn(_(b'falling back to text mode\n'))
401
400
402 return patch.filterpatch(ui, originalhunks, match, operation)
401 return patch.filterpatch(ui, originalhunks, match, operation)
403
402
404
403
405 def recordfilter(ui, originalhunks, match, operation=None):
404 def recordfilter(ui, originalhunks, match, operation=None):
406 """ Prompts the user to filter the originalhunks and return a list of
405 """ Prompts the user to filter the originalhunks and return a list of
407 selected hunks.
406 selected hunks.
408 *operation* is used to build ui messages to indicate to the user what
407 *operation* is used to build ui messages to indicate to the user what
409 kind of filtering they are doing: reverting, committing, shelving, etc.
408 kind of filtering they are doing: reverting, committing, shelving, etc.
410 (see patch.filterpatch).
409 (see patch.filterpatch).
411 """
410 """
412 usecurses = crecordmod.checkcurses(ui)
411 usecurses = crecordmod.checkcurses(ui)
413 testfile = ui.config(b'experimental', b'crecordtest')
412 testfile = ui.config(b'experimental', b'crecordtest')
414 oldwrite = setupwrapcolorwrite(ui)
413 oldwrite = setupwrapcolorwrite(ui)
415 try:
414 try:
416 newchunks, newopts = filterchunks(
415 newchunks, newopts = filterchunks(
417 ui, originalhunks, usecurses, testfile, match, operation
416 ui, originalhunks, usecurses, testfile, match, operation
418 )
417 )
419 finally:
418 finally:
420 ui.write = oldwrite
419 ui.write = oldwrite
421 return newchunks, newopts
420 return newchunks, newopts
422
421
423
422
424 def dorecord(
423 def dorecord(
425 ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
424 ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
426 ):
425 ):
427 opts = pycompat.byteskwargs(opts)
426 opts = pycompat.byteskwargs(opts)
428 if not ui.interactive():
427 if not ui.interactive():
429 if cmdsuggest:
428 if cmdsuggest:
430 msg = _(b'running non-interactively, use %s instead') % cmdsuggest
429 msg = _(b'running non-interactively, use %s instead') % cmdsuggest
431 else:
430 else:
432 msg = _(b'running non-interactively')
431 msg = _(b'running non-interactively')
433 raise error.Abort(msg)
432 raise error.Abort(msg)
434
433
435 # make sure username is set before going interactive
434 # make sure username is set before going interactive
436 if not opts.get(b'user'):
435 if not opts.get(b'user'):
437 ui.username() # raise exception, username not provided
436 ui.username() # raise exception, username not provided
438
437
439 def recordfunc(ui, repo, message, match, opts):
438 def recordfunc(ui, repo, message, match, opts):
440 """This is a generic record driver.
439 """This is a generic record driver.
441
440
442 Its job is to interactively filter local changes, and
441 Its job is to interactively filter local changes, and
443 accordingly prepare the working directory into a state in which the
442 accordingly prepare the working directory into a state in which the
444 job can be delegated to a non-interactive commit command such as
443 job can be delegated to a non-interactive commit command such as
445 'commit' or 'qrefresh'.
444 'commit' or 'qrefresh'.
446
445
447 After the actual job is done by the non-interactive command, the
446 After the actual job is done by the non-interactive command, the
448 working directory is restored to its original state.
447 working directory is restored to its original state.
449
448
450 In the end we'll record interesting changes, and everything else
449 In the end we'll record interesting changes, and everything else
451 will be left in place, so the user can continue working.
450 will be left in place, so the user can continue working.
452 """
451 """
453 if not opts.get(b'interactive-unshelve'):
452 if not opts.get(b'interactive-unshelve'):
454 checkunfinished(repo, commit=True)
453 checkunfinished(repo, commit=True)
455 wctx = repo[None]
454 wctx = repo[None]
456 merge = len(wctx.parents()) > 1
455 merge = len(wctx.parents()) > 1
457 if merge:
456 if merge:
458 raise error.Abort(
457 raise error.Abort(
459 _(
458 _(
460 b'cannot partially commit a merge '
459 b'cannot partially commit a merge '
461 b'(use "hg commit" instead)'
460 b'(use "hg commit" instead)'
462 )
461 )
463 )
462 )
464
463
465 def fail(f, msg):
464 def fail(f, msg):
466 raise error.Abort(b'%s: %s' % (f, msg))
465 raise error.Abort(b'%s: %s' % (f, msg))
467
466
468 force = opts.get(b'force')
467 force = opts.get(b'force')
469 if not force:
468 if not force:
470 match = matchmod.badmatch(match, fail)
469 match = matchmod.badmatch(match, fail)
471
470
472 status = repo.status(match=match)
471 status = repo.status(match=match)
473
472
474 overrides = {(b'ui', b'commitsubrepos'): True}
473 overrides = {(b'ui', b'commitsubrepos'): True}
475
474
476 with repo.ui.configoverride(overrides, b'record'):
475 with repo.ui.configoverride(overrides, b'record'):
477 # subrepoutil.precommit() modifies the status
476 # subrepoutil.precommit() modifies the status
478 tmpstatus = scmutil.status(
477 tmpstatus = scmutil.status(
479 copymod.copy(status.modified),
478 copymod.copy(status.modified),
480 copymod.copy(status.added),
479 copymod.copy(status.added),
481 copymod.copy(status.removed),
480 copymod.copy(status.removed),
482 copymod.copy(status.deleted),
481 copymod.copy(status.deleted),
483 copymod.copy(status.unknown),
482 copymod.copy(status.unknown),
484 copymod.copy(status.ignored),
483 copymod.copy(status.ignored),
485 copymod.copy(status.clean), # pytype: disable=wrong-arg-count
484 copymod.copy(status.clean), # pytype: disable=wrong-arg-count
486 )
485 )
487
486
488 # Force allows -X subrepo to skip the subrepo.
487 # Force allows -X subrepo to skip the subrepo.
489 subs, commitsubs, newstate = subrepoutil.precommit(
488 subs, commitsubs, newstate = subrepoutil.precommit(
490 repo.ui, wctx, tmpstatus, match, force=True
489 repo.ui, wctx, tmpstatus, match, force=True
491 )
490 )
492 for s in subs:
491 for s in subs:
493 if s in commitsubs:
492 if s in commitsubs:
494 dirtyreason = wctx.sub(s).dirtyreason(True)
493 dirtyreason = wctx.sub(s).dirtyreason(True)
495 raise error.Abort(dirtyreason)
494 raise error.Abort(dirtyreason)
496
495
497 if not force:
496 if not force:
498 repo.checkcommitpatterns(wctx, match, status, fail)
497 repo.checkcommitpatterns(wctx, match, status, fail)
499 diffopts = patch.difffeatureopts(
498 diffopts = patch.difffeatureopts(
500 ui,
499 ui,
501 opts=opts,
500 opts=opts,
502 whitespace=True,
501 whitespace=True,
503 section=b'commands',
502 section=b'commands',
504 configprefix=b'commit.interactive.',
503 configprefix=b'commit.interactive.',
505 )
504 )
506 diffopts.nodates = True
505 diffopts.nodates = True
507 diffopts.git = True
506 diffopts.git = True
508 diffopts.showfunc = True
507 diffopts.showfunc = True
509 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
508 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
510 originalchunks = patch.parsepatch(originaldiff)
509 originalchunks = patch.parsepatch(originaldiff)
511 match = scmutil.match(repo[None], pats)
510 match = scmutil.match(repo[None], pats)
512
511
513 # 1. filter patch, since we are intending to apply a subset of it
512 # 1. filter patch, since we are intending to apply a subset of it
514 try:
513 try:
515 chunks, newopts = filterfn(ui, originalchunks, match)
514 chunks, newopts = filterfn(ui, originalchunks, match)
516 except error.PatchError as err:
515 except error.PatchError as err:
517 raise error.Abort(_(b'error parsing patch: %s') % err)
516 raise error.Abort(_(b'error parsing patch: %s') % err)
518 opts.update(newopts)
517 opts.update(newopts)
519
518
520 # We need to keep a backup of files that have been newly added and
519 # We need to keep a backup of files that have been newly added and
521 # modified during the recording process because there is a previous
520 # modified during the recording process because there is a previous
522 # version without the edit in the workdir. We also will need to restore
521 # version without the edit in the workdir. We also will need to restore
523 # files that were the sources of renames so that the patch application
522 # files that were the sources of renames so that the patch application
524 # works.
523 # works.
525 newlyaddedandmodifiedfiles, alsorestore = newandmodified(
524 newlyaddedandmodifiedfiles, alsorestore = newandmodified(
526 chunks, originalchunks
525 chunks, originalchunks
527 )
526 )
528 contenders = set()
527 contenders = set()
529 for h in chunks:
528 for h in chunks:
530 try:
529 try:
531 contenders.update(set(h.files()))
530 contenders.update(set(h.files()))
532 except AttributeError:
531 except AttributeError:
533 pass
532 pass
534
533
535 changed = status.modified + status.added + status.removed
534 changed = status.modified + status.added + status.removed
536 newfiles = [f for f in changed if f in contenders]
535 newfiles = [f for f in changed if f in contenders]
537 if not newfiles:
536 if not newfiles:
538 ui.status(_(b'no changes to record\n'))
537 ui.status(_(b'no changes to record\n'))
539 return 0
538 return 0
540
539
541 modified = set(status.modified)
540 modified = set(status.modified)
542
541
543 # 2. backup changed files, so we can restore them in the end
542 # 2. backup changed files, so we can restore them in the end
544
543
545 if backupall:
544 if backupall:
546 tobackup = changed
545 tobackup = changed
547 else:
546 else:
548 tobackup = [
547 tobackup = [
549 f
548 f
550 for f in newfiles
549 for f in newfiles
551 if f in modified or f in newlyaddedandmodifiedfiles
550 if f in modified or f in newlyaddedandmodifiedfiles
552 ]
551 ]
553 backups = {}
552 backups = {}
554 if tobackup:
553 if tobackup:
555 backupdir = repo.vfs.join(b'record-backups')
554 backupdir = repo.vfs.join(b'record-backups')
556 try:
555 try:
557 os.mkdir(backupdir)
556 os.mkdir(backupdir)
558 except OSError as err:
557 except OSError as err:
559 if err.errno != errno.EEXIST:
558 if err.errno != errno.EEXIST:
560 raise
559 raise
561 try:
560 try:
562 # backup continues
561 # backup continues
563 for f in tobackup:
562 for f in tobackup:
564 fd, tmpname = pycompat.mkstemp(
563 fd, tmpname = pycompat.mkstemp(
565 prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
564 prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
566 )
565 )
567 os.close(fd)
566 os.close(fd)
568 ui.debug(b'backup %r as %r\n' % (f, tmpname))
567 ui.debug(b'backup %r as %r\n' % (f, tmpname))
569 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
568 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
570 backups[f] = tmpname
569 backups[f] = tmpname
571
570
572 fp = stringio()
571 fp = stringio()
573 for c in chunks:
572 for c in chunks:
574 fname = c.filename()
573 fname = c.filename()
575 if fname in backups:
574 if fname in backups:
576 c.write(fp)
575 c.write(fp)
577 dopatch = fp.tell()
576 dopatch = fp.tell()
578 fp.seek(0)
577 fp.seek(0)
579
578
580 # 2.5 optionally review / modify patch in text editor
579 # 2.5 optionally review / modify patch in text editor
581 if opts.get(b'review', False):
580 if opts.get(b'review', False):
582 patchtext = (
581 patchtext = (
583 crecordmod.diffhelptext
582 crecordmod.diffhelptext
584 + crecordmod.patchhelptext
583 + crecordmod.patchhelptext
585 + fp.read()
584 + fp.read()
586 )
585 )
587 reviewedpatch = ui.edit(
586 reviewedpatch = ui.edit(
588 patchtext, b"", action=b"diff", repopath=repo.path
587 patchtext, b"", action=b"diff", repopath=repo.path
589 )
588 )
590 fp.truncate(0)
589 fp.truncate(0)
591 fp.write(reviewedpatch)
590 fp.write(reviewedpatch)
592 fp.seek(0)
591 fp.seek(0)
593
592
594 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
593 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
595 # 3a. apply filtered patch to clean repo (clean)
594 # 3a. apply filtered patch to clean repo (clean)
596 if backups:
595 if backups:
597 m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
596 m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
598 mergemod.revert_to(repo[b'.'], matcher=m)
597 mergemod.revert_to(repo[b'.'], matcher=m)
599
598
600 # 3b. (apply)
599 # 3b. (apply)
601 if dopatch:
600 if dopatch:
602 try:
601 try:
603 ui.debug(b'applying patch\n')
602 ui.debug(b'applying patch\n')
604 ui.debug(fp.getvalue())
603 ui.debug(fp.getvalue())
605 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
604 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
606 except error.PatchError as err:
605 except error.PatchError as err:
607 raise error.Abort(pycompat.bytestr(err))
606 raise error.Abort(pycompat.bytestr(err))
608 del fp
607 del fp
609
608
610 # 4. We prepared the working directory according to the filtered
609 # 4. We prepared the working directory according to the filtered
611 # patch. Now is the time to delegate the job to
610 # patch. Now is the time to delegate the job to
612 # commit/qrefresh or the like!
611 # commit/qrefresh or the like!
613
612
614 # Make all of the pathnames absolute.
613 # Make all of the pathnames absolute.
615 newfiles = [repo.wjoin(nf) for nf in newfiles]
614 newfiles = [repo.wjoin(nf) for nf in newfiles]
616 return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
615 return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
617 finally:
616 finally:
618 # 5. finally restore backed-up files
617 # 5. finally restore backed-up files
619 try:
618 try:
620 dirstate = repo.dirstate
619 dirstate = repo.dirstate
621 for realname, tmpname in pycompat.iteritems(backups):
620 for realname, tmpname in pycompat.iteritems(backups):
622 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
621 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
623
622
624 if dirstate[realname] == b'n':
623 if dirstate[realname] == b'n':
625 # without normallookup, restoring timestamp
624 # without normallookup, restoring timestamp
626 # may cause partially committed files
625 # may cause partially committed files
627 # to be treated as unmodified
626 # to be treated as unmodified
628 dirstate.normallookup(realname)
627 dirstate.normallookup(realname)
629
628
630 # copystat=True here and above are a hack to trick any
629 # copystat=True here and above are a hack to trick any
631 # editors that have f open into thinking we haven't modified it.
630 # editors that have f open into thinking we haven't modified it.
632 #
631 #
633 # Also note that this is racy, as an editor could notice the
632 # Also note that this is racy, as an editor could notice the
634 # file's mtime before we've finished writing it.
633 # file's mtime before we've finished writing it.
635 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
634 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
636 os.unlink(tmpname)
635 os.unlink(tmpname)
637 if tobackup:
636 if tobackup:
638 os.rmdir(backupdir)
637 os.rmdir(backupdir)
639 except OSError:
638 except OSError:
640 pass
639 pass
641
640
642 def recordinwlock(ui, repo, message, match, opts):
641 def recordinwlock(ui, repo, message, match, opts):
643 with repo.wlock():
642 with repo.wlock():
644 return recordfunc(ui, repo, message, match, opts)
643 return recordfunc(ui, repo, message, match, opts)
645
644
646 return commit(ui, repo, recordinwlock, pats, opts)
645 return commit(ui, repo, recordinwlock, pats, opts)
647
646
648
647
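The backup-and-restore dance in steps 2 and 5 of the record driver above is the part that is easiest to get wrong. The following standalone sketch (plain Python with hypothetical arguments, not the Mercurial API) shows the same pattern: copy every file that will be touched into a temporary directory, run the partial operation, and restore the copies in a finally block so the working copy survives failures.

import os
import shutil
import tempfile

def with_backups(workdir, files, operation):
    """Run operation() while keeping pristine copies of the given files.

    Mirrors steps 2 and 5 of the record driver above: back up before
    touching the working copy, restore afterwards no matter what.
    """
    backupdir = tempfile.mkdtemp(prefix='record-backups-')
    backups = {}
    try:
        for f in files:
            tmpname = os.path.join(backupdir, f.replace('/', '_'))
            shutil.copy2(os.path.join(workdir, f), tmpname)  # like copystat=True
            backups[f] = tmpname
        return operation()
    finally:
        # restore the backed-up files even if operation() raised
        for realname, tmpname in backups.items():
            shutil.copy2(tmpname, os.path.join(workdir, realname))
        shutil.rmtree(backupdir, ignore_errors=True)

dorecord() additionally calls dirstate.normallookup() on the restored files so that restoring the timestamp does not make partially committed files look unmodified.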
649 class dirnode(object):
648 class dirnode(object):
650 """
649 """
651 Represent a directory in the user's working copy with information required for
650 Represent a directory in the user's working copy with information required for
652 the purpose of tersing its status.
651 the purpose of tersing its status.
653
652
654 path is the path to the directory, without a trailing '/'
653 path is the path to the directory, without a trailing '/'
655
654
656 statuses is a set of statuses of all files in this directory (this includes
655 statuses is a set of statuses of all files in this directory (this includes
657 all the files in all the subdirectories too)
656 all the files in all the subdirectories too)
658
657
659 files is a list of files which are direct children of this directory
658 files is a list of files which are direct children of this directory
660
659
661 subdirs is a dictionary with the sub-directory name as the key and its own
660 subdirs is a dictionary with the sub-directory name as the key and its own
662 dirnode object as the value
661 dirnode object as the value
663 """
662 """
664
663
665 def __init__(self, dirpath):
664 def __init__(self, dirpath):
666 self.path = dirpath
665 self.path = dirpath
667 self.statuses = set()
666 self.statuses = set()
668 self.files = []
667 self.files = []
669 self.subdirs = {}
668 self.subdirs = {}
670
669
671 def _addfileindir(self, filename, status):
670 def _addfileindir(self, filename, status):
672 """Add a file in this directory as a direct child."""
671 """Add a file in this directory as a direct child."""
673 self.files.append((filename, status))
672 self.files.append((filename, status))
674
673
675 def addfile(self, filename, status):
674 def addfile(self, filename, status):
676 """
675 """
677 Add a file to this directory or to its direct parent directory.
676 Add a file to this directory or to its direct parent directory.
678
677
679 If the file is not a direct child of this directory, we traverse to the
678 If the file is not a direct child of this directory, we traverse to the
680 directory of which this file is a direct child and add the file
679 directory of which this file is a direct child and add the file
681 there.
680 there.
682 """
681 """
683
682
684 # if the filename contains a path separator, it means it's not a direct
683 # if the filename contains a path separator, it means it's not a direct
685 # child of this directory
684 # child of this directory
686 if b'/' in filename:
685 if b'/' in filename:
687 subdir, filep = filename.split(b'/', 1)
686 subdir, filep = filename.split(b'/', 1)
688
687
689 # does the dirnode object for subdir exist?
688 # does the dirnode object for subdir exist?
690 if subdir not in self.subdirs:
689 if subdir not in self.subdirs:
691 subdirpath = pathutil.join(self.path, subdir)
690 subdirpath = pathutil.join(self.path, subdir)
692 self.subdirs[subdir] = dirnode(subdirpath)
691 self.subdirs[subdir] = dirnode(subdirpath)
693
692
694 # try adding the file in subdir
693 # try adding the file in subdir
695 self.subdirs[subdir].addfile(filep, status)
694 self.subdirs[subdir].addfile(filep, status)
696
695
697 else:
696 else:
698 self._addfileindir(filename, status)
697 self._addfileindir(filename, status)
699
698
700 if status not in self.statuses:
699 if status not in self.statuses:
701 self.statuses.add(status)
700 self.statuses.add(status)
702
701
703 def iterfilepaths(self):
702 def iterfilepaths(self):
704 """Yield (status, path) for files directly under this directory."""
703 """Yield (status, path) for files directly under this directory."""
705 for f, st in self.files:
704 for f, st in self.files:
706 yield st, pathutil.join(self.path, f)
705 yield st, pathutil.join(self.path, f)
707
706
708 def tersewalk(self, terseargs):
707 def tersewalk(self, terseargs):
709 """
708 """
710 Yield (status, path) obtained by processing the status of this
709 Yield (status, path) obtained by processing the status of this
711 dirnode.
710 dirnode.
712
711
713 terseargs is the string of arguments passed by the user with `--terse`
712 terseargs is the string of arguments passed by the user with `--terse`
714 flag.
713 flag.
715
714
716 Following are the cases which can happen:
715 Following are the cases which can happen:
717
716
718 1) All the files in the directory (including all the files in its
717 1) All the files in the directory (including all the files in its
719 subdirectories) share the same status and the user has asked us to terse
718 subdirectories) share the same status and the user has asked us to terse
720 that status. -> yield (status, dirpath). dirpath will end in '/'.
719 that status. -> yield (status, dirpath). dirpath will end in '/'.
721
720
722 2) Otherwise, we do the following:
721 2) Otherwise, we do the following:
723
722
724 a) Yield (status, filepath) for all the files which are in this
723 a) Yield (status, filepath) for all the files which are in this
725 directory (only the ones in this directory, not the subdirs)
724 directory (only the ones in this directory, not the subdirs)
726
725
727 b) Recurse the function on all the subdirectories of this
726 b) Recurse the function on all the subdirectories of this
728 directory
727 directory
729 """
728 """
730
729
731 if len(self.statuses) == 1:
730 if len(self.statuses) == 1:
732 onlyst = self.statuses.pop()
731 onlyst = self.statuses.pop()
733
732
734 # Making sure we terse only when the status abbreviation is
733 # Making sure we terse only when the status abbreviation is
735 # passed as a terse argument
734 # passed as a terse argument
736 if onlyst in terseargs:
735 if onlyst in terseargs:
737 yield onlyst, self.path + b'/'
736 yield onlyst, self.path + b'/'
738 return
737 return
739
738
740 # add the files to status list
739 # add the files to status list
741 for st, fpath in self.iterfilepaths():
740 for st, fpath in self.iterfilepaths():
742 yield st, fpath
741 yield st, fpath
743
742
744 # recurse on the subdirs
743 # recurse on the subdirs
745 for dirobj in self.subdirs.values():
744 for dirobj in self.subdirs.values():
746 for st, fpath in dirobj.tersewalk(terseargs):
745 for st, fpath in dirobj.tersewalk(terseargs):
747 yield st, fpath
746 yield st, fpath
748
747
749
748
750 def tersedir(statuslist, terseargs):
749 def tersedir(statuslist, terseargs):
751 """
750 """
752 Terse the status if all the files in a directory share the same status.
751 Terse the status if all the files in a directory share the same status.
753
752
754 statuslist is a scmutil.status() object which contains a list of files for
753 statuslist is a scmutil.status() object which contains a list of files for
755 each status.
754 each status.
756 terseargs is the string passed by the user as the argument to the `--terse`
755 terseargs is the string passed by the user as the argument to the `--terse`
757 flag.
756 flag.
758
757
759 The function makes a tree of objects of the dirnode class, and at each node it
758 The function makes a tree of objects of the dirnode class, and at each node it
760 stores the information required to know whether we can terse a certain
759 stores the information required to know whether we can terse a certain
761 directory or not.
760 directory or not.
762 """
761 """
763 # the order matters here as that is used to produce the final list
762 # the order matters here as that is used to produce the final list
764 allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')
763 allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')
765
764
766 # checking the argument validity
765 # checking the argument validity
767 for s in pycompat.bytestr(terseargs):
766 for s in pycompat.bytestr(terseargs):
768 if s not in allst:
767 if s not in allst:
769 raise error.Abort(_(b"'%s' not recognized") % s)
768 raise error.Abort(_(b"'%s' not recognized") % s)
770
769
771 # creating a dirnode object for the root of the repo
770 # creating a dirnode object for the root of the repo
772 rootobj = dirnode(b'')
771 rootobj = dirnode(b'')
773 pstatus = (
772 pstatus = (
774 b'modified',
773 b'modified',
775 b'added',
774 b'added',
776 b'deleted',
775 b'deleted',
777 b'clean',
776 b'clean',
778 b'unknown',
777 b'unknown',
779 b'ignored',
778 b'ignored',
780 b'removed',
779 b'removed',
781 )
780 )
782
781
783 tersedict = {}
782 tersedict = {}
784 for attrname in pstatus:
783 for attrname in pstatus:
785 statuschar = attrname[0:1]
784 statuschar = attrname[0:1]
786 for f in getattr(statuslist, attrname):
785 for f in getattr(statuslist, attrname):
787 rootobj.addfile(f, statuschar)
786 rootobj.addfile(f, statuschar)
788 tersedict[statuschar] = []
787 tersedict[statuschar] = []
789
788
790 # we won't be tersing the root dir, so add files in it
789 # we won't be tersing the root dir, so add files in it
791 for st, fpath in rootobj.iterfilepaths():
790 for st, fpath in rootobj.iterfilepaths():
792 tersedict[st].append(fpath)
791 tersedict[st].append(fpath)
793
792
794 # process each sub-directory and build tersedict
793 # process each sub-directory and build tersedict
795 for subdir in rootobj.subdirs.values():
794 for subdir in rootobj.subdirs.values():
796 for st, f in subdir.tersewalk(terseargs):
795 for st, f in subdir.tersewalk(terseargs):
797 tersedict[st].append(f)
796 tersedict[st].append(f)
798
797
799 tersedlist = []
798 tersedlist = []
800 for st in allst:
799 for st in allst:
801 tersedict[st].sort()
800 tersedict[st].sort()
802 tersedlist.append(tersedict[st])
801 tersedlist.append(tersedict[st])
803
802
804 return scmutil.status(*tersedlist)
803 return scmutil.status(*tersedlist)
805
804
806
805
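To make the tersing behaviour concrete, here is a minimal standalone sketch (plain str paths, invented file names, and only one level of collapsing; the real dirnode tree recurses and can terse nested directories independently):

import os
from collections import defaultdict

def terse(files_by_status, terseargs):
    """files_by_status maps a status character to a list of paths.

    Collapse a top-level directory to 'dir/' when every file under it
    (recursively) has the same status and that status is in terseargs.
    """
    statuses = {f: st for st, fs in files_by_status.items() for f in fs}
    perdir = defaultdict(set)          # statuses seen under each directory
    for f, st in statuses.items():
        d = os.path.dirname(f)
        while d:
            perdir[d].add(st)
            d = os.path.dirname(d)
    out, emitted = [], set()
    for f, st in sorted(statuses.items()):
        top = f.split('/', 1)[0] if '/' in f else None
        if top and perdir[top] == {st} and st in terseargs:
            if top not in emitted:
                out.append((st, top + '/'))
                emitted.add(top)
        else:
            out.append((st, f))
    return out

print(terse({'u': ['build/a.o', 'build/sub/b.o', 'README.new']}, 'u'))
# -> [('u', 'README.new'), ('u', 'build/')]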
807 def _commentlines(raw):
806 def _commentlines(raw):
808 '''Surround lines with a comment char and a newline'''
807 '''Surround lines with a comment char and a newline'''
809 lines = raw.splitlines()
808 lines = raw.splitlines()
810 commentedlines = [b'# %s' % line for line in lines]
809 commentedlines = [b'# %s' % line for line in lines]
811 return b'\n'.join(commentedlines) + b'\n'
810 return b'\n'.join(commentedlines) + b'\n'
812
811
813
812
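For reference, the transformation _commentlines() applies to the morestatus footer text is simply this (a standalone sketch using str instead of bytes):

def commentlines(raw):
    # same transformation as _commentlines() above: prefix every line with '# '
    return ''.join('# %s\n' % line for line in raw.splitlines())

print(commentlines('The repository is in an unfinished *graft* state.'), end='')
# prints: "# The repository is in an unfinished *graft* state."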
814 @attr.s(frozen=True)
813 @attr.s(frozen=True)
815 class morestatus(object):
814 class morestatus(object):
816 reporoot = attr.ib()
815 reporoot = attr.ib()
817 unfinishedop = attr.ib()
816 unfinishedop = attr.ib()
818 unfinishedmsg = attr.ib()
817 unfinishedmsg = attr.ib()
819 activemerge = attr.ib()
818 activemerge = attr.ib()
820 unresolvedpaths = attr.ib()
819 unresolvedpaths = attr.ib()
821 _formattedpaths = attr.ib(init=False, default=set())
820 _formattedpaths = attr.ib(init=False, default=set())
822 _label = b'status.morestatus'
821 _label = b'status.morestatus'
823
822
824 def formatfile(self, path, fm):
823 def formatfile(self, path, fm):
825 self._formattedpaths.add(path)
824 self._formattedpaths.add(path)
826 if self.activemerge and path in self.unresolvedpaths:
825 if self.activemerge and path in self.unresolvedpaths:
827 fm.data(unresolved=True)
826 fm.data(unresolved=True)
828
827
829 def formatfooter(self, fm):
828 def formatfooter(self, fm):
830 if self.unfinishedop or self.unfinishedmsg:
829 if self.unfinishedop or self.unfinishedmsg:
831 fm.startitem()
830 fm.startitem()
832 fm.data(itemtype=b'morestatus')
831 fm.data(itemtype=b'morestatus')
833
832
834 if self.unfinishedop:
833 if self.unfinishedop:
835 fm.data(unfinished=self.unfinishedop)
834 fm.data(unfinished=self.unfinishedop)
836 statemsg = (
835 statemsg = (
837 _(b'The repository is in an unfinished *%s* state.')
836 _(b'The repository is in an unfinished *%s* state.')
838 % self.unfinishedop
837 % self.unfinishedop
839 )
838 )
840 fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
839 fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
841 if self.unfinishedmsg:
840 if self.unfinishedmsg:
842 fm.data(unfinishedmsg=self.unfinishedmsg)
841 fm.data(unfinishedmsg=self.unfinishedmsg)
843
842
844 # May also start new data items.
843 # May also start new data items.
845 self._formatconflicts(fm)
844 self._formatconflicts(fm)
846
845
847 if self.unfinishedmsg:
846 if self.unfinishedmsg:
848 fm.plain(
847 fm.plain(
849 b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label
848 b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label
850 )
849 )
851
850
852 def _formatconflicts(self, fm):
851 def _formatconflicts(self, fm):
853 if not self.activemerge:
852 if not self.activemerge:
854 return
853 return
855
854
856 if self.unresolvedpaths:
855 if self.unresolvedpaths:
857 mergeliststr = b'\n'.join(
856 mergeliststr = b'\n'.join(
858 [
857 [
859 b' %s'
858 b' %s'
860 % util.pathto(self.reporoot, encoding.getcwd(), path)
859 % util.pathto(self.reporoot, encoding.getcwd(), path)
861 for path in self.unresolvedpaths
860 for path in self.unresolvedpaths
862 ]
861 ]
863 )
862 )
864 msg = (
863 msg = (
865 _(
864 _(
866 '''Unresolved merge conflicts:
865 '''Unresolved merge conflicts:
867
866
868 %s
867 %s
869
868
870 To mark files as resolved: hg resolve --mark FILE'''
869 To mark files as resolved: hg resolve --mark FILE'''
871 )
870 )
872 % mergeliststr
871 % mergeliststr
873 )
872 )
874
873
875 # If any paths with unresolved conflicts were not previously
874 # If any paths with unresolved conflicts were not previously
876 # formatted, output them now.
875 # formatted, output them now.
877 for f in self.unresolvedpaths:
876 for f in self.unresolvedpaths:
878 if f in self._formattedpaths:
877 if f in self._formattedpaths:
879 # Already output.
878 # Already output.
880 continue
879 continue
881 fm.startitem()
880 fm.startitem()
882 # We can't claim to know the status of the file - it may just
881 # We can't claim to know the status of the file - it may just
883 # have been in one of the states that were not requested for
882 # have been in one of the states that were not requested for
884 # display, so it could be anything.
883 # display, so it could be anything.
885 fm.data(itemtype=b'file', path=f, unresolved=True)
884 fm.data(itemtype=b'file', path=f, unresolved=True)
886
885
887 else:
886 else:
888 msg = _(b'No unresolved merge conflicts.')
887 msg = _(b'No unresolved merge conflicts.')
889
888
890 fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
889 fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
891
890
892
891
893 def readmorestatus(repo):
892 def readmorestatus(repo):
894 """Returns a morestatus object if the repo has unfinished state."""
893 """Returns a morestatus object if the repo has unfinished state."""
895 statetuple = statemod.getrepostate(repo)
894 statetuple = statemod.getrepostate(repo)
896 mergestate = mergestatemod.mergestate.read(repo)
895 mergestate = mergestatemod.mergestate.read(repo)
897 activemerge = mergestate.active()
896 activemerge = mergestate.active()
898 if not statetuple and not activemerge:
897 if not statetuple and not activemerge:
899 return None
898 return None
900
899
901 unfinishedop = unfinishedmsg = unresolved = None
900 unfinishedop = unfinishedmsg = unresolved = None
902 if statetuple:
901 if statetuple:
903 unfinishedop, unfinishedmsg = statetuple
902 unfinishedop, unfinishedmsg = statetuple
904 if activemerge:
903 if activemerge:
905 unresolved = sorted(mergestate.unresolved())
904 unresolved = sorted(mergestate.unresolved())
906 return morestatus(
905 return morestatus(
907 repo.root, unfinishedop, unfinishedmsg, activemerge, unresolved
906 repo.root, unfinishedop, unfinishedmsg, activemerge, unresolved
908 )
907 )
909
908
910
909
911 def findpossible(cmd, table, strict=False):
910 def findpossible(cmd, table, strict=False):
912 """
911 """
913 Return cmd -> (aliases, command table entry)
912 Return cmd -> (aliases, command table entry)
914 for each matching command.
913 for each matching command.
915 Return debug commands (or their aliases) only if no normal command matches.
914 Return debug commands (or their aliases) only if no normal command matches.
916 """
915 """
917 choice = {}
916 choice = {}
918 debugchoice = {}
917 debugchoice = {}
919
918
920 if cmd in table:
919 if cmd in table:
921 # short-circuit exact matches, "log" alias beats "log|history"
920 # short-circuit exact matches, "log" alias beats "log|history"
922 keys = [cmd]
921 keys = [cmd]
923 else:
922 else:
924 keys = table.keys()
923 keys = table.keys()
925
924
926 allcmds = []
925 allcmds = []
927 for e in keys:
926 for e in keys:
928 aliases = parsealiases(e)
927 aliases = parsealiases(e)
929 allcmds.extend(aliases)
928 allcmds.extend(aliases)
930 found = None
929 found = None
931 if cmd in aliases:
930 if cmd in aliases:
932 found = cmd
931 found = cmd
933 elif not strict:
932 elif not strict:
934 for a in aliases:
933 for a in aliases:
935 if a.startswith(cmd):
934 if a.startswith(cmd):
936 found = a
935 found = a
937 break
936 break
938 if found is not None:
937 if found is not None:
939 if aliases[0].startswith(b"debug") or found.startswith(b"debug"):
938 if aliases[0].startswith(b"debug") or found.startswith(b"debug"):
940 debugchoice[found] = (aliases, table[e])
939 debugchoice[found] = (aliases, table[e])
941 else:
940 else:
942 choice[found] = (aliases, table[e])
941 choice[found] = (aliases, table[e])
943
942
944 if not choice and debugchoice:
943 if not choice and debugchoice:
945 choice = debugchoice
944 choice = debugchoice
946
945
947 return choice, allcmds
946 return choice, allcmds
948
947
949
948
950 def findcmd(cmd, table, strict=True):
949 def findcmd(cmd, table, strict=True):
951 """Return (aliases, command table entry) for command string."""
950 """Return (aliases, command table entry) for command string."""
952 choice, allcmds = findpossible(cmd, table, strict)
951 choice, allcmds = findpossible(cmd, table, strict)
953
952
954 if cmd in choice:
953 if cmd in choice:
955 return choice[cmd]
954 return choice[cmd]
956
955
957 if len(choice) > 1:
956 if len(choice) > 1:
958 clist = sorted(choice)
957 clist = sorted(choice)
959 raise error.AmbiguousCommand(cmd, clist)
958 raise error.AmbiguousCommand(cmd, clist)
960
959
961 if choice:
960 if choice:
962 return list(choice.values())[0]
961 return list(choice.values())[0]
963
962
964 raise error.UnknownCommand(cmd, allcmds)
963 raise error.UnknownCommand(cmd, allcmds)
965
964
966
965
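To illustrate the alias and prefix matching that findpossible() and findcmd() perform, here is a small self-contained sketch with a toy command table (str keys instead of bytes, and without the strict mode and debug-command handling of the real functions):

def parsealiases(entry):
    # table keys look like "log|history"; the first name is canonical
    return entry.split("|")

def findpossible(cmd, table):
    """Return {matched name: (aliases, entry)}, a simplified sketch."""
    keys = [cmd] if cmd in table else table.keys()  # exact key match wins
    choice = {}
    for e in keys:
        aliases = parsealiases(e)
        if cmd in aliases:
            choice[cmd] = (aliases, table[e])
        else:
            for a in aliases:
                if a.startswith(cmd):
                    choice[a] = (aliases, table[e])
                    break
    return choice

table = {"log|history": "log entry", "status|st": "status entry"}
print(sorted(findpossible("st", table)))    # ['st']      (exact alias match)
print(sorted(findpossible("stat", table)))  # ['status']  (unique prefix)
print(sorted(findpossible("h", table)))     # ['history'] (prefix of an alias)

With an abbreviation that prefixes several commands, findcmd() raises AmbiguousCommand; with no match at all it raises UnknownCommand, as the code above shows.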
967 def changebranch(ui, repo, revs, label, opts):
966 def changebranch(ui, repo, revs, label, opts):
968 """ Change the branch name of the given revs to label """
967 """ Change the branch name of the given revs to label """
969
968
970 with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
969 with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
971 # abort in case of uncommitted merge or dirty wdir
970 # abort in case of uncommitted merge or dirty wdir
972 bailifchanged(repo)
971 bailifchanged(repo)
973 revs = scmutil.revrange(repo, revs)
972 revs = scmutil.revrange(repo, revs)
974 if not revs:
973 if not revs:
975 raise error.Abort(b"empty revision set")
974 raise error.Abort(b"empty revision set")
976 roots = repo.revs(b'roots(%ld)', revs)
975 roots = repo.revs(b'roots(%ld)', revs)
977 if len(roots) > 1:
976 if len(roots) > 1:
978 raise error.Abort(
977 raise error.Abort(
979 _(b"cannot change branch of non-linear revisions")
978 _(b"cannot change branch of non-linear revisions")
980 )
979 )
981 rewriteutil.precheck(repo, revs, b'change branch of')
980 rewriteutil.precheck(repo, revs, b'change branch of')
982
981
983 root = repo[roots.first()]
982 root = repo[roots.first()]
984 rpb = {parent.branch() for parent in root.parents()}
983 rpb = {parent.branch() for parent in root.parents()}
985 if (
984 if (
986 not opts.get(b'force')
985 not opts.get(b'force')
987 and label not in rpb
986 and label not in rpb
988 and label in repo.branchmap()
987 and label in repo.branchmap()
989 ):
988 ):
990 raise error.Abort(_(b"a branch of the same name already exists"))
989 raise error.Abort(_(b"a branch of the same name already exists"))
991
990
992 if repo.revs(b'obsolete() and %ld', revs):
991 if repo.revs(b'obsolete() and %ld', revs):
993 raise error.Abort(
992 raise error.Abort(
994 _(b"cannot change branch of an obsolete changeset")
993 _(b"cannot change branch of an obsolete changeset")
995 )
994 )
996
995
997 # make sure the heads of the selected revs are topological heads
996 # make sure the heads of the selected revs are topological heads
998 if repo.revs(b'heads(%ld) - head()', revs):
997 if repo.revs(b'heads(%ld) - head()', revs):
999 raise error.Abort(_(b"cannot change branch in middle of a stack"))
998 raise error.Abort(_(b"cannot change branch in middle of a stack"))
1000
999
1001 replacements = {}
1000 replacements = {}
1002 # avoid import cycle mercurial.cmdutil -> mercurial.context ->
1001 # avoid import cycle mercurial.cmdutil -> mercurial.context ->
1003 # mercurial.subrepo -> mercurial.cmdutil
1002 # mercurial.subrepo -> mercurial.cmdutil
1004 from . import context
1003 from . import context
1005
1004
1006 for rev in revs:
1005 for rev in revs:
1007 ctx = repo[rev]
1006 ctx = repo[rev]
1008 oldbranch = ctx.branch()
1007 oldbranch = ctx.branch()
1009 # check if ctx has same branch
1008 # check if ctx has same branch
1010 if oldbranch == label:
1009 if oldbranch == label:
1011 continue
1010 continue
1012
1011
1013 def filectxfn(repo, newctx, path):
1012 def filectxfn(repo, newctx, path):
1014 try:
1013 try:
1015 return ctx[path]
1014 return ctx[path]
1016 except error.ManifestLookupError:
1015 except error.ManifestLookupError:
1017 return None
1016 return None
1018
1017
1019 ui.debug(
1018 ui.debug(
1020 b"changing branch of '%s' from '%s' to '%s'\n"
1019 b"changing branch of '%s' from '%s' to '%s'\n"
1021 % (hex(ctx.node()), oldbranch, label)
1020 % (hex(ctx.node()), oldbranch, label)
1022 )
1021 )
1023 extra = ctx.extra()
1022 extra = ctx.extra()
1024 extra[b'branch_change'] = hex(ctx.node())
1023 extra[b'branch_change'] = hex(ctx.node())
1025 # While changing the branch of a set of linear commits, make sure that
1024 # While changing the branch of a set of linear commits, make sure that
1026 # we base our commits on the new parent rather than the old parent which
1025 # we base our commits on the new parent rather than the old parent which
1027 # was obsoleted while changing the branch
1026 # was obsoleted while changing the branch
1028 p1 = ctx.p1().node()
1027 p1 = ctx.p1().node()
1029 p2 = ctx.p2().node()
1028 p2 = ctx.p2().node()
1030 if p1 in replacements:
1029 if p1 in replacements:
1031 p1 = replacements[p1][0]
1030 p1 = replacements[p1][0]
1032 if p2 in replacements:
1031 if p2 in replacements:
1033 p2 = replacements[p2][0]
1032 p2 = replacements[p2][0]
1034
1033
1035 mc = context.memctx(
1034 mc = context.memctx(
1036 repo,
1035 repo,
1037 (p1, p2),
1036 (p1, p2),
1038 ctx.description(),
1037 ctx.description(),
1039 ctx.files(),
1038 ctx.files(),
1040 filectxfn,
1039 filectxfn,
1041 user=ctx.user(),
1040 user=ctx.user(),
1042 date=ctx.date(),
1041 date=ctx.date(),
1043 extra=extra,
1042 extra=extra,
1044 branch=label,
1043 branch=label,
1045 )
1044 )
1046
1045
1047 newnode = repo.commitctx(mc)
1046 newnode = repo.commitctx(mc)
1048 replacements[ctx.node()] = (newnode,)
1047 replacements[ctx.node()] = (newnode,)
1049 ui.debug(b'new node id is %s\n' % hex(newnode))
1048 ui.debug(b'new node id is %s\n' % hex(newnode))
1050
1049
1051 # create obsmarkers and move bookmarks
1050 # create obsmarkers and move bookmarks
1052 scmutil.cleanupnodes(
1051 scmutil.cleanupnodes(
1053 repo, replacements, b'branch-change', fixphase=True
1052 repo, replacements, b'branch-change', fixphase=True
1054 )
1053 )
1055
1054
1056 # move the working copy too
1055 # move the working copy too
1057 wctx = repo[None]
1056 wctx = repo[None]
1058 # in-progress merge is a bit too complex for now.
1057 # in-progress merge is a bit too complex for now.
1059 if len(wctx.parents()) == 1:
1058 if len(wctx.parents()) == 1:
1060 newid = replacements.get(wctx.p1().node())
1059 newid = replacements.get(wctx.p1().node())
1061 if newid is not None:
1060 if newid is not None:
1062 # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
1061 # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
1063 # mercurial.cmdutil
1062 # mercurial.cmdutil
1064 from . import hg
1063 from . import hg
1065
1064
1066 hg.update(repo, newid[0], quietempty=True)
1065 hg.update(repo, newid[0], quietempty=True)
1067
1066
1068 ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1067 ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1069
1068
1070
1069
1071 def findrepo(p):
1070 def findrepo(p):
1072 while not os.path.isdir(os.path.join(p, b".hg")):
1071 while not os.path.isdir(os.path.join(p, b".hg")):
1073 oldp, p = p, os.path.dirname(p)
1072 oldp, p = p, os.path.dirname(p)
1074 if p == oldp:
1073 if p == oldp:
1075 return None
1074 return None
1076
1075
1077 return p
1076 return p
1078
1077
1079
1078
1080 def bailifchanged(repo, merge=True, hint=None):
1079 def bailifchanged(repo, merge=True, hint=None):
1081 """ enforce the precondition that the working directory must be clean.
1080 """ enforce the precondition that the working directory must be clean.
1082
1081
1083 'merge' can be set to false if a pending uncommitted merge should be
1082 'merge' can be set to false if a pending uncommitted merge should be
1084 ignored (such as when 'update --check' runs).
1083 ignored (such as when 'update --check' runs).
1085
1084
1086 'hint' is the usual hint given to Abort exception.
1085 'hint' is the usual hint given to Abort exception.
1087 """
1086 """
1088
1087
1089 if merge and repo.dirstate.p2() != nullid:
1088 if merge and repo.dirstate.p2() != nullid:
1090 raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint)
1089 raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint)
1091 st = repo.status()
1090 st = repo.status()
1092 if st.modified or st.added or st.removed or st.deleted:
1091 if st.modified or st.added or st.removed or st.deleted:
1093 raise error.Abort(_(b'uncommitted changes'), hint=hint)
1092 raise error.Abort(_(b'uncommitted changes'), hint=hint)
1094 ctx = repo[None]
1093 ctx = repo[None]
1095 for s in sorted(ctx.substate):
1094 for s in sorted(ctx.substate):
1096 ctx.sub(s).bailifchanged(hint=hint)
1095 ctx.sub(s).bailifchanged(hint=hint)
1097
1096
1098
1097
1099 def logmessage(ui, opts):
1098 def logmessage(ui, opts):
1100 """ get the log message according to the -m and -l options """
1099 """ get the log message according to the -m and -l options """
1101
1100
1102 check_at_most_one_arg(opts, b'message', b'logfile')
1101 check_at_most_one_arg(opts, b'message', b'logfile')
1103
1102
1104 message = opts.get(b'message')
1103 message = opts.get(b'message')
1105 logfile = opts.get(b'logfile')
1104 logfile = opts.get(b'logfile')
1106
1105
1107 if not message and logfile:
1106 if not message and logfile:
1108 try:
1107 try:
1109 if isstdiofilename(logfile):
1108 if isstdiofilename(logfile):
1110 message = ui.fin.read()
1109 message = ui.fin.read()
1111 else:
1110 else:
1112 message = b'\n'.join(util.readfile(logfile).splitlines())
1111 message = b'\n'.join(util.readfile(logfile).splitlines())
1113 except IOError as inst:
1112 except IOError as inst:
1114 raise error.Abort(
1113 raise error.Abort(
1115 _(b"can't read commit message '%s': %s")
1114 _(b"can't read commit message '%s': %s")
1116 % (logfile, encoding.strtolocal(inst.strerror))
1115 % (logfile, encoding.strtolocal(inst.strerror))
1117 )
1116 )
1118 return message
1117 return message
1119
1118
1120
1119
1121 def mergeeditform(ctxorbool, baseformname):
1120 def mergeeditform(ctxorbool, baseformname):
1122 """return appropriate editform name (referencing a committemplate)
1121 """return appropriate editform name (referencing a committemplate)
1123
1122
1124 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
1123 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
1125 merging is committed.
1124 merging is committed.
1126
1125
1127 This returns baseformname with '.merge' appended if it is a merge,
1126 This returns baseformname with '.merge' appended if it is a merge,
1128 otherwise '.normal' is appended.
1127 otherwise '.normal' is appended.
1129 """
1128 """
1130 if isinstance(ctxorbool, bool):
1129 if isinstance(ctxorbool, bool):
1131 if ctxorbool:
1130 if ctxorbool:
1132 return baseformname + b".merge"
1131 return baseformname + b".merge"
1133 elif len(ctxorbool.parents()) > 1:
1132 elif len(ctxorbool.parents()) > 1:
1134 return baseformname + b".merge"
1133 return baseformname + b".merge"
1135
1134
1136 return baseformname + b".normal"
1135 return baseformname + b".normal"
1137
1136
1138
1137
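The resulting editform names are easiest to see with the boolean form of ctxorbool; a tiny standalone sketch (str instead of bytes):

# sketch of mergeeditform() with the boolean form of ctxorbool
def editform(ismerge, baseformname):
    return baseformname + ('.merge' if ismerge else '.normal')

print(editform(False, 'commit'))  # commit.normal
print(editform(True, 'commit'))   # commit.merge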
1139 def getcommiteditor(
1138 def getcommiteditor(
1140 edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
1139 edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
1141 ):
1140 ):
1142 """get appropriate commit message editor according to '--edit' option
1141 """get appropriate commit message editor according to '--edit' option
1143
1142
1144 'finishdesc' is a function to be called with the edited commit message
1143 'finishdesc' is a function to be called with the edited commit message
1145 (= 'description' of the new changeset) just after editing, but
1144 (= 'description' of the new changeset) just after editing, but
1146 before checking emptiness. It should return the actual text to be
1145 before checking emptiness. It should return the actual text to be
1147 stored into history. This allows the description to be changed before
1146 stored into history. This allows the description to be changed before
1148 storing.
1147 storing.
1149
1148
1150 'extramsg' is an extra message to be shown in the editor instead of
1149 'extramsg' is an extra message to be shown in the editor instead of
1151 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
1150 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
1152 are automatically added.
1151 are automatically added.
1153
1152
1154 'editform' is a dot-separated list of names, to distinguish
1153 'editform' is a dot-separated list of names, to distinguish
1155 the purpose of commit text editing.
1154 the purpose of commit text editing.
1156
1155
1157 'getcommiteditor' returns 'commitforceeditor' regardless of
1156 'getcommiteditor' returns 'commitforceeditor' regardless of
1158 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
1157 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
1158 they are specific to usage in MQ.
1157 they are specific to usage in MQ.
1160 """
1159 """
1161 if edit or finishdesc or extramsg:
1160 if edit or finishdesc or extramsg:
1162 return lambda r, c, s: commitforceeditor(
1161 return lambda r, c, s: commitforceeditor(
1163 r, c, s, finishdesc=finishdesc, extramsg=extramsg, editform=editform
1162 r, c, s, finishdesc=finishdesc, extramsg=extramsg, editform=editform
1164 )
1163 )
1165 elif editform:
1164 elif editform:
1166 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
1165 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
1167 else:
1166 else:
1168 return commiteditor
1167 return commiteditor
1169
1168
1170
1169
1171 def _escapecommandtemplate(tmpl):
1170 def _escapecommandtemplate(tmpl):
1172 parts = []
1171 parts = []
1173 for typ, start, end in templater.scantemplate(tmpl, raw=True):
1172 for typ, start, end in templater.scantemplate(tmpl, raw=True):
1174 if typ == b'string':
1173 if typ == b'string':
1175 parts.append(stringutil.escapestr(tmpl[start:end]))
1174 parts.append(stringutil.escapestr(tmpl[start:end]))
1176 else:
1175 else:
1177 parts.append(tmpl[start:end])
1176 parts.append(tmpl[start:end])
1178 return b''.join(parts)
1177 return b''.join(parts)
1179
1178
1180
1179
1181 def rendercommandtemplate(ui, tmpl, props):
1180 def rendercommandtemplate(ui, tmpl, props):
1182 r"""Expand a literal template 'tmpl' in a way suitable for command line
1181 r"""Expand a literal template 'tmpl' in a way suitable for command line
1183
1182
1184 '\' in outermost string is not taken as an escape character because it
1183 '\' in outermost string is not taken as an escape character because it
1185 is a directory separator on Windows.
1184 is a directory separator on Windows.
1186
1185
1187 >>> from . import ui as uimod
1186 >>> from . import ui as uimod
1188 >>> ui = uimod.ui()
1187 >>> ui = uimod.ui()
1189 >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
1188 >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
1190 'c:\\foo'
1189 'c:\\foo'
1191 >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
1190 >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
1192 'c:{path}'
1191 'c:{path}'
1193 """
1192 """
1194 if not tmpl:
1193 if not tmpl:
1195 return tmpl
1194 return tmpl
1196 t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
1195 t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
1197 return t.renderdefault(props)
1196 return t.renderdefault(props)
1198
1197
1199
1198
1200 def rendertemplate(ctx, tmpl, props=None):
1199 def rendertemplate(ctx, tmpl, props=None):
1201 """Expand a literal template 'tmpl' byte-string against one changeset
1200 """Expand a literal template 'tmpl' byte-string against one changeset
1202
1201
1203 Each props item must be a stringify-able value or a callable returning
1202 Each props item must be a stringify-able value or a callable returning
1204 such value, i.e. no bare list nor dict should be passed.
1203 such value, i.e. no bare list nor dict should be passed.
1205 """
1204 """
1206 repo = ctx.repo()
1205 repo = ctx.repo()
1207 tres = formatter.templateresources(repo.ui, repo)
1206 tres = formatter.templateresources(repo.ui, repo)
1208 t = formatter.maketemplater(
1207 t = formatter.maketemplater(
1209 repo.ui, tmpl, defaults=templatekw.keywords, resources=tres
1208 repo.ui, tmpl, defaults=templatekw.keywords, resources=tres
1210 )
1209 )
1211 mapping = {b'ctx': ctx}
1210 mapping = {b'ctx': ctx}
1212 if props:
1211 if props:
1213 mapping.update(props)
1212 mapping.update(props)
1214 return t.renderdefault(mapping)
1213 return t.renderdefault(mapping)
1215
1214
1216
1215
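A hypothetical usage sketch for rendertemplate(), assuming the mercurial package is importable and the current directory is a repository (the repository-opening calls are my assumption about a typical caller, not part of this file; the template keywords and filters are standard Mercurial ones):

# render a one-line summary of the working copy parent
from mercurial import hg, ui as uimod
from mercurial.cmdutil import rendertemplate

repo = hg.repository(uimod.ui.load(), b'.')
ctx = repo[b'.']
print(rendertemplate(ctx, b'{node|short} {desc|firstline}\n').decode(), end='')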
1217 def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
1216 def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
1218 r"""Convert old-style filename format string to template string
1217 r"""Convert old-style filename format string to template string
1219
1218
1220 >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
1219 >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
1221 'foo-{reporoot|basename}-{seqno}.patch'
1220 'foo-{reporoot|basename}-{seqno}.patch'
1222 >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
1221 >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
1223 '{rev}{tags % "{tag}"}{node}'
1222 '{rev}{tags % "{tag}"}{node}'
1224
1223
1225 '\' in outermost strings has to be escaped because it is a directory
1224 '\' in outermost strings has to be escaped because it is a directory
1226 separator on Windows:
1225 separator on Windows:
1227
1226
1228 >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
1227 >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
1229 'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
1228 'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
1230 >>> _buildfntemplate(b'\\\\foo\\bar.patch')
1229 >>> _buildfntemplate(b'\\\\foo\\bar.patch')
1231 '\\\\\\\\foo\\\\bar.patch'
1230 '\\\\\\\\foo\\\\bar.patch'
1232 >>> _buildfntemplate(b'\\{tags % "{tag}"}')
1231 >>> _buildfntemplate(b'\\{tags % "{tag}"}')
1233 '\\\\{tags % "{tag}"}'
1232 '\\\\{tags % "{tag}"}'
1234
1233
1235 but inner strings follow the template rules (i.e. '\' is taken as an
1234 but inner strings follow the template rules (i.e. '\' is taken as an
1236 escape character):
1235 escape character):
1237
1236
1238 >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
1237 >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
1239 '{"c:\\tmp"}'
1238 '{"c:\\tmp"}'
1240 """
1239 """
1241 expander = {
1240 expander = {
1242 b'H': b'{node}',
1241 b'H': b'{node}',
1243 b'R': b'{rev}',
1242 b'R': b'{rev}',
1244 b'h': b'{node|short}',
1243 b'h': b'{node|short}',
1245 b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
1244 b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
1246 b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
1245 b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
1247 b'%': b'%',
1246 b'%': b'%',
1248 b'b': b'{reporoot|basename}',
1247 b'b': b'{reporoot|basename}',
1249 }
1248 }
1250 if total is not None:
1249 if total is not None:
1251 expander[b'N'] = b'{total}'
1250 expander[b'N'] = b'{total}'
1252 if seqno is not None:
1251 if seqno is not None:
1253 expander[b'n'] = b'{seqno}'
1252 expander[b'n'] = b'{seqno}'
1254 if total is not None and seqno is not None:
1253 if total is not None and seqno is not None:
1255 expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
1254 expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
1256 if pathname is not None:
1255 if pathname is not None:
1257 expander[b's'] = b'{pathname|basename}'
1256 expander[b's'] = b'{pathname|basename}'
1258 expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
1257 expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
1259 expander[b'p'] = b'{pathname}'
1258 expander[b'p'] = b'{pathname}'
1260
1259
1261 newname = []
1260 newname = []
1262 for typ, start, end in templater.scantemplate(pat, raw=True):
1261 for typ, start, end in templater.scantemplate(pat, raw=True):
1263 if typ != b'string':
1262 if typ != b'string':
1264 newname.append(pat[start:end])
1263 newname.append(pat[start:end])
1265 continue
1264 continue
1266 i = start
1265 i = start
1267 while i < end:
1266 while i < end:
1268 n = pat.find(b'%', i, end)
1267 n = pat.find(b'%', i, end)
1269 if n < 0:
1268 if n < 0:
1270 newname.append(stringutil.escapestr(pat[i:end]))
1269 newname.append(stringutil.escapestr(pat[i:end]))
1271 break
1270 break
1272 newname.append(stringutil.escapestr(pat[i:n]))
1271 newname.append(stringutil.escapestr(pat[i:n]))
1273 if n + 2 > end:
1272 if n + 2 > end:
1274 raise error.Abort(
1273 raise error.Abort(
1275 _(b"incomplete format spec in output filename")
1274 _(b"incomplete format spec in output filename")
1276 )
1275 )
1277 c = pat[n + 1 : n + 2]
1276 c = pat[n + 1 : n + 2]
1278 i = n + 2
1277 i = n + 2
1279 try:
1278 try:
1280 newname.append(expander[c])
1279 newname.append(expander[c])
1281 except KeyError:
1280 except KeyError:
1282 raise error.Abort(
1281 raise error.Abort(
1283 _(b"invalid format spec '%%%s' in output filename") % c
1282 _(b"invalid format spec '%%%s' in output filename") % c
1284 )
1283 )
1285 return b''.join(newname)
1284 return b''.join(newname)
1286
1285
1287
1286
1288 def makefilename(ctx, pat, **props):
1287 def makefilename(ctx, pat, **props):
1289 if not pat:
1288 if not pat:
1290 return pat
1289 return pat
1291 tmpl = _buildfntemplate(pat, **props)
1290 tmpl = _buildfntemplate(pat, **props)
1292 # BUG: alias expansion shouldn't be made against template fragments
1291 # BUG: alias expansion shouldn't be made against template fragments
1293 # rewritten from %-format strings, but we have no easy way to partially
1292 # rewritten from %-format strings, but we have no easy way to partially
1294 # disable the expansion.
1293 # disable the expansion.
1295 return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
1294 return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
1296
1295
1297
1296
1298 def isstdiofilename(pat):
1297 def isstdiofilename(pat):
1299 """True if the given pat looks like a filename denoting stdin/stdout"""
1298 """True if the given pat looks like a filename denoting stdin/stdout"""
1300 return not pat or pat == b'-'
1299 return not pat or pat == b'-'
1301
1300
1302
1301
1303 class _unclosablefile(object):
1302 class _unclosablefile(object):
1304 def __init__(self, fp):
1303 def __init__(self, fp):
1305 self._fp = fp
1304 self._fp = fp
1306
1305
1307 def close(self):
1306 def close(self):
1308 pass
1307 pass
1309
1308
1310 def __iter__(self):
1309 def __iter__(self):
1311 return iter(self._fp)
1310 return iter(self._fp)
1312
1311
1313 def __getattr__(self, attr):
1312 def __getattr__(self, attr):
1314 return getattr(self._fp, attr)
1313 return getattr(self._fp, attr)
1315
1314
1316 def __enter__(self):
1315 def __enter__(self):
1317 return self
1316 return self
1318
1317
1319 def __exit__(self, exc_type, exc_value, exc_tb):
1318 def __exit__(self, exc_type, exc_value, exc_tb):
1320 pass
1319 pass
1321
1320
1322
1321
1323 def makefileobj(ctx, pat, mode=b'wb', **props):
1322 def makefileobj(ctx, pat, mode=b'wb', **props):
1324 writable = mode not in (b'r', b'rb')
1323 writable = mode not in (b'r', b'rb')
1325
1324
1326 if isstdiofilename(pat):
1325 if isstdiofilename(pat):
1327 repo = ctx.repo()
1326 repo = ctx.repo()
1328 if writable:
1327 if writable:
1329 fp = repo.ui.fout
1328 fp = repo.ui.fout
1330 else:
1329 else:
1331 fp = repo.ui.fin
1330 fp = repo.ui.fin
1332 return _unclosablefile(fp)
1331 return _unclosablefile(fp)
1333 fn = makefilename(ctx, pat, **props)
1332 fn = makefilename(ctx, pat, **props)
1334 return open(fn, mode)
1333 return open(fn, mode)
1335
1334
1336
1335
1337 def openstorage(repo, cmd, file_, opts, returnrevlog=False):
1336 def openstorage(repo, cmd, file_, opts, returnrevlog=False):
1338 """opens the changelog, manifest, a filelog or a given revlog"""
1337 """opens the changelog, manifest, a filelog or a given revlog"""
1339 cl = opts[b'changelog']
1338 cl = opts[b'changelog']
1340 mf = opts[b'manifest']
1339 mf = opts[b'manifest']
1341 dir = opts[b'dir']
1340 dir = opts[b'dir']
1342 msg = None
1341 msg = None
1343 if cl and mf:
1342 if cl and mf:
1344 msg = _(b'cannot specify --changelog and --manifest at the same time')
1343 msg = _(b'cannot specify --changelog and --manifest at the same time')
1345 elif cl and dir:
1344 elif cl and dir:
1346 msg = _(b'cannot specify --changelog and --dir at the same time')
1345 msg = _(b'cannot specify --changelog and --dir at the same time')
1347 elif cl or mf or dir:
1346 elif cl or mf or dir:
1348 if file_:
1347 if file_:
1349 msg = _(b'cannot specify filename with --changelog or --manifest')
1348 msg = _(b'cannot specify filename with --changelog or --manifest')
1350 elif not repo:
1349 elif not repo:
1351 msg = _(
1350 msg = _(
1352 b'cannot specify --changelog or --manifest or --dir '
1351 b'cannot specify --changelog or --manifest or --dir '
1353 b'without a repository'
1352 b'without a repository'
1354 )
1353 )
1355 if msg:
1354 if msg:
1356 raise error.Abort(msg)
1355 raise error.Abort(msg)
1357
1356
1358 r = None
1357 r = None
1359 if repo:
1358 if repo:
1360 if cl:
1359 if cl:
1361 r = repo.unfiltered().changelog
1360 r = repo.unfiltered().changelog
1362 elif dir:
1361 elif dir:
1363 if repository.TREEMANIFEST_REQUIREMENT not in repo.requirements:
1362 if requirements.TREEMANIFEST_REQUIREMENT not in repo.requirements:
1364 raise error.Abort(
1363 raise error.Abort(
1365 _(
1364 _(
1366 b"--dir can only be used on repos with "
1365 b"--dir can only be used on repos with "
1367 b"treemanifest enabled"
1366 b"treemanifest enabled"
1368 )
1367 )
1369 )
1368 )
1370 if not dir.endswith(b'/'):
1369 if not dir.endswith(b'/'):
1371 dir = dir + b'/'
1370 dir = dir + b'/'
1372 dirlog = repo.manifestlog.getstorage(dir)
1371 dirlog = repo.manifestlog.getstorage(dir)
1373 if len(dirlog):
1372 if len(dirlog):
1374 r = dirlog
1373 r = dirlog
1375 elif mf:
1374 elif mf:
1376 r = repo.manifestlog.getstorage(b'')
1375 r = repo.manifestlog.getstorage(b'')
1377 elif file_:
1376 elif file_:
1378 filelog = repo.file(file_)
1377 filelog = repo.file(file_)
1379 if len(filelog):
1378 if len(filelog):
1380 r = filelog
1379 r = filelog
1381
1380
1382 # Not all storage may be revlogs. If requested, try to return an actual
1381 # Not all storage may be revlogs. If requested, try to return an actual
1383 # revlog instance.
1382 # revlog instance.
1384 if returnrevlog:
1383 if returnrevlog:
1385 if isinstance(r, revlog.revlog):
1384 if isinstance(r, revlog.revlog):
1386 pass
1385 pass
1387 elif util.safehasattr(r, b'_revlog'):
1386 elif util.safehasattr(r, b'_revlog'):
1388 r = r._revlog # pytype: disable=attribute-error
1387 r = r._revlog # pytype: disable=attribute-error
1389 elif r is not None:
1388 elif r is not None:
1390 raise error.Abort(_(b'%r does not appear to be a revlog') % r)
1389 raise error.Abort(_(b'%r does not appear to be a revlog') % r)
1391
1390
1392 if not r:
1391 if not r:
1393 if not returnrevlog:
1392 if not returnrevlog:
1394 raise error.Abort(_(b'cannot give path to non-revlog'))
1393 raise error.Abort(_(b'cannot give path to non-revlog'))
1395
1394
1396 if not file_:
1395 if not file_:
1397 raise error.CommandError(cmd, _(b'invalid arguments'))
1396 raise error.CommandError(cmd, _(b'invalid arguments'))
1398 if not os.path.isfile(file_):
1397 if not os.path.isfile(file_):
1399 raise error.Abort(_(b"revlog '%s' not found") % file_)
1398 raise error.Abort(_(b"revlog '%s' not found") % file_)
1400 r = revlog.revlog(
1399 r = revlog.revlog(
1401 vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
1400 vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
1402 )
1401 )
1403 return r
1402 return r
1404
1403
1405
1404
1406 def openrevlog(repo, cmd, file_, opts):
1405 def openrevlog(repo, cmd, file_, opts):
1407 """Obtain a revlog backing storage of an item.
1406 """Obtain a revlog backing storage of an item.
1408
1407
1409 This is similar to ``openstorage()`` except it always returns a revlog.
1408 This is similar to ``openstorage()`` except it always returns a revlog.
1410
1409
1411 In most cases, a caller cares about the main storage object - not the
1410 In most cases, a caller cares about the main storage object - not the
1412 revlog backing it. Therefore, this function should only be used by code
1411 revlog backing it. Therefore, this function should only be used by code
1413 that needs to examine low-level revlog implementation details, e.g. debug
1412 that needs to examine low-level revlog implementation details, e.g. debug
1414 commands.
1413 commands.
1415 """
1414 """
1416 return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1415 return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1417
1416
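# A minimal sketch, not part of this change: how a hypothetical debug command
# might reach the revlog behind a tracked file via openrevlog().  The command
# name and the option dict are assumptions; openstorage() only requires the
# b'changelog', b'manifest' and b'dir' keys to be present.
def _example_filelog_revcount(ui, repo, file_):
    opts = {b'changelog': False, b'manifest': False, b'dir': b''}
    rl = openrevlog(repo, b'debugexample', file_, opts)
    ui.write(b'%d revisions in %s\n' % (len(rl), file_))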
1418
1417
1419 def copy(ui, repo, pats, opts, rename=False):
1418 def copy(ui, repo, pats, opts, rename=False):
1420 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1419 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1421
1420
1422 # called with the repo lock held
1421 # called with the repo lock held
1423 #
1422 #
1424 # hgsep => pathname that uses "/" to separate directories
1423 # hgsep => pathname that uses "/" to separate directories
1425 # ossep => pathname that uses os.sep to separate directories
1424 # ossep => pathname that uses os.sep to separate directories
1426 cwd = repo.getcwd()
1425 cwd = repo.getcwd()
1427 targets = {}
1426 targets = {}
1428 forget = opts.get(b"forget")
1427 forget = opts.get(b"forget")
1429 after = opts.get(b"after")
1428 after = opts.get(b"after")
1430 dryrun = opts.get(b"dry_run")
1429 dryrun = opts.get(b"dry_run")
1431 rev = opts.get(b'at_rev')
1430 rev = opts.get(b'at_rev')
1432 if rev:
1431 if rev:
1433 if not forget and not after:
1432 if not forget and not after:
1434 # TODO: Remove this restriction and make it also create the copy
1433 # TODO: Remove this restriction and make it also create the copy
1435 # targets (and remove the rename source if rename==True).
1434 # targets (and remove the rename source if rename==True).
1436 raise error.Abort(_(b'--at-rev requires --after'))
1435 raise error.Abort(_(b'--at-rev requires --after'))
1437 ctx = scmutil.revsingle(repo, rev)
1436 ctx = scmutil.revsingle(repo, rev)
1438 if len(ctx.parents()) > 1:
1437 if len(ctx.parents()) > 1:
1439 raise error.Abort(_(b'cannot mark/unmark copy in merge commit'))
1438 raise error.Abort(_(b'cannot mark/unmark copy in merge commit'))
1440 else:
1439 else:
1441 ctx = repo[None]
1440 ctx = repo[None]
1442
1441
1443 pctx = ctx.p1()
1442 pctx = ctx.p1()
1444
1443
1445 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1444 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1446
1445
1447 if forget:
1446 if forget:
1448 if ctx.rev() is None:
1447 if ctx.rev() is None:
1449 new_ctx = ctx
1448 new_ctx = ctx
1450 else:
1449 else:
1451 if len(ctx.parents()) > 1:
1450 if len(ctx.parents()) > 1:
1452 raise error.Abort(_(b'cannot unmark copy in merge commit'))
1451 raise error.Abort(_(b'cannot unmark copy in merge commit'))
1453 # avoid cycle context -> subrepo -> cmdutil
1452 # avoid cycle context -> subrepo -> cmdutil
1454 from . import context
1453 from . import context
1455
1454
1456 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1455 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1457 new_ctx = context.overlayworkingctx(repo)
1456 new_ctx = context.overlayworkingctx(repo)
1458 new_ctx.setbase(ctx.p1())
1457 new_ctx.setbase(ctx.p1())
1459 mergemod.graft(repo, ctx, wctx=new_ctx)
1458 mergemod.graft(repo, ctx, wctx=new_ctx)
1460
1459
1461 match = scmutil.match(ctx, pats, opts)
1460 match = scmutil.match(ctx, pats, opts)
1462
1461
1463 current_copies = ctx.p1copies()
1462 current_copies = ctx.p1copies()
1464 current_copies.update(ctx.p2copies())
1463 current_copies.update(ctx.p2copies())
1465
1464
1466 uipathfn = scmutil.getuipathfn(repo)
1465 uipathfn = scmutil.getuipathfn(repo)
1467 for f in ctx.walk(match):
1466 for f in ctx.walk(match):
1468 if f in current_copies:
1467 if f in current_copies:
1469 new_ctx[f].markcopied(None)
1468 new_ctx[f].markcopied(None)
1470 elif match.exact(f):
1469 elif match.exact(f):
1471 ui.warn(
1470 ui.warn(
1472 _(
1471 _(
1473 b'%s: not unmarking as copy - file is not marked as copied\n'
1472 b'%s: not unmarking as copy - file is not marked as copied\n'
1474 )
1473 )
1475 % uipathfn(f)
1474 % uipathfn(f)
1476 )
1475 )
1477
1476
1478 if ctx.rev() is not None:
1477 if ctx.rev() is not None:
1479 with repo.lock():
1478 with repo.lock():
1480 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1479 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1481 new_node = mem_ctx.commit()
1480 new_node = mem_ctx.commit()
1482
1481
1483 if repo.dirstate.p1() == ctx.node():
1482 if repo.dirstate.p1() == ctx.node():
1484 with repo.dirstate.parentchange():
1483 with repo.dirstate.parentchange():
1485 scmutil.movedirstate(repo, repo[new_node])
1484 scmutil.movedirstate(repo, repo[new_node])
1486 replacements = {ctx.node(): [new_node]}
1485 replacements = {ctx.node(): [new_node]}
1487 scmutil.cleanupnodes(
1486 scmutil.cleanupnodes(
1488 repo, replacements, b'uncopy', fixphase=True
1487 repo, replacements, b'uncopy', fixphase=True
1489 )
1488 )
1490
1489
1491 return
1490 return
1492
1491
1493 pats = scmutil.expandpats(pats)
1492 pats = scmutil.expandpats(pats)
1494 if not pats:
1493 if not pats:
1495 raise error.Abort(_(b'no source or destination specified'))
1494 raise error.Abort(_(b'no source or destination specified'))
1496 if len(pats) == 1:
1495 if len(pats) == 1:
1497 raise error.Abort(_(b'no destination specified'))
1496 raise error.Abort(_(b'no destination specified'))
1498 dest = pats.pop()
1497 dest = pats.pop()
1499
1498
1500 def walkpat(pat):
1499 def walkpat(pat):
1501 srcs = []
1500 srcs = []
1502 # TODO: Inline and simplify the non-working-copy version of this code
1501 # TODO: Inline and simplify the non-working-copy version of this code
1503 # since it shares very little with the working-copy version of it.
1502 # since it shares very little with the working-copy version of it.
1504 ctx_to_walk = ctx if ctx.rev() is None else pctx
1503 ctx_to_walk = ctx if ctx.rev() is None else pctx
1505 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1504 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1506 for abs in ctx_to_walk.walk(m):
1505 for abs in ctx_to_walk.walk(m):
1507 rel = uipathfn(abs)
1506 rel = uipathfn(abs)
1508 exact = m.exact(abs)
1507 exact = m.exact(abs)
1509 if abs not in ctx:
1508 if abs not in ctx:
1510 if abs in pctx:
1509 if abs in pctx:
1511 if not after:
1510 if not after:
1512 if exact:
1511 if exact:
1513 ui.warn(
1512 ui.warn(
1514 _(
1513 _(
1515 b'%s: not copying - file has been marked '
1514 b'%s: not copying - file has been marked '
1516 b'for remove\n'
1515 b'for remove\n'
1517 )
1516 )
1518 % rel
1517 % rel
1519 )
1518 )
1520 continue
1519 continue
1521 else:
1520 else:
1522 if exact:
1521 if exact:
1523 ui.warn(
1522 ui.warn(
1524 _(b'%s: not copying - file is not managed\n') % rel
1523 _(b'%s: not copying - file is not managed\n') % rel
1525 )
1524 )
1526 continue
1525 continue
1527
1526
1528 # abs: hgsep
1527 # abs: hgsep
1529 # rel: ossep
1528 # rel: ossep
1530 srcs.append((abs, rel, exact))
1529 srcs.append((abs, rel, exact))
1531 return srcs
1530 return srcs
1532
1531
1533 if ctx.rev() is not None:
1532 if ctx.rev() is not None:
1534 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1533 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1535 absdest = pathutil.canonpath(repo.root, cwd, dest)
1534 absdest = pathutil.canonpath(repo.root, cwd, dest)
1536 if ctx.hasdir(absdest):
1535 if ctx.hasdir(absdest):
1537 raise error.Abort(
1536 raise error.Abort(
1538 _(b'%s: --at-rev does not support a directory as destination')
1537 _(b'%s: --at-rev does not support a directory as destination')
1539 % uipathfn(absdest)
1538 % uipathfn(absdest)
1540 )
1539 )
1541 if absdest not in ctx:
1540 if absdest not in ctx:
1542 raise error.Abort(
1541 raise error.Abort(
1543 _(b'%s: copy destination does not exist in %s')
1542 _(b'%s: copy destination does not exist in %s')
1544 % (uipathfn(absdest), ctx)
1543 % (uipathfn(absdest), ctx)
1545 )
1544 )
1546
1545
1547 # avoid cycle context -> subrepo -> cmdutil
1546 # avoid cycle context -> subrepo -> cmdutil
1548 from . import context
1547 from . import context
1549
1548
1550 copylist = []
1549 copylist = []
1551 for pat in pats:
1550 for pat in pats:
1552 srcs = walkpat(pat)
1551 srcs = walkpat(pat)
1553 if not srcs:
1552 if not srcs:
1554 continue
1553 continue
1555 for abs, rel, exact in srcs:
1554 for abs, rel, exact in srcs:
1556 copylist.append(abs)
1555 copylist.append(abs)
1557
1556
1558 if not copylist:
1557 if not copylist:
1559 raise error.Abort(_(b'no files to copy'))
1558 raise error.Abort(_(b'no files to copy'))
1560 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1559 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1561 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1560 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1562 # existing functions below.
1561 # existing functions below.
1563 if len(copylist) != 1:
1562 if len(copylist) != 1:
1564 raise error.Abort(_(b'--at-rev requires a single source'))
1563 raise error.Abort(_(b'--at-rev requires a single source'))
1565
1564
1566 new_ctx = context.overlayworkingctx(repo)
1565 new_ctx = context.overlayworkingctx(repo)
1567 new_ctx.setbase(ctx.p1())
1566 new_ctx.setbase(ctx.p1())
1568 mergemod.graft(repo, ctx, wctx=new_ctx)
1567 mergemod.graft(repo, ctx, wctx=new_ctx)
1569
1568
1570 new_ctx.markcopied(absdest, copylist[0])
1569 new_ctx.markcopied(absdest, copylist[0])
1571
1570
1572 with repo.lock():
1571 with repo.lock():
1573 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1572 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1574 new_node = mem_ctx.commit()
1573 new_node = mem_ctx.commit()
1575
1574
1576 if repo.dirstate.p1() == ctx.node():
1575 if repo.dirstate.p1() == ctx.node():
1577 with repo.dirstate.parentchange():
1576 with repo.dirstate.parentchange():
1578 scmutil.movedirstate(repo, repo[new_node])
1577 scmutil.movedirstate(repo, repo[new_node])
1579 replacements = {ctx.node(): [new_node]}
1578 replacements = {ctx.node(): [new_node]}
1580 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1579 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1581
1580
1582 return
1581 return
1583
1582
1584 # abssrc: hgsep
1583 # abssrc: hgsep
1585 # relsrc: ossep
1584 # relsrc: ossep
1586 # otarget: ossep
1585 # otarget: ossep
1587 def copyfile(abssrc, relsrc, otarget, exact):
1586 def copyfile(abssrc, relsrc, otarget, exact):
1588 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1587 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1589 if b'/' in abstarget:
1588 if b'/' in abstarget:
1590 # We cannot normalize abstarget itself, this would prevent
1589 # We cannot normalize abstarget itself, this would prevent
1591 # case only renames, like a => A.
1590 # case only renames, like a => A.
1592 abspath, absname = abstarget.rsplit(b'/', 1)
1591 abspath, absname = abstarget.rsplit(b'/', 1)
1593 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1592 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1594 reltarget = repo.pathto(abstarget, cwd)
1593 reltarget = repo.pathto(abstarget, cwd)
1595 target = repo.wjoin(abstarget)
1594 target = repo.wjoin(abstarget)
1596 src = repo.wjoin(abssrc)
1595 src = repo.wjoin(abssrc)
1597 state = repo.dirstate[abstarget]
1596 state = repo.dirstate[abstarget]
1598
1597
1599 scmutil.checkportable(ui, abstarget)
1598 scmutil.checkportable(ui, abstarget)
1600
1599
1601 # check for collisions
1600 # check for collisions
1602 prevsrc = targets.get(abstarget)
1601 prevsrc = targets.get(abstarget)
1603 if prevsrc is not None:
1602 if prevsrc is not None:
1604 ui.warn(
1603 ui.warn(
1605 _(b'%s: not overwriting - %s collides with %s\n')
1604 _(b'%s: not overwriting - %s collides with %s\n')
1606 % (
1605 % (
1607 reltarget,
1606 reltarget,
1608 repo.pathto(abssrc, cwd),
1607 repo.pathto(abssrc, cwd),
1609 repo.pathto(prevsrc, cwd),
1608 repo.pathto(prevsrc, cwd),
1610 )
1609 )
1611 )
1610 )
1612 return True # report a failure
1611 return True # report a failure
1613
1612
1614 # check for overwrites
1613 # check for overwrites
1615 exists = os.path.lexists(target)
1614 exists = os.path.lexists(target)
1616 samefile = False
1615 samefile = False
1617 if exists and abssrc != abstarget:
1616 if exists and abssrc != abstarget:
1618 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1617 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1619 abstarget
1618 abstarget
1620 ):
1619 ):
1621 if not rename:
1620 if not rename:
1622 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1621 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1623 return True # report a failure
1622 return True # report a failure
1624 exists = False
1623 exists = False
1625 samefile = True
1624 samefile = True
1626
1625
1627 if not after and exists or after and state in b'mn':
1626 if not after and exists or after and state in b'mn':
1628 if not opts[b'force']:
1627 if not opts[b'force']:
1629 if state in b'mn':
1628 if state in b'mn':
1630 msg = _(b'%s: not overwriting - file already committed\n')
1629 msg = _(b'%s: not overwriting - file already committed\n')
1631 if after:
1630 if after:
1632 flags = b'--after --force'
1631 flags = b'--after --force'
1633 else:
1632 else:
1634 flags = b'--force'
1633 flags = b'--force'
1635 if rename:
1634 if rename:
1636 hint = (
1635 hint = (
1637 _(
1636 _(
1638 b"('hg rename %s' to replace the file by "
1637 b"('hg rename %s' to replace the file by "
1639 b'recording a rename)\n'
1638 b'recording a rename)\n'
1640 )
1639 )
1641 % flags
1640 % flags
1642 )
1641 )
1643 else:
1642 else:
1644 hint = (
1643 hint = (
1645 _(
1644 _(
1646 b"('hg copy %s' to replace the file by "
1645 b"('hg copy %s' to replace the file by "
1647 b'recording a copy)\n'
1646 b'recording a copy)\n'
1648 )
1647 )
1649 % flags
1648 % flags
1650 )
1649 )
1651 else:
1650 else:
1652 msg = _(b'%s: not overwriting - file exists\n')
1651 msg = _(b'%s: not overwriting - file exists\n')
1653 if rename:
1652 if rename:
1654 hint = _(
1653 hint = _(
1655 b"('hg rename --after' to record the rename)\n"
1654 b"('hg rename --after' to record the rename)\n"
1656 )
1655 )
1657 else:
1656 else:
1658 hint = _(b"('hg copy --after' to record the copy)\n")
1657 hint = _(b"('hg copy --after' to record the copy)\n")
1659 ui.warn(msg % reltarget)
1658 ui.warn(msg % reltarget)
1660 ui.warn(hint)
1659 ui.warn(hint)
1661 return True # report a failure
1660 return True # report a failure
1662
1661
1663 if after:
1662 if after:
1664 if not exists:
1663 if not exists:
1665 if rename:
1664 if rename:
1666 ui.warn(
1665 ui.warn(
1667 _(b'%s: not recording move - %s does not exist\n')
1666 _(b'%s: not recording move - %s does not exist\n')
1668 % (relsrc, reltarget)
1667 % (relsrc, reltarget)
1669 )
1668 )
1670 else:
1669 else:
1671 ui.warn(
1670 ui.warn(
1672 _(b'%s: not recording copy - %s does not exist\n')
1671 _(b'%s: not recording copy - %s does not exist\n')
1673 % (relsrc, reltarget)
1672 % (relsrc, reltarget)
1674 )
1673 )
1675 return True # report a failure
1674 return True # report a failure
1676 elif not dryrun:
1675 elif not dryrun:
1677 try:
1676 try:
1678 if exists:
1677 if exists:
1679 os.unlink(target)
1678 os.unlink(target)
1680 targetdir = os.path.dirname(target) or b'.'
1679 targetdir = os.path.dirname(target) or b'.'
1681 if not os.path.isdir(targetdir):
1680 if not os.path.isdir(targetdir):
1682 os.makedirs(targetdir)
1681 os.makedirs(targetdir)
1683 if samefile:
1682 if samefile:
1684 tmp = target + b"~hgrename"
1683 tmp = target + b"~hgrename"
1685 os.rename(src, tmp)
1684 os.rename(src, tmp)
1686 os.rename(tmp, target)
1685 os.rename(tmp, target)
1687 else:
1686 else:
1688 # Preserve stat info on renames, not on copies; this matches
1687 # Preserve stat info on renames, not on copies; this matches
1689 # Linux CLI behavior.
1688 # Linux CLI behavior.
1690 util.copyfile(src, target, copystat=rename)
1689 util.copyfile(src, target, copystat=rename)
1691 srcexists = True
1690 srcexists = True
1692 except IOError as inst:
1691 except IOError as inst:
1693 if inst.errno == errno.ENOENT:
1692 if inst.errno == errno.ENOENT:
1694 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1693 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1695 srcexists = False
1694 srcexists = False
1696 else:
1695 else:
1697 ui.warn(
1696 ui.warn(
1698 _(b'%s: cannot copy - %s\n')
1697 _(b'%s: cannot copy - %s\n')
1699 % (relsrc, encoding.strtolocal(inst.strerror))
1698 % (relsrc, encoding.strtolocal(inst.strerror))
1700 )
1699 )
1701 return True # report a failure
1700 return True # report a failure
1702
1701
1703 if ui.verbose or not exact:
1702 if ui.verbose or not exact:
1704 if rename:
1703 if rename:
1705 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1704 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1706 else:
1705 else:
1707 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1706 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1708
1707
1709 targets[abstarget] = abssrc
1708 targets[abstarget] = abssrc
1710
1709
1711 # fix up dirstate
1710 # fix up dirstate
1712 scmutil.dirstatecopy(
1711 scmutil.dirstatecopy(
1713 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1712 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1714 )
1713 )
1715 if rename and not dryrun:
1714 if rename and not dryrun:
1716 if not after and srcexists and not samefile:
1715 if not after and srcexists and not samefile:
1717 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1716 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1718 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1717 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1719 ctx.forget([abssrc])
1718 ctx.forget([abssrc])
1720
1719
1721 # pat: ossep
1720 # pat: ossep
1722 # dest: ossep
1721 # dest: ossep
1723 # srcs: list of (hgsep, hgsep, ossep, bool)
1722 # srcs: list of (hgsep, hgsep, ossep, bool)
1724 # return: function that takes hgsep and returns ossep
1723 # return: function that takes hgsep and returns ossep
1725 def targetpathfn(pat, dest, srcs):
1724 def targetpathfn(pat, dest, srcs):
1726 if os.path.isdir(pat):
1725 if os.path.isdir(pat):
1727 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1726 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1728 abspfx = util.localpath(abspfx)
1727 abspfx = util.localpath(abspfx)
1729 if destdirexists:
1728 if destdirexists:
1730 striplen = len(os.path.split(abspfx)[0])
1729 striplen = len(os.path.split(abspfx)[0])
1731 else:
1730 else:
1732 striplen = len(abspfx)
1731 striplen = len(abspfx)
1733 if striplen:
1732 if striplen:
1734 striplen += len(pycompat.ossep)
1733 striplen += len(pycompat.ossep)
1735 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1734 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1736 elif destdirexists:
1735 elif destdirexists:
1737 res = lambda p: os.path.join(
1736 res = lambda p: os.path.join(
1738 dest, os.path.basename(util.localpath(p))
1737 dest, os.path.basename(util.localpath(p))
1739 )
1738 )
1740 else:
1739 else:
1741 res = lambda p: dest
1740 res = lambda p: dest
1742 return res
1741 return res
1743
1742
1744 # pat: ossep
1743 # pat: ossep
1745 # dest: ossep
1744 # dest: ossep
1746 # srcs: list of (hgsep, hgsep, ossep, bool)
1745 # srcs: list of (hgsep, hgsep, ossep, bool)
1747 # return: function that takes hgsep and returns ossep
1746 # return: function that takes hgsep and returns ossep
1748 def targetpathafterfn(pat, dest, srcs):
1747 def targetpathafterfn(pat, dest, srcs):
1749 if matchmod.patkind(pat):
1748 if matchmod.patkind(pat):
1750 # a mercurial pattern
1749 # a mercurial pattern
1751 res = lambda p: os.path.join(
1750 res = lambda p: os.path.join(
1752 dest, os.path.basename(util.localpath(p))
1751 dest, os.path.basename(util.localpath(p))
1753 )
1752 )
1754 else:
1753 else:
1755 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1754 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1756 if len(abspfx) < len(srcs[0][0]):
1755 if len(abspfx) < len(srcs[0][0]):
1757 # A directory. Either the target path contains the last
1756 # A directory. Either the target path contains the last
1758 # component of the source path or it does not.
1757 # component of the source path or it does not.
1759 def evalpath(striplen):
1758 def evalpath(striplen):
1760 score = 0
1759 score = 0
1761 for s in srcs:
1760 for s in srcs:
1762 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1761 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1763 if os.path.lexists(t):
1762 if os.path.lexists(t):
1764 score += 1
1763 score += 1
1765 return score
1764 return score
1766
1765
1767 abspfx = util.localpath(abspfx)
1766 abspfx = util.localpath(abspfx)
1768 striplen = len(abspfx)
1767 striplen = len(abspfx)
1769 if striplen:
1768 if striplen:
1770 striplen += len(pycompat.ossep)
1769 striplen += len(pycompat.ossep)
1771 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1770 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1772 score = evalpath(striplen)
1771 score = evalpath(striplen)
1773 striplen1 = len(os.path.split(abspfx)[0])
1772 striplen1 = len(os.path.split(abspfx)[0])
1774 if striplen1:
1773 if striplen1:
1775 striplen1 += len(pycompat.ossep)
1774 striplen1 += len(pycompat.ossep)
1776 if evalpath(striplen1) > score:
1775 if evalpath(striplen1) > score:
1777 striplen = striplen1
1776 striplen = striplen1
1778 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1777 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1779 else:
1778 else:
1780 # a file
1779 # a file
1781 if destdirexists:
1780 if destdirexists:
1782 res = lambda p: os.path.join(
1781 res = lambda p: os.path.join(
1783 dest, os.path.basename(util.localpath(p))
1782 dest, os.path.basename(util.localpath(p))
1784 )
1783 )
1785 else:
1784 else:
1786 res = lambda p: dest
1785 res = lambda p: dest
1787 return res
1786 return res
1788
1787
1789 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1788 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1790 if not destdirexists:
1789 if not destdirexists:
1791 if len(pats) > 1 or matchmod.patkind(pats[0]):
1790 if len(pats) > 1 or matchmod.patkind(pats[0]):
1792 raise error.Abort(
1791 raise error.Abort(
1793 _(
1792 _(
1794 b'with multiple sources, destination must be an '
1793 b'with multiple sources, destination must be an '
1795 b'existing directory'
1794 b'existing directory'
1796 )
1795 )
1797 )
1796 )
1798 if util.endswithsep(dest):
1797 if util.endswithsep(dest):
1799 raise error.Abort(_(b'destination %s is not a directory') % dest)
1798 raise error.Abort(_(b'destination %s is not a directory') % dest)
1800
1799
1801 tfn = targetpathfn
1800 tfn = targetpathfn
1802 if after:
1801 if after:
1803 tfn = targetpathafterfn
1802 tfn = targetpathafterfn
1804 copylist = []
1803 copylist = []
1805 for pat in pats:
1804 for pat in pats:
1806 srcs = walkpat(pat)
1805 srcs = walkpat(pat)
1807 if not srcs:
1806 if not srcs:
1808 continue
1807 continue
1809 copylist.append((tfn(pat, dest, srcs), srcs))
1808 copylist.append((tfn(pat, dest, srcs), srcs))
1810 if not copylist:
1809 if not copylist:
1811 raise error.Abort(_(b'no files to copy'))
1810 raise error.Abort(_(b'no files to copy'))
1812
1811
1813 errors = 0
1812 errors = 0
1814 for targetpath, srcs in copylist:
1813 for targetpath, srcs in copylist:
1815 for abssrc, relsrc, exact in srcs:
1814 for abssrc, relsrc, exact in srcs:
1816 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1815 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1817 errors += 1
1816 errors += 1
1818
1817
1819 return errors != 0
1818 return errors != 0
1820
1819
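# A hedged sketch, not part of this change, of driving copy() directly the way
# commands.copy/rename do; every option key below is an assumption derived
# from the lookups above, and the helper name is hypothetical.  src and dst
# are byte-string paths relative to the current working directory.
def _example_record_rename(ui, repo, src, dst):
    opts = {
        b'forget': False,
        b'after': False,
        b'dry_run': False,
        b'at_rev': None,
        b'force': False,
    }
    with repo.wlock():
        # returns True if any file could not be copied or renamed
        return copy(ui, repo, [src, dst], opts, rename=True)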
1821
1820
1822 ## facility to let extensions process additional data into an import patch
1821 ## facility to let extensions process additional data into an import patch
1823 # list of identifiers to be executed in order
1822 # list of identifiers to be executed in order
1824 extrapreimport = [] # run before commit
1823 extrapreimport = [] # run before commit
1825 extrapostimport = [] # run after commit
1824 extrapostimport = [] # run after commit
1826 # mapping from identifier to actual import function
1825 # mapping from identifier to actual import function
1827 #
1826 #
1828 # 'preimport' functions are run before the commit is made and are provided the following
1827 # 'preimport' functions are run before the commit is made and are provided the following
1829 # arguments:
1828 # arguments:
1830 # - repo: the localrepository instance,
1829 # - repo: the localrepository instance,
1831 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1830 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1832 # - extra: the future extra dictionary of the changeset, please mutate it,
1831 # - extra: the future extra dictionary of the changeset, please mutate it,
1833 # - opts: the import options.
1832 # - opts: the import options.
1834 # XXX ideally, we would just pass a ctx ready to be computed; that would allow
1833 # XXX ideally, we would just pass a ctx ready to be computed; that would allow
1835 # mutation of the in-memory commit and more. Feel free to rework the code to get
1834 # mutation of the in-memory commit and more. Feel free to rework the code to get
1836 # there.
1835 # there.
1837 extrapreimportmap = {}
1836 extrapreimportmap = {}
1838 # 'postimport' functions are run after the commit is made and are provided the following
1837 # 'postimport' functions are run after the commit is made and are provided the following
1839 # argument:
1838 # argument:
1840 # - ctx: the changectx created by import.
1839 # - ctx: the changectx created by import.
1841 extrapostimportmap = {}
1840 extrapostimportmap = {}
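# An illustrative sketch of how a third-party extension might use the facility
# above; the identifier b'myext' and both callbacks are hypothetical, and the
# registration would normally happen in the extension's extsetup().
def _example_register_import_hooks():
    def recordsource(repo, patchdata, extra, opts):
        # preimport: stash the original node id (if any) in the commit extras
        if patchdata.get(b'nodeid'):
            extra[b'imported-from'] = patchdata[b'nodeid']

    def announce(ctx):
        # postimport: report the freshly created changeset
        ctx.repo().ui.note(b'imported %s\n' % ctx.hex())

    extrapreimport.append(b'myext')
    extrapreimportmap[b'myext'] = recordsource
    extrapostimport.append(b'myext')
    extrapostimportmap[b'myext'] = announce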
1842
1841
1843
1842
1844 def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
1843 def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
1845 """Utility function used by commands.import to import a single patch
1844 """Utility function used by commands.import to import a single patch
1846
1845
1847 This function is explicitly defined here to help the evolve extension to
1846 This function is explicitly defined here to help the evolve extension to
1848 wrap this part of the import logic.
1847 wrap this part of the import logic.
1849
1848
1850 The API is currently a bit ugly because it is a simple code translation from
1849 The API is currently a bit ugly because it is a simple code translation from
1851 the import command. Feel free to make it better.
1850 the import command. Feel free to make it better.
1852
1851
1853 :patchdata: a dictionary containing parsed patch data (such as from
1852 :patchdata: a dictionary containing parsed patch data (such as from
1854 ``patch.extract()``)
1853 ``patch.extract()``)
1855 :parents: nodes that will be the parents of the created commit
1854 :parents: nodes that will be the parents of the created commit
1856 :opts: the full dict of options passed to the import command
1855 :opts: the full dict of options passed to the import command
1857 :msgs: list to save commit message to.
1856 :msgs: list to save commit message to.
1858 (used in case we need to save it when failing)
1857 (used in case we need to save it when failing)
1859 :updatefunc: a function that updates a repo to a given node
1858 :updatefunc: a function that updates a repo to a given node
1860 updatefunc(<repo>, <node>)
1859 updatefunc(<repo>, <node>)
1861 """
1860 """
1862 # avoid cycle context -> subrepo -> cmdutil
1861 # avoid cycle context -> subrepo -> cmdutil
1863 from . import context
1862 from . import context
1864
1863
1865 tmpname = patchdata.get(b'filename')
1864 tmpname = patchdata.get(b'filename')
1866 message = patchdata.get(b'message')
1865 message = patchdata.get(b'message')
1867 user = opts.get(b'user') or patchdata.get(b'user')
1866 user = opts.get(b'user') or patchdata.get(b'user')
1868 date = opts.get(b'date') or patchdata.get(b'date')
1867 date = opts.get(b'date') or patchdata.get(b'date')
1869 branch = patchdata.get(b'branch')
1868 branch = patchdata.get(b'branch')
1870 nodeid = patchdata.get(b'nodeid')
1869 nodeid = patchdata.get(b'nodeid')
1871 p1 = patchdata.get(b'p1')
1870 p1 = patchdata.get(b'p1')
1872 p2 = patchdata.get(b'p2')
1871 p2 = patchdata.get(b'p2')
1873
1872
1874 nocommit = opts.get(b'no_commit')
1873 nocommit = opts.get(b'no_commit')
1875 importbranch = opts.get(b'import_branch')
1874 importbranch = opts.get(b'import_branch')
1876 update = not opts.get(b'bypass')
1875 update = not opts.get(b'bypass')
1877 strip = opts[b"strip"]
1876 strip = opts[b"strip"]
1878 prefix = opts[b"prefix"]
1877 prefix = opts[b"prefix"]
1879 sim = float(opts.get(b'similarity') or 0)
1878 sim = float(opts.get(b'similarity') or 0)
1880
1879
1881 if not tmpname:
1880 if not tmpname:
1882 return None, None, False
1881 return None, None, False
1883
1882
1884 rejects = False
1883 rejects = False
1885
1884
1886 cmdline_message = logmessage(ui, opts)
1885 cmdline_message = logmessage(ui, opts)
1887 if cmdline_message:
1886 if cmdline_message:
1888 # pickup the cmdline msg
1887 # pickup the cmdline msg
1889 message = cmdline_message
1888 message = cmdline_message
1890 elif message:
1889 elif message:
1891 # pickup the patch msg
1890 # pickup the patch msg
1892 message = message.strip()
1891 message = message.strip()
1893 else:
1892 else:
1894 # launch the editor
1893 # launch the editor
1895 message = None
1894 message = None
1896 ui.debug(b'message:\n%s\n' % (message or b''))
1895 ui.debug(b'message:\n%s\n' % (message or b''))
1897
1896
1898 if len(parents) == 1:
1897 if len(parents) == 1:
1899 parents.append(repo[nullid])
1898 parents.append(repo[nullid])
1900 if opts.get(b'exact'):
1899 if opts.get(b'exact'):
1901 if not nodeid or not p1:
1900 if not nodeid or not p1:
1902 raise error.Abort(_(b'not a Mercurial patch'))
1901 raise error.Abort(_(b'not a Mercurial patch'))
1903 p1 = repo[p1]
1902 p1 = repo[p1]
1904 p2 = repo[p2 or nullid]
1903 p2 = repo[p2 or nullid]
1905 elif p2:
1904 elif p2:
1906 try:
1905 try:
1907 p1 = repo[p1]
1906 p1 = repo[p1]
1908 p2 = repo[p2]
1907 p2 = repo[p2]
1909 # Without any options, consider p2 only if the
1908 # Without any options, consider p2 only if the
1910 # patch is being applied on top of the recorded
1909 # patch is being applied on top of the recorded
1911 # first parent.
1910 # first parent.
1912 if p1 != parents[0]:
1911 if p1 != parents[0]:
1913 p1 = parents[0]
1912 p1 = parents[0]
1914 p2 = repo[nullid]
1913 p2 = repo[nullid]
1915 except error.RepoError:
1914 except error.RepoError:
1916 p1, p2 = parents
1915 p1, p2 = parents
1917 if p2.node() == nullid:
1916 if p2.node() == nullid:
1918 ui.warn(
1917 ui.warn(
1919 _(
1918 _(
1920 b"warning: import the patch as a normal revision\n"
1919 b"warning: import the patch as a normal revision\n"
1921 b"(use --exact to import the patch as a merge)\n"
1920 b"(use --exact to import the patch as a merge)\n"
1922 )
1921 )
1923 )
1922 )
1924 else:
1923 else:
1925 p1, p2 = parents
1924 p1, p2 = parents
1926
1925
1927 n = None
1926 n = None
1928 if update:
1927 if update:
1929 if p1 != parents[0]:
1928 if p1 != parents[0]:
1930 updatefunc(repo, p1.node())
1929 updatefunc(repo, p1.node())
1931 if p2 != parents[1]:
1930 if p2 != parents[1]:
1932 repo.setparents(p1.node(), p2.node())
1931 repo.setparents(p1.node(), p2.node())
1933
1932
1934 if opts.get(b'exact') or importbranch:
1933 if opts.get(b'exact') or importbranch:
1935 repo.dirstate.setbranch(branch or b'default')
1934 repo.dirstate.setbranch(branch or b'default')
1936
1935
1937 partial = opts.get(b'partial', False)
1936 partial = opts.get(b'partial', False)
1938 files = set()
1937 files = set()
1939 try:
1938 try:
1940 patch.patch(
1939 patch.patch(
1941 ui,
1940 ui,
1942 repo,
1941 repo,
1943 tmpname,
1942 tmpname,
1944 strip=strip,
1943 strip=strip,
1945 prefix=prefix,
1944 prefix=prefix,
1946 files=files,
1945 files=files,
1947 eolmode=None,
1946 eolmode=None,
1948 similarity=sim / 100.0,
1947 similarity=sim / 100.0,
1949 )
1948 )
1950 except error.PatchError as e:
1949 except error.PatchError as e:
1951 if not partial:
1950 if not partial:
1952 raise error.Abort(pycompat.bytestr(e))
1951 raise error.Abort(pycompat.bytestr(e))
1953 if partial:
1952 if partial:
1954 rejects = True
1953 rejects = True
1955
1954
1956 files = list(files)
1955 files = list(files)
1957 if nocommit:
1956 if nocommit:
1958 if message:
1957 if message:
1959 msgs.append(message)
1958 msgs.append(message)
1960 else:
1959 else:
1961 if opts.get(b'exact') or p2:
1960 if opts.get(b'exact') or p2:
1962 # If you got here, you either used --force and know what
1961 # If you got here, you either used --force and know what
1963 # you are doing or used --exact or a merge patch while
1962 # you are doing or used --exact or a merge patch while
1964 # being updated to its first parent.
1963 # being updated to its first parent.
1965 m = None
1964 m = None
1966 else:
1965 else:
1967 m = scmutil.matchfiles(repo, files or [])
1966 m = scmutil.matchfiles(repo, files or [])
1968 editform = mergeeditform(repo[None], b'import.normal')
1967 editform = mergeeditform(repo[None], b'import.normal')
1969 if opts.get(b'exact'):
1968 if opts.get(b'exact'):
1970 editor = None
1969 editor = None
1971 else:
1970 else:
1972 editor = getcommiteditor(
1971 editor = getcommiteditor(
1973 editform=editform, **pycompat.strkwargs(opts)
1972 editform=editform, **pycompat.strkwargs(opts)
1974 )
1973 )
1975 extra = {}
1974 extra = {}
1976 for idfunc in extrapreimport:
1975 for idfunc in extrapreimport:
1977 extrapreimportmap[idfunc](repo, patchdata, extra, opts)
1976 extrapreimportmap[idfunc](repo, patchdata, extra, opts)
1978 overrides = {}
1977 overrides = {}
1979 if partial:
1978 if partial:
1980 overrides[(b'ui', b'allowemptycommit')] = True
1979 overrides[(b'ui', b'allowemptycommit')] = True
1981 if opts.get(b'secret'):
1980 if opts.get(b'secret'):
1982 overrides[(b'phases', b'new-commit')] = b'secret'
1981 overrides[(b'phases', b'new-commit')] = b'secret'
1983 with repo.ui.configoverride(overrides, b'import'):
1982 with repo.ui.configoverride(overrides, b'import'):
1984 n = repo.commit(
1983 n = repo.commit(
1985 message, user, date, match=m, editor=editor, extra=extra
1984 message, user, date, match=m, editor=editor, extra=extra
1986 )
1985 )
1987 for idfunc in extrapostimport:
1986 for idfunc in extrapostimport:
1988 extrapostimportmap[idfunc](repo[n])
1987 extrapostimportmap[idfunc](repo[n])
1989 else:
1988 else:
1990 if opts.get(b'exact') or importbranch:
1989 if opts.get(b'exact') or importbranch:
1991 branch = branch or b'default'
1990 branch = branch or b'default'
1992 else:
1991 else:
1993 branch = p1.branch()
1992 branch = p1.branch()
1994 store = patch.filestore()
1993 store = patch.filestore()
1995 try:
1994 try:
1996 files = set()
1995 files = set()
1997 try:
1996 try:
1998 patch.patchrepo(
1997 patch.patchrepo(
1999 ui,
1998 ui,
2000 repo,
1999 repo,
2001 p1,
2000 p1,
2002 store,
2001 store,
2003 tmpname,
2002 tmpname,
2004 strip,
2003 strip,
2005 prefix,
2004 prefix,
2006 files,
2005 files,
2007 eolmode=None,
2006 eolmode=None,
2008 )
2007 )
2009 except error.PatchError as e:
2008 except error.PatchError as e:
2010 raise error.Abort(stringutil.forcebytestr(e))
2009 raise error.Abort(stringutil.forcebytestr(e))
2011 if opts.get(b'exact'):
2010 if opts.get(b'exact'):
2012 editor = None
2011 editor = None
2013 else:
2012 else:
2014 editor = getcommiteditor(editform=b'import.bypass')
2013 editor = getcommiteditor(editform=b'import.bypass')
2015 memctx = context.memctx(
2014 memctx = context.memctx(
2016 repo,
2015 repo,
2017 (p1.node(), p2.node()),
2016 (p1.node(), p2.node()),
2018 message,
2017 message,
2019 files=files,
2018 files=files,
2020 filectxfn=store,
2019 filectxfn=store,
2021 user=user,
2020 user=user,
2022 date=date,
2021 date=date,
2023 branch=branch,
2022 branch=branch,
2024 editor=editor,
2023 editor=editor,
2025 )
2024 )
2026
2025
2027 overrides = {}
2026 overrides = {}
2028 if opts.get(b'secret'):
2027 if opts.get(b'secret'):
2029 overrides[(b'phases', b'new-commit')] = b'secret'
2028 overrides[(b'phases', b'new-commit')] = b'secret'
2030 with repo.ui.configoverride(overrides, b'import'):
2029 with repo.ui.configoverride(overrides, b'import'):
2031 n = memctx.commit()
2030 n = memctx.commit()
2032 finally:
2031 finally:
2033 store.close()
2032 store.close()
2034 if opts.get(b'exact') and nocommit:
2033 if opts.get(b'exact') and nocommit:
2035 # --exact with --no-commit is still useful in that it does merge
2034 # --exact with --no-commit is still useful in that it does merge
2036 # and branch bits
2035 # and branch bits
2037 ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
2036 ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
2038 elif opts.get(b'exact') and (not n or hex(n) != nodeid):
2037 elif opts.get(b'exact') and (not n or hex(n) != nodeid):
2039 raise error.Abort(_(b'patch is damaged or loses information'))
2038 raise error.Abort(_(b'patch is damaged or loses information'))
2040 msg = _(b'applied to working directory')
2039 msg = _(b'applied to working directory')
2041 if n:
2040 if n:
2042 # i18n: refers to a short changeset id
2041 # i18n: refers to a short changeset id
2043 msg = _(b'created %s') % short(n)
2042 msg = _(b'created %s') % short(n)
2044 return msg, n, rejects
2043 return msg, n, rejects
2045
2044
2046
2045
2047 # facility to let extensions include additional data in an exported patch
2046 # facility to let extensions include additional data in an exported patch
2048 # list of identifiers to be executed in order
2047 # list of identifiers to be executed in order
2049 extraexport = []
2048 extraexport = []
2050 # mapping from identifier to actual export function
2049 # mapping from identifier to actual export function
2051 # function has to return a string to be added to the header or None
2050 # function has to return a string to be added to the header or None
2052 # it is given two arguments (sequencenumber, changectx)
2051 # it is given two arguments (sequencenumber, changectx)
2053 extraexportmap = {}
2052 extraexportmap = {}
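# Illustrative only: a hypothetical extension adding one extra header to each
# exported patch.  The identifier b'exampletopic', the header text and the
# extra key are assumptions; returning None skips the header for a changeset.
def _example_register_export_header():
    def topicheader(seqno, ctx):
        topic = ctx.extra().get(b'topic')
        if not topic:
            return None
        return b'EXP-Topic %s' % topic

    extraexport.append(b'exampletopic')
    extraexportmap[b'exampletopic'] = topicheader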
2054
2053
2055
2054
2056 def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
2055 def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
2057 node = scmutil.binnode(ctx)
2056 node = scmutil.binnode(ctx)
2058 parents = [p.node() for p in ctx.parents() if p]
2057 parents = [p.node() for p in ctx.parents() if p]
2059 branch = ctx.branch()
2058 branch = ctx.branch()
2060 if switch_parent:
2059 if switch_parent:
2061 parents.reverse()
2060 parents.reverse()
2062
2061
2063 if parents:
2062 if parents:
2064 prev = parents[0]
2063 prev = parents[0]
2065 else:
2064 else:
2066 prev = nullid
2065 prev = nullid
2067
2066
2068 fm.context(ctx=ctx)
2067 fm.context(ctx=ctx)
2069 fm.plain(b'# HG changeset patch\n')
2068 fm.plain(b'# HG changeset patch\n')
2070 fm.write(b'user', b'# User %s\n', ctx.user())
2069 fm.write(b'user', b'# User %s\n', ctx.user())
2071 fm.plain(b'# Date %d %d\n' % ctx.date())
2070 fm.plain(b'# Date %d %d\n' % ctx.date())
2072 fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
2071 fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
2073 fm.condwrite(
2072 fm.condwrite(
2074 branch and branch != b'default', b'branch', b'# Branch %s\n', branch
2073 branch and branch != b'default', b'branch', b'# Branch %s\n', branch
2075 )
2074 )
2076 fm.write(b'node', b'# Node ID %s\n', hex(node))
2075 fm.write(b'node', b'# Node ID %s\n', hex(node))
2077 fm.plain(b'# Parent %s\n' % hex(prev))
2076 fm.plain(b'# Parent %s\n' % hex(prev))
2078 if len(parents) > 1:
2077 if len(parents) > 1:
2079 fm.plain(b'# Parent %s\n' % hex(parents[1]))
2078 fm.plain(b'# Parent %s\n' % hex(parents[1]))
2080 fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))
2079 fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))
2081
2080
2082 # TODO: redesign extraexportmap function to support formatter
2081 # TODO: redesign extraexportmap function to support formatter
2083 for headerid in extraexport:
2082 for headerid in extraexport:
2084 header = extraexportmap[headerid](seqno, ctx)
2083 header = extraexportmap[headerid](seqno, ctx)
2085 if header is not None:
2084 if header is not None:
2086 fm.plain(b'# %s\n' % header)
2085 fm.plain(b'# %s\n' % header)
2087
2086
2088 fm.write(b'desc', b'%s\n', ctx.description().rstrip())
2087 fm.write(b'desc', b'%s\n', ctx.description().rstrip())
2089 fm.plain(b'\n')
2088 fm.plain(b'\n')
2090
2089
2091 if fm.isplain():
2090 if fm.isplain():
2092 chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
2091 chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
2093 for chunk, label in chunkiter:
2092 for chunk, label in chunkiter:
2094 fm.plain(chunk, label=label)
2093 fm.plain(chunk, label=label)
2095 else:
2094 else:
2096 chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
2095 chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
2097 # TODO: make it structured?
2096 # TODO: make it structured?
2098 fm.data(diff=b''.join(chunkiter))
2097 fm.data(diff=b''.join(chunkiter))
2099
2098
2100
2099
2101 def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
2100 def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
2102 """Export changesets to stdout or a single file"""
2101 """Export changesets to stdout or a single file"""
2103 for seqno, rev in enumerate(revs, 1):
2102 for seqno, rev in enumerate(revs, 1):
2104 ctx = repo[rev]
2103 ctx = repo[rev]
2105 if not dest.startswith(b'<'):
2104 if not dest.startswith(b'<'):
2106 repo.ui.note(b"%s\n" % dest)
2105 repo.ui.note(b"%s\n" % dest)
2107 fm.startitem()
2106 fm.startitem()
2108 _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)
2107 _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)
2109
2108
2110
2109
2111 def _exportfntemplate(
2110 def _exportfntemplate(
2112 repo, revs, basefm, fntemplate, switch_parent, diffopts, match
2111 repo, revs, basefm, fntemplate, switch_parent, diffopts, match
2113 ):
2112 ):
2114 """Export changesets to possibly multiple files"""
2113 """Export changesets to possibly multiple files"""
2115 total = len(revs)
2114 total = len(revs)
2116 revwidth = max(len(str(rev)) for rev in revs)
2115 revwidth = max(len(str(rev)) for rev in revs)
2117 filemap = util.sortdict() # filename: [(seqno, rev), ...]
2116 filemap = util.sortdict() # filename: [(seqno, rev), ...]
2118
2117
2119 for seqno, rev in enumerate(revs, 1):
2118 for seqno, rev in enumerate(revs, 1):
2120 ctx = repo[rev]
2119 ctx = repo[rev]
2121 dest = makefilename(
2120 dest = makefilename(
2122 ctx, fntemplate, total=total, seqno=seqno, revwidth=revwidth
2121 ctx, fntemplate, total=total, seqno=seqno, revwidth=revwidth
2123 )
2122 )
2124 filemap.setdefault(dest, []).append((seqno, rev))
2123 filemap.setdefault(dest, []).append((seqno, rev))
2125
2124
2126 for dest in filemap:
2125 for dest in filemap:
2127 with formatter.maybereopen(basefm, dest) as fm:
2126 with formatter.maybereopen(basefm, dest) as fm:
2128 repo.ui.note(b"%s\n" % dest)
2127 repo.ui.note(b"%s\n" % dest)
2129 for seqno, rev in filemap[dest]:
2128 for seqno, rev in filemap[dest]:
2130 fm.startitem()
2129 fm.startitem()
2131 ctx = repo[rev]
2130 ctx = repo[rev]
2132 _exportsingle(
2131 _exportsingle(
2133 repo, ctx, fm, match, switch_parent, seqno, diffopts
2132 repo, ctx, fm, match, switch_parent, seqno, diffopts
2134 )
2133 )
2135
2134
2136
2135
2137 def _prefetchchangedfiles(repo, revs, match):
2136 def _prefetchchangedfiles(repo, revs, match):
2138 allfiles = set()
2137 allfiles = set()
2139 for rev in revs:
2138 for rev in revs:
2140 for file in repo[rev].files():
2139 for file in repo[rev].files():
2141 if not match or match(file):
2140 if not match or match(file):
2142 allfiles.add(file)
2141 allfiles.add(file)
2143 match = scmutil.matchfiles(repo, allfiles)
2142 match = scmutil.matchfiles(repo, allfiles)
2144 revmatches = [(rev, match) for rev in revs]
2143 revmatches = [(rev, match) for rev in revs]
2145 scmutil.prefetchfiles(repo, revmatches)
2144 scmutil.prefetchfiles(repo, revmatches)
2146
2145
2147
2146
2148 def export(
2147 def export(
2149 repo,
2148 repo,
2150 revs,
2149 revs,
2151 basefm,
2150 basefm,
2152 fntemplate=b'hg-%h.patch',
2151 fntemplate=b'hg-%h.patch',
2153 switch_parent=False,
2152 switch_parent=False,
2154 opts=None,
2153 opts=None,
2155 match=None,
2154 match=None,
2156 ):
2155 ):
2157 '''export changesets as hg patches
2156 '''export changesets as hg patches
2158
2157
2159 Args:
2158 Args:
2160 repo: The repository from which we're exporting revisions.
2159 repo: The repository from which we're exporting revisions.
2161 revs: A list of revisions to export as revision numbers.
2160 revs: A list of revisions to export as revision numbers.
2162 basefm: A formatter to which patches should be written.
2161 basefm: A formatter to which patches should be written.
2163 fntemplate: An optional string to use for generating patch file names.
2162 fntemplate: An optional string to use for generating patch file names.
2164 switch_parent: If True, show diffs against second parent when not nullid.
2163 switch_parent: If True, show diffs against second parent when not nullid.
2165 Default is false, which always shows diff against p1.
2164 Default is false, which always shows diff against p1.
2166 opts: diff options to use for generating the patch.
2165 opts: diff options to use for generating the patch.
2167 match: If specified, only export changes to files matching this matcher.
2166 match: If specified, only export changes to files matching this matcher.
2168
2167
2169 Returns:
2168 Returns:
2170 Nothing.
2169 Nothing.
2171
2170
2172 Side Effect:
2171 Side Effect:
2173 "HG Changeset Patch" data is emitted to one of the following
2172 "HG Changeset Patch" data is emitted to one of the following
2174 destinations:
2173 destinations:
2175 fntemplate specified: Each rev is written to a unique file named using
2174 fntemplate specified: Each rev is written to a unique file named using
2176 the given template.
2175 the given template.
2177 Otherwise: All revs will be written to basefm.
2176 Otherwise: All revs will be written to basefm.
2178 '''
2177 '''
2179 _prefetchchangedfiles(repo, revs, match)
2178 _prefetchchangedfiles(repo, revs, match)
2180
2179
2181 if not fntemplate:
2180 if not fntemplate:
2182 _exportfile(
2181 _exportfile(
2183 repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
2182 repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
2184 )
2183 )
2185 else:
2184 else:
2186 _exportfntemplate(
2185 _exportfntemplate(
2187 repo, revs, basefm, fntemplate, switch_parent, opts, match
2186 repo, revs, basefm, fntemplate, switch_parent, opts, match
2188 )
2187 )
2189
2188
2190
2189
2191 def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
2190 def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
2192 """Export changesets to the given file stream"""
2191 """Export changesets to the given file stream"""
2193 _prefetchchangedfiles(repo, revs, match)
2192 _prefetchchangedfiles(repo, revs, match)
2194
2193
2195 dest = getattr(fp, 'name', b'<unnamed>')
2194 dest = getattr(fp, 'name', b'<unnamed>')
2196 with formatter.formatter(repo.ui, fp, b'export', {}) as fm:
2195 with formatter.formatter(repo.ui, fp, b'export', {}) as fm:
2197 _exportfile(repo, revs, fm, dest, switch_parent, opts, match)
2196 _exportfile(repo, revs, fm, dest, switch_parent, opts, match)
2198
2197
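# A small usage sketch, not part of this change: dump two revisions into one
# patch file via exportfile().  The bytes path keeps fp.name as bytes, which
# matches what _exportfile() expects; the revision numbers and the diff
# options call are assumptions.
def _example_export_two_revs(repo):
    with open(b'/tmp/example.patch', 'wb') as fp:
        exportfile(
            repo,
            [5, 7],
            fp,
            opts=patch.difffeatureopts(repo.ui, git=True),
        )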
2199
2198
2200 def showmarker(fm, marker, index=None):
2199 def showmarker(fm, marker, index=None):
2201 """utility function to display obsolescence marker in a readable way
2200 """utility function to display obsolescence marker in a readable way
2202
2201
2203 To be used by debug functions."""
2202 To be used by debug functions."""
2204 if index is not None:
2203 if index is not None:
2205 fm.write(b'index', b'%i ', index)
2204 fm.write(b'index', b'%i ', index)
2206 fm.write(b'prednode', b'%s ', hex(marker.prednode()))
2205 fm.write(b'prednode', b'%s ', hex(marker.prednode()))
2207 succs = marker.succnodes()
2206 succs = marker.succnodes()
2208 fm.condwrite(
2207 fm.condwrite(
2209 succs,
2208 succs,
2210 b'succnodes',
2209 b'succnodes',
2211 b'%s ',
2210 b'%s ',
2212 fm.formatlist(map(hex, succs), name=b'node'),
2211 fm.formatlist(map(hex, succs), name=b'node'),
2213 )
2212 )
2214 fm.write(b'flag', b'%X ', marker.flags())
2213 fm.write(b'flag', b'%X ', marker.flags())
2215 parents = marker.parentnodes()
2214 parents = marker.parentnodes()
2216 if parents is not None:
2215 if parents is not None:
2217 fm.write(
2216 fm.write(
2218 b'parentnodes',
2217 b'parentnodes',
2219 b'{%s} ',
2218 b'{%s} ',
2220 fm.formatlist(map(hex, parents), name=b'node', sep=b', '),
2219 fm.formatlist(map(hex, parents), name=b'node', sep=b', '),
2221 )
2220 )
2222 fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
2221 fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
2223 meta = marker.metadata().copy()
2222 meta = marker.metadata().copy()
2224 meta.pop(b'date', None)
2223 meta.pop(b'date', None)
2225 smeta = pycompat.rapply(pycompat.maybebytestr, meta)
2224 smeta = pycompat.rapply(pycompat.maybebytestr, meta)
2226 fm.write(
2225 fm.write(
2227 b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
2226 b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
2228 )
2227 )
2229 fm.plain(b'\n')
2228 fm.plain(b'\n')
2230
2229
2231
2230
2232 def finddate(ui, repo, date):
2231 def finddate(ui, repo, date):
2233 """Find the tipmost changeset that matches the given date spec"""
2232 """Find the tipmost changeset that matches the given date spec"""
2234
2233
2235 df = dateutil.matchdate(date)
2234 df = dateutil.matchdate(date)
2236 m = scmutil.matchall(repo)
2235 m = scmutil.matchall(repo)
2237 results = {}
2236 results = {}
2238
2237
2239 def prep(ctx, fns):
2238 def prep(ctx, fns):
2240 d = ctx.date()
2239 d = ctx.date()
2241 if df(d[0]):
2240 if df(d[0]):
2242 results[ctx.rev()] = d
2241 results[ctx.rev()] = d
2243
2242
2244 for ctx in walkchangerevs(repo, m, {b'rev': None}, prep):
2243 for ctx in walkchangerevs(repo, m, {b'rev': None}, prep):
2245 rev = ctx.rev()
2244 rev = ctx.rev()
2246 if rev in results:
2245 if rev in results:
2247 ui.status(
2246 ui.status(
2248 _(b"found revision %d from %s\n")
2247 _(b"found revision %d from %s\n")
2249 % (rev, dateutil.datestr(results[rev]))
2248 % (rev, dateutil.datestr(results[rev]))
2250 )
2249 )
2251 return b'%d' % rev
2250 return b'%d' % rev
2252
2251
2253 raise error.Abort(_(b"revision matching date not found"))
2252 raise error.Abort(_(b"revision matching date not found"))
2254
2253
2255
2254
2256 def increasingwindows(windowsize=8, sizelimit=512):
2255 def increasingwindows(windowsize=8, sizelimit=512):
2257 while True:
2256 while True:
2258 yield windowsize
2257 yield windowsize
2259 if windowsize < sizelimit:
2258 if windowsize < sizelimit:
2260 windowsize *= 2
2259 windowsize *= 2
2261
2260
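# For reference, the default parameters produce 8, 16, 32, ... capped at 512:
#
#   >>> import itertools
#   >>> list(itertools.islice(increasingwindows(), 8))
#   [8, 16, 32, 64, 128, 256, 512, 512]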
2262
2261
2263 def _walkrevs(repo, opts):
2262 def _walkrevs(repo, opts):
2264 # Default --rev value depends on --follow but --follow behavior
2263 # Default --rev value depends on --follow but --follow behavior
2265 # depends on revisions resolved from --rev...
2264 # depends on revisions resolved from --rev...
2266 follow = opts.get(b'follow') or opts.get(b'follow_first')
2265 follow = opts.get(b'follow') or opts.get(b'follow_first')
2267 if opts.get(b'rev'):
2266 if opts.get(b'rev'):
2268 revs = scmutil.revrange(repo, opts[b'rev'])
2267 revs = scmutil.revrange(repo, opts[b'rev'])
2269 elif follow and repo.dirstate.p1() == nullid:
2268 elif follow and repo.dirstate.p1() == nullid:
2270 revs = smartset.baseset()
2269 revs = smartset.baseset()
2271 elif follow:
2270 elif follow:
2272 revs = repo.revs(b'reverse(:.)')
2271 revs = repo.revs(b'reverse(:.)')
2273 else:
2272 else:
2274 revs = smartset.spanset(repo)
2273 revs = smartset.spanset(repo)
2275 revs.reverse()
2274 revs.reverse()
2276 return revs
2275 return revs
2277
2276
2278
2277
2279 class FileWalkError(Exception):
2278 class FileWalkError(Exception):
2280 pass
2279 pass
2281
2280
2282
2281
2283 def walkfilerevs(repo, match, follow, revs, fncache):
2282 def walkfilerevs(repo, match, follow, revs, fncache):
2284 '''Walks the file history for the matched files.
2283 '''Walks the file history for the matched files.
2285
2284
2286 Returns the changeset revs that are involved in the file history.
2285 Returns the changeset revs that are involved in the file history.
2287
2286
2288 Throws FileWalkError if the file history can't be walked using
2287 Throws FileWalkError if the file history can't be walked using
2289 filelogs alone.
2288 filelogs alone.
2290 '''
2289 '''
2291 wanted = set()
2290 wanted = set()
2292 copies = []
2291 copies = []
2293 minrev, maxrev = min(revs), max(revs)
2292 minrev, maxrev = min(revs), max(revs)
2294
2293
2295 def filerevs(filelog, last):
2294 def filerevs(filelog, last):
2296 """
2295 """
2297 Only files, no patterns. Check the history of each file.
2296 Only files, no patterns. Check the history of each file.
2298
2297
2299 Examines filelog entries within the minrev..maxrev linkrev range.
2298 Examines filelog entries within the minrev..maxrev linkrev range.
2300 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
2299 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
2301 tuples in backwards order
2300 tuples in backwards order
2302 """
2301 """
2303 cl_count = len(repo)
2302 cl_count = len(repo)
2304 revs = []
2303 revs = []
2305 for j in pycompat.xrange(0, last + 1):
2304 for j in pycompat.xrange(0, last + 1):
2306 linkrev = filelog.linkrev(j)
2305 linkrev = filelog.linkrev(j)
2307 if linkrev < minrev:
2306 if linkrev < minrev:
2308 continue
2307 continue
2309 # only yield revs for which we have the changelog; this can
2308 # only yield revs for which we have the changelog; this can
2310 # happen while doing "hg log" during a pull or commit
2309 # happen while doing "hg log" during a pull or commit
2311 if linkrev >= cl_count:
2310 if linkrev >= cl_count:
2312 break
2311 break
2313
2312
2314 parentlinkrevs = []
2313 parentlinkrevs = []
2315 for p in filelog.parentrevs(j):
2314 for p in filelog.parentrevs(j):
2316 if p != nullrev:
2315 if p != nullrev:
2317 parentlinkrevs.append(filelog.linkrev(p))
2316 parentlinkrevs.append(filelog.linkrev(p))
2318 n = filelog.node(j)
2317 n = filelog.node(j)
2319 revs.append(
2318 revs.append(
2320 (linkrev, parentlinkrevs, follow and filelog.renamed(n))
2319 (linkrev, parentlinkrevs, follow and filelog.renamed(n))
2321 )
2320 )
2322
2321
2323 return reversed(revs)
2322 return reversed(revs)
2324
2323
2325 def iterfiles():
2324 def iterfiles():
2326 pctx = repo[b'.']
2325 pctx = repo[b'.']
2327 for filename in match.files():
2326 for filename in match.files():
2328 if follow:
2327 if follow:
2329 if filename not in pctx:
2328 if filename not in pctx:
2330 raise error.Abort(
2329 raise error.Abort(
2331 _(
2330 _(
2332 b'cannot follow file not in parent '
2331 b'cannot follow file not in parent '
2333 b'revision: "%s"'
2332 b'revision: "%s"'
2334 )
2333 )
2335 % filename
2334 % filename
2336 )
2335 )
2337 yield filename, pctx[filename].filenode()
2336 yield filename, pctx[filename].filenode()
2338 else:
2337 else:
2339 yield filename, None
2338 yield filename, None
2340 for filename_node in copies:
2339 for filename_node in copies:
2341 yield filename_node
2340 yield filename_node
2342
2341
2343 for file_, node in iterfiles():
2342 for file_, node in iterfiles():
2344 filelog = repo.file(file_)
2343 filelog = repo.file(file_)
2345 if not len(filelog):
2344 if not len(filelog):
2346 if node is None:
2345 if node is None:
2347 # A zero count may be a directory or deleted file, so
2346 # A zero count may be a directory or deleted file, so
2348 # try to find matching entries on the slow path.
2347 # try to find matching entries on the slow path.
2349 if follow:
2348 if follow:
2350 raise error.Abort(
2349 raise error.Abort(
2351 _(b'cannot follow nonexistent file: "%s"') % file_
2350 _(b'cannot follow nonexistent file: "%s"') % file_
2352 )
2351 )
2353 raise FileWalkError(b"Cannot walk via filelog")
2352 raise FileWalkError(b"Cannot walk via filelog")
2354 else:
2353 else:
2355 continue
2354 continue
2356
2355
2357 if node is None:
2356 if node is None:
2358 last = len(filelog) - 1
2357 last = len(filelog) - 1
2359 else:
2358 else:
2360 last = filelog.rev(node)
2359 last = filelog.rev(node)
2361
2360
2362 # keep track of all ancestors of the file
2361 # keep track of all ancestors of the file
2363 ancestors = {filelog.linkrev(last)}
2362 ancestors = {filelog.linkrev(last)}
2364
2363
2365 # iterate from latest to oldest revision
2364 # iterate from latest to oldest revision
2366 for rev, flparentlinkrevs, copied in filerevs(filelog, last):
2365 for rev, flparentlinkrevs, copied in filerevs(filelog, last):
2367 if not follow:
2366 if not follow:
2368 if rev > maxrev:
2367 if rev > maxrev:
2369 continue
2368 continue
2370 else:
2369 else:
2371 # Note that last might not be the first interesting
2370 # Note that last might not be the first interesting
2372 # rev to us:
2371 # rev to us:
2373 # if the file has been changed after maxrev, we'll
2372 # if the file has been changed after maxrev, we'll
2374 # have linkrev(last) > maxrev, and we still need
2373 # have linkrev(last) > maxrev, and we still need
2375 # to explore the file graph
2374 # to explore the file graph
2376 if rev not in ancestors:
2375 if rev not in ancestors:
2377 continue
2376 continue
2378 # XXX insert 1327 fix here
2377 # XXX insert 1327 fix here
2379 if flparentlinkrevs:
2378 if flparentlinkrevs:
2380 ancestors.update(flparentlinkrevs)
2379 ancestors.update(flparentlinkrevs)
2381
2380
2382 fncache.setdefault(rev, []).append(file_)
2381 fncache.setdefault(rev, []).append(file_)
2383 wanted.add(rev)
2382 wanted.add(rev)
2384 if copied:
2383 if copied:
2385 copies.append(copied)
2384 copies.append(copied)
2386
2385
2387 return wanted
2386 return wanted
2388
2387
2389
2388
2390 class _followfilter(object):
2389 class _followfilter(object):
2391 def __init__(self, repo, onlyfirst=False):
2390 def __init__(self, repo, onlyfirst=False):
2392 self.repo = repo
2391 self.repo = repo
2393 self.startrev = nullrev
2392 self.startrev = nullrev
2394 self.roots = set()
2393 self.roots = set()
2395 self.onlyfirst = onlyfirst
2394 self.onlyfirst = onlyfirst
2396
2395
2397 def match(self, rev):
2396 def match(self, rev):
2398 def realparents(rev):
2397 def realparents(rev):
2399 if self.onlyfirst:
2398 if self.onlyfirst:
2400 return self.repo.changelog.parentrevs(rev)[0:1]
2399 return self.repo.changelog.parentrevs(rev)[0:1]
2401 else:
2400 else:
2402 return filter(
2401 return filter(
2403 lambda x: x != nullrev, self.repo.changelog.parentrevs(rev)
2402 lambda x: x != nullrev, self.repo.changelog.parentrevs(rev)
2404 )
2403 )
2405
2404
2406 if self.startrev == nullrev:
2405 if self.startrev == nullrev:
2407 self.startrev = rev
2406 self.startrev = rev
2408 return True
2407 return True
2409
2408
2410 if rev > self.startrev:
2409 if rev > self.startrev:
2411 # forward: all descendants
2410 # forward: all descendants
2412 if not self.roots:
2411 if not self.roots:
2413 self.roots.add(self.startrev)
2412 self.roots.add(self.startrev)
2414 for parent in realparents(rev):
2413 for parent in realparents(rev):
2415 if parent in self.roots:
2414 if parent in self.roots:
2416 self.roots.add(rev)
2415 self.roots.add(rev)
2417 return True
2416 return True
2418 else:
2417 else:
2419 # backwards: all parents
2418 # backwards: all parents
2420 if not self.roots:
2419 if not self.roots:
2421 self.roots.update(realparents(self.startrev))
2420 self.roots.update(realparents(self.startrev))
2422 if rev in self.roots:
2421 if rev in self.roots:
2423 self.roots.remove(rev)
2422 self.roots.remove(rev)
2424 self.roots.update(realparents(rev))
2423 self.roots.update(realparents(rev))
2425 return True
2424 return True
2426
2425
2427 return False
2426 return False
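
# Hedged sketch (not from the original source) of how the filter is driven:
# callers feed it revisions one at a time, typically newest first, e.g.
#
#   ff = _followfilter(repo, onlyfirst=True)
#   kept = [r for r in repo.revs(b'reverse(:.)') if ff.match(r)]
#
# The first revision passed in becomes the anchor; later revisions are kept
# only if they connect to it through (optionally first-) parent links,
# walking forward for higher revs and backward for lower ones.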
2428
2427
2429
2428
2430 def walkchangerevs(repo, match, opts, prepare):
2429 def walkchangerevs(repo, match, opts, prepare):
2431 '''Iterate over files and the revs in which they changed.
2430 '''Iterate over files and the revs in which they changed.
2432
2431
2433 Callers most commonly need to iterate backwards over the history
2432 Callers most commonly need to iterate backwards over the history
2434 in which they are interested. Doing so has awful (quadratic-looking)
2433 in which they are interested. Doing so has awful (quadratic-looking)
2435 performance, so we use iterators in a "windowed" way.
2434 performance, so we use iterators in a "windowed" way.
2436
2435
2437 We walk a window of revisions in the desired order. Within the
2436 We walk a window of revisions in the desired order. Within the
2438 window, we first walk forwards to gather data, then in the desired
2437 window, we first walk forwards to gather data, then in the desired
2439 order (usually backwards) to display it.
2438 order (usually backwards) to display it.
2440
2439
2441 This function returns an iterator yielding contexts. Before
2440 This function returns an iterator yielding contexts. Before
2442 yielding each context, the iterator will first call the prepare
2441 yielding each context, the iterator will first call the prepare
2443 function on each context in the window in forward order.'''
2442 function on each context in the window in forward order.'''
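    # Illustrative caller sketch (assumed names, not from the original
    # source): "hg log"-style commands typically drive this as
    #
    #   def prepare(ctx, fns):
    #       # cache whatever the display step needs for ctx; fns is an
    #       # iterable of filenames in ctx matching the user's patterns
    #       pass
    #
    #   for ctx in walkchangerevs(repo, match, opts, prepare):
    #       displayer.show(ctx)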
2444
2443
2445 allfiles = opts.get(b'all_files')
2444 allfiles = opts.get(b'all_files')
2446 follow = opts.get(b'follow') or opts.get(b'follow_first')
2445 follow = opts.get(b'follow') or opts.get(b'follow_first')
2447 revs = _walkrevs(repo, opts)
2446 revs = _walkrevs(repo, opts)
2448 if not revs:
2447 if not revs:
2449 return []
2448 return []
2450 wanted = set()
2449 wanted = set()
2451 slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
2450 slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
2452 fncache = {}
2451 fncache = {}
2453 change = repo.__getitem__
2452 change = repo.__getitem__
2454
2453
2455 # First step is to fill wanted, the set of revisions that we want to yield.
2454 # First step is to fill wanted, the set of revisions that we want to yield.
2456 # When it does not induce extra cost, we also fill fncache for revisions in
2455 # When it does not induce extra cost, we also fill fncache for revisions in
2457 # wanted: a cache of filenames that were changed (ctx.files()) and that
2456 # wanted: a cache of filenames that were changed (ctx.files()) and that
2458 # match the file filtering conditions.
2457 # match the file filtering conditions.
2459
2458
2460 if match.always() or allfiles:
2459 if match.always() or allfiles:
2461 # No files, no patterns. Display all revs.
2460 # No files, no patterns. Display all revs.
2462 wanted = revs
2461 wanted = revs
2463 elif not slowpath:
2462 elif not slowpath:
2464 # We only have to read through the filelog to find wanted revisions
2463 # We only have to read through the filelog to find wanted revisions
2465
2464
2466 try:
2465 try:
2467 wanted = walkfilerevs(repo, match, follow, revs, fncache)
2466 wanted = walkfilerevs(repo, match, follow, revs, fncache)
2468 except FileWalkError:
2467 except FileWalkError:
2469 slowpath = True
2468 slowpath = True
2470
2469
2471 # We decided to fall back to the slowpath because at least one
2470 # We decided to fall back to the slowpath because at least one
2472 # of the paths was not a file. Check to see if at least one of them
2471 # of the paths was not a file. Check to see if at least one of them
2473 # existed in history, otherwise simply return
2472 # existed in history, otherwise simply return
2474 for path in match.files():
2473 for path in match.files():
2475 if path == b'.' or path in repo.store:
2474 if path == b'.' or path in repo.store:
2476 break
2475 break
2477 else:
2476 else:
2478 return []
2477 return []
2479
2478
2480 if slowpath:
2479 if slowpath:
2481 # We have to read the changelog to match filenames against
2480 # We have to read the changelog to match filenames against
2482 # changed files
2481 # changed files
2483
2482
2484 if follow:
2483 if follow:
2485 raise error.Abort(
2484 raise error.Abort(
2486 _(b'can only follow copies/renames for explicit filenames')
2485 _(b'can only follow copies/renames for explicit filenames')
2487 )
2486 )
2488
2487
2489 # The slow path checks files modified in every changeset.
2488 # The slow path checks files modified in every changeset.
2490 # This is really slow on large repos, so compute the set lazily.
2489 # This is really slow on large repos, so compute the set lazily.
2491 class lazywantedset(object):
2490 class lazywantedset(object):
2492 def __init__(self):
2491 def __init__(self):
2493 self.set = set()
2492 self.set = set()
2494 self.revs = set(revs)
2493 self.revs = set(revs)
2495
2494
2496 # No need to worry about locality here because it will be accessed
2495 # No need to worry about locality here because it will be accessed
2497 # in the same order as the increasing window below.
2496 # in the same order as the increasing window below.
2498 def __contains__(self, value):
2497 def __contains__(self, value):
2499 if value in self.set:
2498 if value in self.set:
2500 return True
2499 return True
2501 elif value not in self.revs:
2500 elif value not in self.revs:
2502 return False
2501 return False
2503 else:
2502 else:
2504 self.revs.discard(value)
2503 self.revs.discard(value)
2505 ctx = change(value)
2504 ctx = change(value)
2506 if allfiles:
2505 if allfiles:
2507 matches = list(ctx.manifest().walk(match))
2506 matches = list(ctx.manifest().walk(match))
2508 else:
2507 else:
2509 matches = [f for f in ctx.files() if match(f)]
2508 matches = [f for f in ctx.files() if match(f)]
2510 if matches:
2509 if matches:
2511 fncache[value] = matches
2510 fncache[value] = matches
2512 self.set.add(value)
2511 self.set.add(value)
2513 return True
2512 return True
2514 return False
2513 return False
2515
2514
2516 def discard(self, value):
2515 def discard(self, value):
2517 self.revs.discard(value)
2516 self.revs.discard(value)
2518 self.set.discard(value)
2517 self.set.discard(value)
2519
2518
2520 wanted = lazywantedset()
2519 wanted = lazywantedset()
2521
2520
2522 # it might be worthwhile to do this in the iterator if the rev range
2521 # it might be worthwhile to do this in the iterator if the rev range
2523 # is descending and the prune args are all within that range
2522 # is descending and the prune args are all within that range
2524 for rev in opts.get(b'prune', ()):
2523 for rev in opts.get(b'prune', ()):
2525 rev = repo[rev].rev()
2524 rev = repo[rev].rev()
2526 ff = _followfilter(repo)
2525 ff = _followfilter(repo)
2527 stop = min(revs[0], revs[-1])
2526 stop = min(revs[0], revs[-1])
2528 for x in pycompat.xrange(rev, stop - 1, -1):
2527 for x in pycompat.xrange(rev, stop - 1, -1):
2529 if ff.match(x):
2528 if ff.match(x):
2530 wanted = wanted - [x]
2529 wanted = wanted - [x]
2531
2530
2532 # Now that wanted is correctly initialized, we can iterate over the
2531 # Now that wanted is correctly initialized, we can iterate over the
2533 # revision range, yielding only revisions in wanted.
2532 # revision range, yielding only revisions in wanted.
2534 def iterate():
2533 def iterate():
2535 if follow and match.always():
2534 if follow and match.always():
2536 ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first'))
2535 ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first'))
2537
2536
2538 def want(rev):
2537 def want(rev):
2539 return ff.match(rev) and rev in wanted
2538 return ff.match(rev) and rev in wanted
2540
2539
2541 else:
2540 else:
2542
2541
2543 def want(rev):
2542 def want(rev):
2544 return rev in wanted
2543 return rev in wanted
2545
2544
2546 it = iter(revs)
2545 it = iter(revs)
2547 stopiteration = False
2546 stopiteration = False
2548 for windowsize in increasingwindows():
2547 for windowsize in increasingwindows():
2549 nrevs = []
2548 nrevs = []
2550 for i in pycompat.xrange(windowsize):
2549 for i in pycompat.xrange(windowsize):
2551 rev = next(it, None)
2550 rev = next(it, None)
2552 if rev is None:
2551 if rev is None:
2553 stopiteration = True
2552 stopiteration = True
2554 break
2553 break
2555 elif want(rev):
2554 elif want(rev):
2556 nrevs.append(rev)
2555 nrevs.append(rev)
2557 for rev in sorted(nrevs):
2556 for rev in sorted(nrevs):
2558 fns = fncache.get(rev)
2557 fns = fncache.get(rev)
2559 ctx = change(rev)
2558 ctx = change(rev)
2560 if not fns:
2559 if not fns:
2561
2560
2562 def fns_generator():
2561 def fns_generator():
2563 if allfiles:
2562 if allfiles:
2564
2563
2565 def bad(f, msg):
2564 def bad(f, msg):
2566 pass
2565 pass
2567
2566
2568 for f in ctx.matches(matchmod.badmatch(match, bad)):
2567 for f in ctx.matches(matchmod.badmatch(match, bad)):
2569 yield f
2568 yield f
2570 else:
2569 else:
2571 for f in ctx.files():
2570 for f in ctx.files():
2572 if match(f):
2571 if match(f):
2573 yield f
2572 yield f
2574
2573
2575 fns = fns_generator()
2574 fns = fns_generator()
2576 prepare(ctx, fns)
2575 prepare(ctx, fns)
2577 for rev in nrevs:
2576 for rev in nrevs:
2578 yield change(rev)
2577 yield change(rev)
2579
2578
2580 if stopiteration:
2579 if stopiteration:
2581 break
2580 break
2582
2581
2583 return iterate()
2582 return iterate()
2584
2583
2585
2584
2586 def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
2585 def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
2587 bad = []
2586 bad = []
2588
2587
2589 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2588 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2590 names = []
2589 names = []
2591 wctx = repo[None]
2590 wctx = repo[None]
2592 cca = None
2591 cca = None
2593 abort, warn = scmutil.checkportabilityalert(ui)
2592 abort, warn = scmutil.checkportabilityalert(ui)
2594 if abort or warn:
2593 if abort or warn:
2595 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2594 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2596
2595
2597 match = repo.narrowmatch(match, includeexact=True)
2596 match = repo.narrowmatch(match, includeexact=True)
2598 badmatch = matchmod.badmatch(match, badfn)
2597 badmatch = matchmod.badmatch(match, badfn)
2599 dirstate = repo.dirstate
2598 dirstate = repo.dirstate
2600 # We don't want to just call wctx.walk here, since it would return a lot of
2599 # We don't want to just call wctx.walk here, since it would return a lot of
2601 # clean files, which we aren't interested in and takes time.
2600 # clean files, which we aren't interested in and takes time.
2602 for f in sorted(
2601 for f in sorted(
2603 dirstate.walk(
2602 dirstate.walk(
2604 badmatch,
2603 badmatch,
2605 subrepos=sorted(wctx.substate),
2604 subrepos=sorted(wctx.substate),
2606 unknown=True,
2605 unknown=True,
2607 ignored=False,
2606 ignored=False,
2608 full=False,
2607 full=False,
2609 )
2608 )
2610 ):
2609 ):
2611 exact = match.exact(f)
2610 exact = match.exact(f)
2612 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2611 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2613 if cca:
2612 if cca:
2614 cca(f)
2613 cca(f)
2615 names.append(f)
2614 names.append(f)
2616 if ui.verbose or not exact:
2615 if ui.verbose or not exact:
2617 ui.status(
2616 ui.status(
2618 _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
2617 _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
2619 )
2618 )
2620
2619
2621 for subpath in sorted(wctx.substate):
2620 for subpath in sorted(wctx.substate):
2622 sub = wctx.sub(subpath)
2621 sub = wctx.sub(subpath)
2623 try:
2622 try:
2624 submatch = matchmod.subdirmatcher(subpath, match)
2623 submatch = matchmod.subdirmatcher(subpath, match)
2625 subprefix = repo.wvfs.reljoin(prefix, subpath)
2624 subprefix = repo.wvfs.reljoin(prefix, subpath)
2626 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2625 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2627 if opts.get('subrepos'):
2626 if opts.get('subrepos'):
2628 bad.extend(
2627 bad.extend(
2629 sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
2628 sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
2630 )
2629 )
2631 else:
2630 else:
2632 bad.extend(
2631 bad.extend(
2633 sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
2632 sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
2634 )
2633 )
2635 except error.LookupError:
2634 except error.LookupError:
2636 ui.status(
2635 ui.status(
2637 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2636 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2638 )
2637 )
2639
2638
2640 if not opts.get('dry_run'):
2639 if not opts.get('dry_run'):
2641 rejected = wctx.add(names, prefix)
2640 rejected = wctx.add(names, prefix)
2642 bad.extend(f for f in rejected if f in match.files())
2641 bad.extend(f for f in rejected if f in match.files())
2643 return bad
2642 return bad
2644
2643
2645
2644
2646 def addwebdirpath(repo, serverpath, webconf):
2645 def addwebdirpath(repo, serverpath, webconf):
2647 webconf[serverpath] = repo.root
2646 webconf[serverpath] = repo.root
2648 repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))
2647 repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))
2649
2648
2650 for r in repo.revs(b'filelog("path:.hgsub")'):
2649 for r in repo.revs(b'filelog("path:.hgsub")'):
2651 ctx = repo[r]
2650 ctx = repo[r]
2652 for subpath in ctx.substate:
2651 for subpath in ctx.substate:
2653 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2652 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2654
2653
2655
2654
2656 def forget(
2655 def forget(
2657 ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
2656 ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
2658 ):
2657 ):
2659 if dryrun and interactive:
2658 if dryrun and interactive:
2660 raise error.Abort(_(b"cannot specify both --dry-run and --interactive"))
2659 raise error.Abort(_(b"cannot specify both --dry-run and --interactive"))
2661 bad = []
2660 bad = []
2662 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2661 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2663 wctx = repo[None]
2662 wctx = repo[None]
2664 forgot = []
2663 forgot = []
2665
2664
2666 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2665 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2667 forget = sorted(s.modified + s.added + s.deleted + s.clean)
2666 forget = sorted(s.modified + s.added + s.deleted + s.clean)
2668 if explicitonly:
2667 if explicitonly:
2669 forget = [f for f in forget if match.exact(f)]
2668 forget = [f for f in forget if match.exact(f)]
2670
2669
2671 for subpath in sorted(wctx.substate):
2670 for subpath in sorted(wctx.substate):
2672 sub = wctx.sub(subpath)
2671 sub = wctx.sub(subpath)
2673 submatch = matchmod.subdirmatcher(subpath, match)
2672 submatch = matchmod.subdirmatcher(subpath, match)
2674 subprefix = repo.wvfs.reljoin(prefix, subpath)
2673 subprefix = repo.wvfs.reljoin(prefix, subpath)
2675 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2674 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2676 try:
2675 try:
2677 subbad, subforgot = sub.forget(
2676 subbad, subforgot = sub.forget(
2678 submatch,
2677 submatch,
2679 subprefix,
2678 subprefix,
2680 subuipathfn,
2679 subuipathfn,
2681 dryrun=dryrun,
2680 dryrun=dryrun,
2682 interactive=interactive,
2681 interactive=interactive,
2683 )
2682 )
2684 bad.extend([subpath + b'/' + f for f in subbad])
2683 bad.extend([subpath + b'/' + f for f in subbad])
2685 forgot.extend([subpath + b'/' + f for f in subforgot])
2684 forgot.extend([subpath + b'/' + f for f in subforgot])
2686 except error.LookupError:
2685 except error.LookupError:
2687 ui.status(
2686 ui.status(
2688 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2687 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2689 )
2688 )
2690
2689
2691 if not explicitonly:
2690 if not explicitonly:
2692 for f in match.files():
2691 for f in match.files():
2693 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2692 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2694 if f not in forgot:
2693 if f not in forgot:
2695 if repo.wvfs.exists(f):
2694 if repo.wvfs.exists(f):
2696 # Don't complain if the exact case match wasn't given.
2695 # Don't complain if the exact case match wasn't given.
2697 # But don't do this until after checking 'forgot', so
2696 # But don't do this until after checking 'forgot', so
2698 # that subrepo files aren't normalized, and this op is
2697 # that subrepo files aren't normalized, and this op is
2699 # purely from data cached by the status walk above.
2698 # purely from data cached by the status walk above.
2700 if repo.dirstate.normalize(f) in repo.dirstate:
2699 if repo.dirstate.normalize(f) in repo.dirstate:
2701 continue
2700 continue
2702 ui.warn(
2701 ui.warn(
2703 _(
2702 _(
2704 b'not removing %s: '
2703 b'not removing %s: '
2705 b'file is already untracked\n'
2704 b'file is already untracked\n'
2706 )
2705 )
2707 % uipathfn(f)
2706 % uipathfn(f)
2708 )
2707 )
2709 bad.append(f)
2708 bad.append(f)
2710
2709
2711 if interactive:
2710 if interactive:
2712 responses = _(
2711 responses = _(
2713 b'[Ynsa?]'
2712 b'[Ynsa?]'
2714 b'$$ &Yes, forget this file'
2713 b'$$ &Yes, forget this file'
2715 b'$$ &No, skip this file'
2714 b'$$ &No, skip this file'
2716 b'$$ &Skip remaining files'
2715 b'$$ &Skip remaining files'
2717 b'$$ Include &all remaining files'
2716 b'$$ Include &all remaining files'
2718 b'$$ &? (display help)'
2717 b'$$ &? (display help)'
2719 )
2718 )
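        # Note (illustrative, not from the original source): ui.promptchoice()
        # takes a single "$$"-separated string -- the question followed by the
        # choices, with "&" marking each hot key -- and returns the 0-based
        # index of the selected choice, hence the r == 0..4 checks below.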
2720 for filename in forget[:]:
2719 for filename in forget[:]:
2721 r = ui.promptchoice(
2720 r = ui.promptchoice(
2722 _(b'forget %s %s') % (uipathfn(filename), responses)
2721 _(b'forget %s %s') % (uipathfn(filename), responses)
2723 )
2722 )
2724 if r == 4: # ?
2723 if r == 4: # ?
2725 while r == 4:
2724 while r == 4:
2726 for c, t in ui.extractchoices(responses)[1]:
2725 for c, t in ui.extractchoices(responses)[1]:
2727 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
2726 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
2728 r = ui.promptchoice(
2727 r = ui.promptchoice(
2729 _(b'forget %s %s') % (uipathfn(filename), responses)
2728 _(b'forget %s %s') % (uipathfn(filename), responses)
2730 )
2729 )
2731 if r == 0: # yes
2730 if r == 0: # yes
2732 continue
2731 continue
2733 elif r == 1: # no
2732 elif r == 1: # no
2734 forget.remove(filename)
2733 forget.remove(filename)
2735 elif r == 2: # Skip
2734 elif r == 2: # Skip
2736 fnindex = forget.index(filename)
2735 fnindex = forget.index(filename)
2737 del forget[fnindex:]
2736 del forget[fnindex:]
2738 break
2737 break
2739 elif r == 3: # All
2738 elif r == 3: # All
2740 break
2739 break
2741
2740
2742 for f in forget:
2741 for f in forget:
2743 if ui.verbose or not match.exact(f) or interactive:
2742 if ui.verbose or not match.exact(f) or interactive:
2744 ui.status(
2743 ui.status(
2745 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2744 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2746 )
2745 )
2747
2746
2748 if not dryrun:
2747 if not dryrun:
2749 rejected = wctx.forget(forget, prefix)
2748 rejected = wctx.forget(forget, prefix)
2750 bad.extend(f for f in rejected if f in match.files())
2749 bad.extend(f for f in rejected if f in match.files())
2751 forgot.extend(f for f in forget if f not in rejected)
2750 forgot.extend(f for f in forget if f not in rejected)
2752 return bad, forgot
2751 return bad, forgot
2753
2752
2754
2753
2755 def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
2754 def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
2756 ret = 1
2755 ret = 1
2757
2756
2758 needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
2757 needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
2759 if fm.isplain() and not needsfctx:
2758 if fm.isplain() and not needsfctx:
2760 # Fast path. The speed-up comes from skipping the formatter, and batching
2759 # Fast path. The speed-up comes from skipping the formatter, and batching
2761 # calls to ui.write.
2760 # calls to ui.write.
2762 buf = []
2761 buf = []
2763 for f in ctx.matches(m):
2762 for f in ctx.matches(m):
2764 buf.append(fmt % uipathfn(f))
2763 buf.append(fmt % uipathfn(f))
2765 if len(buf) > 100:
2764 if len(buf) > 100:
2766 ui.write(b''.join(buf))
2765 ui.write(b''.join(buf))
2767 del buf[:]
2766 del buf[:]
2768 ret = 0
2767 ret = 0
2769 if buf:
2768 if buf:
2770 ui.write(b''.join(buf))
2769 ui.write(b''.join(buf))
2771 else:
2770 else:
2772 for f in ctx.matches(m):
2771 for f in ctx.matches(m):
2773 fm.startitem()
2772 fm.startitem()
2774 fm.context(ctx=ctx)
2773 fm.context(ctx=ctx)
2775 if needsfctx:
2774 if needsfctx:
2776 fc = ctx[f]
2775 fc = ctx[f]
2777 fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
2776 fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
2778 fm.data(path=f)
2777 fm.data(path=f)
2779 fm.plain(fmt % uipathfn(f))
2778 fm.plain(fmt % uipathfn(f))
2780 ret = 0
2779 ret = 0
2781
2780
2782 for subpath in sorted(ctx.substate):
2781 for subpath in sorted(ctx.substate):
2783 submatch = matchmod.subdirmatcher(subpath, m)
2782 submatch = matchmod.subdirmatcher(subpath, m)
2784 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2783 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2785 if subrepos or m.exact(subpath) or any(submatch.files()):
2784 if subrepos or m.exact(subpath) or any(submatch.files()):
2786 sub = ctx.sub(subpath)
2785 sub = ctx.sub(subpath)
2787 try:
2786 try:
2788 recurse = m.exact(subpath) or subrepos
2787 recurse = m.exact(subpath) or subrepos
2789 if (
2788 if (
2790 sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
2789 sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
2791 == 0
2790 == 0
2792 ):
2791 ):
2793 ret = 0
2792 ret = 0
2794 except error.LookupError:
2793 except error.LookupError:
2795 ui.status(
2794 ui.status(
2796 _(b"skipping missing subrepository: %s\n")
2795 _(b"skipping missing subrepository: %s\n")
2797 % uipathfn(subpath)
2796 % uipathfn(subpath)
2798 )
2797 )
2799
2798
2800 return ret
2799 return ret
2801
2800
2802
2801
2803 def remove(
2802 def remove(
2804 ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
2803 ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
2805 ):
2804 ):
2806 ret = 0
2805 ret = 0
2807 s = repo.status(match=m, clean=True)
2806 s = repo.status(match=m, clean=True)
2808 modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean
2807 modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean
2809
2808
2810 wctx = repo[None]
2809 wctx = repo[None]
2811
2810
2812 if warnings is None:
2811 if warnings is None:
2813 warnings = []
2812 warnings = []
2814 warn = True
2813 warn = True
2815 else:
2814 else:
2816 warn = False
2815 warn = False
2817
2816
2818 subs = sorted(wctx.substate)
2817 subs = sorted(wctx.substate)
2819 progress = ui.makeprogress(
2818 progress = ui.makeprogress(
2820 _(b'searching'), total=len(subs), unit=_(b'subrepos')
2819 _(b'searching'), total=len(subs), unit=_(b'subrepos')
2821 )
2820 )
2822 for subpath in subs:
2821 for subpath in subs:
2823 submatch = matchmod.subdirmatcher(subpath, m)
2822 submatch = matchmod.subdirmatcher(subpath, m)
2824 subprefix = repo.wvfs.reljoin(prefix, subpath)
2823 subprefix = repo.wvfs.reljoin(prefix, subpath)
2825 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2824 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2826 if subrepos or m.exact(subpath) or any(submatch.files()):
2825 if subrepos or m.exact(subpath) or any(submatch.files()):
2827 progress.increment()
2826 progress.increment()
2828 sub = wctx.sub(subpath)
2827 sub = wctx.sub(subpath)
2829 try:
2828 try:
2830 if sub.removefiles(
2829 if sub.removefiles(
2831 submatch,
2830 submatch,
2832 subprefix,
2831 subprefix,
2833 subuipathfn,
2832 subuipathfn,
2834 after,
2833 after,
2835 force,
2834 force,
2836 subrepos,
2835 subrepos,
2837 dryrun,
2836 dryrun,
2838 warnings,
2837 warnings,
2839 ):
2838 ):
2840 ret = 1
2839 ret = 1
2841 except error.LookupError:
2840 except error.LookupError:
2842 warnings.append(
2841 warnings.append(
2843 _(b"skipping missing subrepository: %s\n")
2842 _(b"skipping missing subrepository: %s\n")
2844 % uipathfn(subpath)
2843 % uipathfn(subpath)
2845 )
2844 )
2846 progress.complete()
2845 progress.complete()
2847
2846
2848 # warn about failure to delete explicit files/dirs
2847 # warn about failure to delete explicit files/dirs
2849 deleteddirs = pathutil.dirs(deleted)
2848 deleteddirs = pathutil.dirs(deleted)
2850 files = m.files()
2849 files = m.files()
2851 progress = ui.makeprogress(
2850 progress = ui.makeprogress(
2852 _(b'deleting'), total=len(files), unit=_(b'files')
2851 _(b'deleting'), total=len(files), unit=_(b'files')
2853 )
2852 )
2854 for f in files:
2853 for f in files:
2855
2854
2856 def insubrepo():
2855 def insubrepo():
2857 for subpath in wctx.substate:
2856 for subpath in wctx.substate:
2858 if f.startswith(subpath + b'/'):
2857 if f.startswith(subpath + b'/'):
2859 return True
2858 return True
2860 return False
2859 return False
2861
2860
2862 progress.increment()
2861 progress.increment()
2863 isdir = f in deleteddirs or wctx.hasdir(f)
2862 isdir = f in deleteddirs or wctx.hasdir(f)
2864 if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
2863 if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
2865 continue
2864 continue
2866
2865
2867 if repo.wvfs.exists(f):
2866 if repo.wvfs.exists(f):
2868 if repo.wvfs.isdir(f):
2867 if repo.wvfs.isdir(f):
2869 warnings.append(
2868 warnings.append(
2870 _(b'not removing %s: no tracked files\n') % uipathfn(f)
2869 _(b'not removing %s: no tracked files\n') % uipathfn(f)
2871 )
2870 )
2872 else:
2871 else:
2873 warnings.append(
2872 warnings.append(
2874 _(b'not removing %s: file is untracked\n') % uipathfn(f)
2873 _(b'not removing %s: file is untracked\n') % uipathfn(f)
2875 )
2874 )
2876 # missing files will generate a warning elsewhere
2875 # missing files will generate a warning elsewhere
2877 ret = 1
2876 ret = 1
2878 progress.complete()
2877 progress.complete()
2879
2878
2880 if force:
2879 if force:
2881 list = modified + deleted + clean + added
2880 list = modified + deleted + clean + added
2882 elif after:
2881 elif after:
2883 list = deleted
2882 list = deleted
2884 remaining = modified + added + clean
2883 remaining = modified + added + clean
2885 progress = ui.makeprogress(
2884 progress = ui.makeprogress(
2886 _(b'skipping'), total=len(remaining), unit=_(b'files')
2885 _(b'skipping'), total=len(remaining), unit=_(b'files')
2887 )
2886 )
2888 for f in remaining:
2887 for f in remaining:
2889 progress.increment()
2888 progress.increment()
2890 if ui.verbose or (f in files):
2889 if ui.verbose or (f in files):
2891 warnings.append(
2890 warnings.append(
2892 _(b'not removing %s: file still exists\n') % uipathfn(f)
2891 _(b'not removing %s: file still exists\n') % uipathfn(f)
2893 )
2892 )
2894 ret = 1
2893 ret = 1
2895 progress.complete()
2894 progress.complete()
2896 else:
2895 else:
2897 list = deleted + clean
2896 list = deleted + clean
2898 progress = ui.makeprogress(
2897 progress = ui.makeprogress(
2899 _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
2898 _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
2900 )
2899 )
2901 for f in modified:
2900 for f in modified:
2902 progress.increment()
2901 progress.increment()
2903 warnings.append(
2902 warnings.append(
2904 _(
2903 _(
2905 b'not removing %s: file is modified (use -f'
2904 b'not removing %s: file is modified (use -f'
2906 b' to force removal)\n'
2905 b' to force removal)\n'
2907 )
2906 )
2908 % uipathfn(f)
2907 % uipathfn(f)
2909 )
2908 )
2910 ret = 1
2909 ret = 1
2911 for f in added:
2910 for f in added:
2912 progress.increment()
2911 progress.increment()
2913 warnings.append(
2912 warnings.append(
2914 _(
2913 _(
2915 b"not removing %s: file has been marked for add"
2914 b"not removing %s: file has been marked for add"
2916 b" (use 'hg forget' to undo add)\n"
2915 b" (use 'hg forget' to undo add)\n"
2917 )
2916 )
2918 % uipathfn(f)
2917 % uipathfn(f)
2919 )
2918 )
2920 ret = 1
2919 ret = 1
2921 progress.complete()
2920 progress.complete()
2922
2921
2923 list = sorted(list)
2922 list = sorted(list)
2924 progress = ui.makeprogress(
2923 progress = ui.makeprogress(
2925 _(b'deleting'), total=len(list), unit=_(b'files')
2924 _(b'deleting'), total=len(list), unit=_(b'files')
2926 )
2925 )
2927 for f in list:
2926 for f in list:
2928 if ui.verbose or not m.exact(f):
2927 if ui.verbose or not m.exact(f):
2929 progress.increment()
2928 progress.increment()
2930 ui.status(
2929 ui.status(
2931 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2930 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2932 )
2931 )
2933 progress.complete()
2932 progress.complete()
2934
2933
2935 if not dryrun:
2934 if not dryrun:
2936 with repo.wlock():
2935 with repo.wlock():
2937 if not after:
2936 if not after:
2938 for f in list:
2937 for f in list:
2939 if f in added:
2938 if f in added:
2940 continue # we never unlink added files on remove
2939 continue # we never unlink added files on remove
2941 rmdir = repo.ui.configbool(
2940 rmdir = repo.ui.configbool(
2942 b'experimental', b'removeemptydirs'
2941 b'experimental', b'removeemptydirs'
2943 )
2942 )
2944 repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
2943 repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
2945 repo[None].forget(list)
2944 repo[None].forget(list)
2946
2945
2947 if warn:
2946 if warn:
2948 for warning in warnings:
2947 for warning in warnings:
2949 ui.warn(warning)
2948 ui.warn(warning)
2950
2949
2951 return ret
2950 return ret
2952
2951
2953
2952
2954 def _catfmtneedsdata(fm):
2953 def _catfmtneedsdata(fm):
2955 return not fm.datahint() or b'data' in fm.datahint()
2954 return not fm.datahint() or b'data' in fm.datahint()
2956
2955
2957
2956
2958 def _updatecatformatter(fm, ctx, matcher, path, decode):
2957 def _updatecatformatter(fm, ctx, matcher, path, decode):
2959 """Hook for adding data to the formatter used by ``hg cat``.
2958 """Hook for adding data to the formatter used by ``hg cat``.
2960
2959
2961 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2960 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2962 this method first."""
2961 this method first."""
2963
2962
2964 # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
2963 # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
2965 # wasn't requested.
2964 # wasn't requested.
2966 data = b''
2965 data = b''
2967 if _catfmtneedsdata(fm):
2966 if _catfmtneedsdata(fm):
2968 data = ctx[path].data()
2967 data = ctx[path].data()
2969 if decode:
2968 if decode:
2970 data = ctx.repo().wwritedata(path, data)
2969 data = ctx.repo().wwritedata(path, data)
2971 fm.startitem()
2970 fm.startitem()
2972 fm.context(ctx=ctx)
2971 fm.context(ctx=ctx)
2973 fm.write(b'data', b'%s', data)
2972 fm.write(b'data', b'%s', data)
2974 fm.data(path=path)
2973 fm.data(path=path)
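
# Hedged sketch (assumed wrapper name, not from the original source) of how
# an extension such as lfs can hook this formatter, per the docstring above:
#
#   from mercurial import cmdutil, extensions
#
#   def _mycatformatter(orig, fm, ctx, matcher, path, decode):
#       orig(fm, ctx, matcher, path, decode)   # run the base hook first
#       fm.data(myextrafield=b'...')           # then inject extra keywords
#
#   def uisetup(ui):
#       extensions.wrapfunction(cmdutil, '_updatecatformatter', _mycatformatter)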
2975
2974
2976
2975
2977 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
2976 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
2978 err = 1
2977 err = 1
2979 opts = pycompat.byteskwargs(opts)
2978 opts = pycompat.byteskwargs(opts)
2980
2979
2981 def write(path):
2980 def write(path):
2982 filename = None
2981 filename = None
2983 if fntemplate:
2982 if fntemplate:
2984 filename = makefilename(
2983 filename = makefilename(
2985 ctx, fntemplate, pathname=os.path.join(prefix, path)
2984 ctx, fntemplate, pathname=os.path.join(prefix, path)
2986 )
2985 )
2987 # attempt to create the directory if it does not already exist
2986 # attempt to create the directory if it does not already exist
2988 try:
2987 try:
2989 os.makedirs(os.path.dirname(filename))
2988 os.makedirs(os.path.dirname(filename))
2990 except OSError:
2989 except OSError:
2991 pass
2990 pass
2992 with formatter.maybereopen(basefm, filename) as fm:
2991 with formatter.maybereopen(basefm, filename) as fm:
2993 _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))
2992 _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))
2994
2993
2995 # Automation often uses hg cat on single files, so special case it
2994 # Automation often uses hg cat on single files, so special case it
2996 # for performance to avoid the cost of parsing the manifest.
2995 # for performance to avoid the cost of parsing the manifest.
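    # e.g. (illustrative) "hg cat -r . setup.py" takes this branch, while
    # "hg cat 'glob:*.py'" falls through to the general walk below.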
2997 if len(matcher.files()) == 1 and not matcher.anypats():
2996 if len(matcher.files()) == 1 and not matcher.anypats():
2998 file = matcher.files()[0]
2997 file = matcher.files()[0]
2999 mfl = repo.manifestlog
2998 mfl = repo.manifestlog
3000 mfnode = ctx.manifestnode()
2999 mfnode = ctx.manifestnode()
3001 try:
3000 try:
3002 if mfnode and mfl[mfnode].find(file)[0]:
3001 if mfnode and mfl[mfnode].find(file)[0]:
3003 if _catfmtneedsdata(basefm):
3002 if _catfmtneedsdata(basefm):
3004 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
3003 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
3005 write(file)
3004 write(file)
3006 return 0
3005 return 0
3007 except KeyError:
3006 except KeyError:
3008 pass
3007 pass
3009
3008
3010 if _catfmtneedsdata(basefm):
3009 if _catfmtneedsdata(basefm):
3011 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
3010 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
3012
3011
3013 for abs in ctx.walk(matcher):
3012 for abs in ctx.walk(matcher):
3014 write(abs)
3013 write(abs)
3015 err = 0
3014 err = 0
3016
3015
3017 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3016 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3018 for subpath in sorted(ctx.substate):
3017 for subpath in sorted(ctx.substate):
3019 sub = ctx.sub(subpath)
3018 sub = ctx.sub(subpath)
3020 try:
3019 try:
3021 submatch = matchmod.subdirmatcher(subpath, matcher)
3020 submatch = matchmod.subdirmatcher(subpath, matcher)
3022 subprefix = os.path.join(prefix, subpath)
3021 subprefix = os.path.join(prefix, subpath)
3023 if not sub.cat(
3022 if not sub.cat(
3024 submatch,
3023 submatch,
3025 basefm,
3024 basefm,
3026 fntemplate,
3025 fntemplate,
3027 subprefix,
3026 subprefix,
3028 **pycompat.strkwargs(opts)
3027 **pycompat.strkwargs(opts)
3029 ):
3028 ):
3030 err = 0
3029 err = 0
3031 except error.RepoLookupError:
3030 except error.RepoLookupError:
3032 ui.status(
3031 ui.status(
3033 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
3032 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
3034 )
3033 )
3035
3034
3036 return err
3035 return err
3037
3036
3038
3037
3039 def commit(ui, repo, commitfunc, pats, opts):
3038 def commit(ui, repo, commitfunc, pats, opts):
3040 '''commit the specified files or all outstanding changes'''
3039 '''commit the specified files or all outstanding changes'''
3041 date = opts.get(b'date')
3040 date = opts.get(b'date')
3042 if date:
3041 if date:
3043 opts[b'date'] = dateutil.parsedate(date)
3042 opts[b'date'] = dateutil.parsedate(date)
3044 message = logmessage(ui, opts)
3043 message = logmessage(ui, opts)
3045 matcher = scmutil.match(repo[None], pats, opts)
3044 matcher = scmutil.match(repo[None], pats, opts)
3046
3045
3047 dsguard = None
3046 dsguard = None
3048 # extract addremove carefully -- this function can be called from a command
3047 # extract addremove carefully -- this function can be called from a command
3049 # that doesn't support addremove
3048 # that doesn't support addremove
3050 if opts.get(b'addremove'):
3049 if opts.get(b'addremove'):
3051 dsguard = dirstateguard.dirstateguard(repo, b'commit')
3050 dsguard = dirstateguard.dirstateguard(repo, b'commit')
3052 with dsguard or util.nullcontextmanager():
3051 with dsguard or util.nullcontextmanager():
3053 if dsguard:
3052 if dsguard:
3054 relative = scmutil.anypats(pats, opts)
3053 relative = scmutil.anypats(pats, opts)
3055 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
3054 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
3056 if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
3055 if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
3057 raise error.Abort(
3056 raise error.Abort(
3058 _(b"failed to mark all new/missing files as added/removed")
3057 _(b"failed to mark all new/missing files as added/removed")
3059 )
3058 )
3060
3059
3061 return commitfunc(ui, repo, message, matcher, opts)
3060 return commitfunc(ui, repo, message, matcher, opts)
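
# Hedged sketch (assumed caller, not from the original source): commands
# built on this helper supply a ``commitfunc`` matching the call above,
# roughly
#
#   def commitfunc(ui, repo, message, match, opts):
#       return repo.commit(
#           message, opts.get(b'user'), opts.get(b'date'), match,
#           editor=getcommiteditor(edit=opts.get(b'edit'), editform=b'commit'),
#       )
#
#   node = commit(ui, repo, commitfunc, pats, opts)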
3062
3061
3063
3062
3064 def samefile(f, ctx1, ctx2):
3063 def samefile(f, ctx1, ctx2):
3065 if f in ctx1.manifest():
3064 if f in ctx1.manifest():
3066 a = ctx1.filectx(f)
3065 a = ctx1.filectx(f)
3067 if f in ctx2.manifest():
3066 if f in ctx2.manifest():
3068 b = ctx2.filectx(f)
3067 b = ctx2.filectx(f)
3069 return not a.cmp(b) and a.flags() == b.flags()
3068 return not a.cmp(b) and a.flags() == b.flags()
3070 else:
3069 else:
3071 return False
3070 return False
3072 else:
3071 else:
3073 return f not in ctx2.manifest()
3072 return f not in ctx2.manifest()
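
# Illustrative summary (not part of the original source): samefile() returns
# True when ``f`` is effectively unchanged between the two contexts --
# identical content and flags when present in both, or absent from both --
# and False whenever the file exists on only one side or differs.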
3074
3073
3075
3074
3076 def amend(ui, repo, old, extra, pats, opts):
3075 def amend(ui, repo, old, extra, pats, opts):
3077 # avoid cycle context -> subrepo -> cmdutil
3076 # avoid cycle context -> subrepo -> cmdutil
3078 from . import context
3077 from . import context
3079
3078
3080 # amend will reuse the existing user if not specified, but the obsolete
3079 # amend will reuse the existing user if not specified, but the obsolete
3081 # marker creation requires that the current user's name is specified.
3080 # marker creation requires that the current user's name is specified.
3082 if obsolete.isenabled(repo, obsolete.createmarkersopt):
3081 if obsolete.isenabled(repo, obsolete.createmarkersopt):
3083 ui.username() # raise exception if username not set
3082 ui.username() # raise exception if username not set
3084
3083
3085 ui.note(_(b'amending changeset %s\n') % old)
3084 ui.note(_(b'amending changeset %s\n') % old)
3086 base = old.p1()
3085 base = old.p1()
3087
3086
3088 with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
3087 with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
3089 # Participating changesets:
3088 # Participating changesets:
3090 #
3089 #
3091 # wctx o - workingctx that contains changes from working copy
3090 # wctx o - workingctx that contains changes from working copy
3092 # | to go into amending commit
3091 # | to go into amending commit
3093 # |
3092 # |
3094 # old o - changeset to amend
3093 # old o - changeset to amend
3095 # |
3094 # |
3096 # base o - first parent of the changeset to amend
3095 # base o - first parent of the changeset to amend
3097 wctx = repo[None]
3096 wctx = repo[None]
3098
3097
3099 # Copy to avoid mutating input
3098 # Copy to avoid mutating input
3100 extra = extra.copy()
3099 extra = extra.copy()
3101 # Update extra dict from amended commit (e.g. to preserve graft
3100 # Update extra dict from amended commit (e.g. to preserve graft
3102 # source)
3101 # source)
3103 extra.update(old.extra())
3102 extra.update(old.extra())
3104
3103
3105 # Also update it from the wctx
3104 # Also update it from the wctx
3106 extra.update(wctx.extra())
3105 extra.update(wctx.extra())
3107
3106
3108 # date-only change should be ignored?
3107 # date-only change should be ignored?
3109 datemaydiffer = resolvecommitoptions(ui, opts)
3108 datemaydiffer = resolvecommitoptions(ui, opts)
3110
3109
3111 date = old.date()
3110 date = old.date()
3112 if opts.get(b'date'):
3111 if opts.get(b'date'):
3113 date = dateutil.parsedate(opts.get(b'date'))
3112 date = dateutil.parsedate(opts.get(b'date'))
3114 user = opts.get(b'user') or old.user()
3113 user = opts.get(b'user') or old.user()
3115
3114
3116 if len(old.parents()) > 1:
3115 if len(old.parents()) > 1:
3117 # ctx.files() isn't reliable for merges, so fall back to the
3116 # ctx.files() isn't reliable for merges, so fall back to the
3118 # slower repo.status() method
3117 # slower repo.status() method
3119 st = base.status(old)
3118 st = base.status(old)
3120 files = set(st.modified) | set(st.added) | set(st.removed)
3119 files = set(st.modified) | set(st.added) | set(st.removed)
3121 else:
3120 else:
3122 files = set(old.files())
3121 files = set(old.files())
3123
3122
3124 # add/remove the files to the working copy if the "addremove" option
3123 # add/remove the files to the working copy if the "addremove" option
3125 # was specified.
3124 # was specified.
3126 matcher = scmutil.match(wctx, pats, opts)
3125 matcher = scmutil.match(wctx, pats, opts)
3127 relative = scmutil.anypats(pats, opts)
3126 relative = scmutil.anypats(pats, opts)
3128 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
3127 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
3129 if opts.get(b'addremove') and scmutil.addremove(
3128 if opts.get(b'addremove') and scmutil.addremove(
3130 repo, matcher, b"", uipathfn, opts
3129 repo, matcher, b"", uipathfn, opts
3131 ):
3130 ):
3132 raise error.Abort(
3131 raise error.Abort(
3133 _(b"failed to mark all new/missing files as added/removed")
3132 _(b"failed to mark all new/missing files as added/removed")
3134 )
3133 )
3135
3134
3136 # Check subrepos. This depends on in-place wctx._status update in
3135 # Check subrepos. This depends on in-place wctx._status update in
3137 # subrepo.precommit(). To minimize the risk of this hack, we do
3136 # subrepo.precommit(). To minimize the risk of this hack, we do
3138 # nothing if .hgsub does not exist.
3137 # nothing if .hgsub does not exist.
3139 if b'.hgsub' in wctx or b'.hgsub' in old:
3138 if b'.hgsub' in wctx or b'.hgsub' in old:
3140 subs, commitsubs, newsubstate = subrepoutil.precommit(
3139 subs, commitsubs, newsubstate = subrepoutil.precommit(
3141 ui, wctx, wctx._status, matcher
3140 ui, wctx, wctx._status, matcher
3142 )
3141 )
3143 # amend should abort if commitsubrepos is enabled
3142 # amend should abort if commitsubrepos is enabled
3144 assert not commitsubs
3143 assert not commitsubs
3145 if subs:
3144 if subs:
3146 subrepoutil.writestate(repo, newsubstate)
3145 subrepoutil.writestate(repo, newsubstate)
3147
3146
3148 ms = mergestatemod.mergestate.read(repo)
3147 ms = mergestatemod.mergestate.read(repo)
3149 mergeutil.checkunresolved(ms)
3148 mergeutil.checkunresolved(ms)
3150
3149
3151 filestoamend = {f for f in wctx.files() if matcher(f)}
3150 filestoamend = {f for f in wctx.files() if matcher(f)}
3152
3151
3153 changes = len(filestoamend) > 0
3152 changes = len(filestoamend) > 0
3154 if changes:
3153 if changes:
3155 # Recompute copies (avoid recording a -> b -> a)
3154 # Recompute copies (avoid recording a -> b -> a)
3156 copied = copies.pathcopies(base, wctx, matcher)
3155 copied = copies.pathcopies(base, wctx, matcher)
3157 if old.p2().rev() != nullrev:
3156 if old.p2().rev() != nullrev:
3158 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
3157 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
3159
3158
3160 # Prune files which were reverted by the updates: if old
3159 # Prune files which were reverted by the updates: if old
3161 # introduced file X and the file was renamed in the working
3160 # introduced file X and the file was renamed in the working
3162 # copy, then those two files are the same and
3161 # copy, then those two files are the same and
3163 # we can discard X from our list of files. Likewise if X
3162 # we can discard X from our list of files. Likewise if X
3164 # was removed, it's no longer relevant. If X is missing (aka
3163 # was removed, it's no longer relevant. If X is missing (aka
3165 # deleted), old X must be preserved.
3164 # deleted), old X must be preserved.
3166 files.update(filestoamend)
3165 files.update(filestoamend)
3167 files = [
3166 files = [
3168 f
3167 f
3169 for f in files
3168 for f in files
3170 if (f not in filestoamend or not samefile(f, wctx, base))
3169 if (f not in filestoamend or not samefile(f, wctx, base))
3171 ]
3170 ]
3172
3171
3173 def filectxfn(repo, ctx_, path):
3172 def filectxfn(repo, ctx_, path):
3174 try:
3173 try:
3175 # If the file being considered is not amongst the files
3174 # If the file being considered is not amongst the files
3176 # to be amended, we should return the file context from the
3175 # to be amended, we should return the file context from the
3177 # old changeset. This avoids issues when only some files in
3176 # old changeset. This avoids issues when only some files in
3178 # the working copy are being amended but there are also
3177 # the working copy are being amended but there are also
3179 # changes to other files from the old changeset.
3178 # changes to other files from the old changeset.
3180 if path not in filestoamend:
3179 if path not in filestoamend:
3181 return old.filectx(path)
3180 return old.filectx(path)
3182
3181
3183 # Return None for removed files.
3182 # Return None for removed files.
3184 if path in wctx.removed():
3183 if path in wctx.removed():
3185 return None
3184 return None
3186
3185
3187 fctx = wctx[path]
3186 fctx = wctx[path]
3188 flags = fctx.flags()
3187 flags = fctx.flags()
3189 mctx = context.memfilectx(
3188 mctx = context.memfilectx(
3190 repo,
3189 repo,
3191 ctx_,
3190 ctx_,
3192 fctx.path(),
3191 fctx.path(),
3193 fctx.data(),
3192 fctx.data(),
3194 islink=b'l' in flags,
3193 islink=b'l' in flags,
3195 isexec=b'x' in flags,
3194 isexec=b'x' in flags,
3196 copysource=copied.get(path),
3195 copysource=copied.get(path),
3197 )
3196 )
3198 return mctx
3197 return mctx
3199 except KeyError:
3198 except KeyError:
3200 return None
3199 return None
3201
3200
3202 else:
3201 else:
3203 ui.note(_(b'copying changeset %s to %s\n') % (old, base))
3202 ui.note(_(b'copying changeset %s to %s\n') % (old, base))
3204
3203
3205 # Use version of files as in the old cset
3204 # Use version of files as in the old cset
3206 def filectxfn(repo, ctx_, path):
3205 def filectxfn(repo, ctx_, path):
3207 try:
3206 try:
3208 return old.filectx(path)
3207 return old.filectx(path)
3209 except KeyError:
3208 except KeyError:
3210 return None
3209 return None
3211
3210
3212 # See if we got a message from -m or -l, if not, open the editor with
3211 # See if we got a message from -m or -l, if not, open the editor with
3213 # the message of the changeset to amend.
3212 # the message of the changeset to amend.
3214 message = logmessage(ui, opts)
3213 message = logmessage(ui, opts)
3215
3214
3216 editform = mergeeditform(old, b'commit.amend')
3215 editform = mergeeditform(old, b'commit.amend')
3217
3216
3218 if not message:
3217 if not message:
3219 message = old.description()
3218 message = old.description()
3220 # Default if message isn't provided and --edit is not passed is to
3219 # Default if message isn't provided and --edit is not passed is to
3221 # invoke editor, but allow --no-edit. If somehow we don't have any
3220 # invoke editor, but allow --no-edit. If somehow we don't have any
3222 # description, let's always start the editor.
3221 # description, let's always start the editor.
3223 doedit = not message or opts.get(b'edit') in [True, None]
3222 doedit = not message or opts.get(b'edit') in [True, None]
3224 else:
3223 else:
3225 # Default if message is provided is to not invoke editor, but allow
3224 # Default if message is provided is to not invoke editor, but allow
3226 # --edit.
3225 # --edit.
3227 doedit = opts.get(b'edit') is True
3226 doedit = opts.get(b'edit') is True
3228 editor = getcommiteditor(edit=doedit, editform=editform)
3227 editor = getcommiteditor(edit=doedit, editform=editform)
3229
3228
3230 pureextra = extra.copy()
3229 pureextra = extra.copy()
3231 extra[b'amend_source'] = old.hex()
3230 extra[b'amend_source'] = old.hex()
3232
3231
3233 new = context.memctx(
3232 new = context.memctx(
3234 repo,
3233 repo,
3235 parents=[base.node(), old.p2().node()],
3234 parents=[base.node(), old.p2().node()],
3236 text=message,
3235 text=message,
3237 files=files,
3236 files=files,
3238 filectxfn=filectxfn,
3237 filectxfn=filectxfn,
3239 user=user,
3238 user=user,
3240 date=date,
3239 date=date,
3241 extra=extra,
3240 extra=extra,
3242 editor=editor,
3241 editor=editor,
3243 )
3242 )
3244
3243
3245 newdesc = changelog.stripdesc(new.description())
3244 newdesc = changelog.stripdesc(new.description())
3246 if (
3245 if (
3247 (not changes)
3246 (not changes)
3248 and newdesc == old.description()
3247 and newdesc == old.description()
3249 and user == old.user()
3248 and user == old.user()
3250 and (date == old.date() or datemaydiffer)
3249 and (date == old.date() or datemaydiffer)
3251 and pureextra == old.extra()
3250 and pureextra == old.extra()
3252 ):
3251 ):
3253 # nothing changed. continuing here would create a new node
3252 # nothing changed. continuing here would create a new node
3254 # anyway because of the amend_source noise.
3253 # anyway because of the amend_source noise.
3255 #
3254 #
3256 # This is not what we expect from amend.
3255 # This is not what we expect from amend.
3257 return old.node()
3256 return old.node()
3258
3257
3259 commitphase = None
3258 commitphase = None
3260 if opts.get(b'secret'):
3259 if opts.get(b'secret'):
3261 commitphase = phases.secret
3260 commitphase = phases.secret
3262 newid = repo.commitctx(new)
3261 newid = repo.commitctx(new)
3263
3262
3264 # Reroute the working copy parent to the new changeset
3263 # Reroute the working copy parent to the new changeset
3265 repo.setparents(newid, nullid)
3264 repo.setparents(newid, nullid)
3266 mapping = {old.node(): (newid,)}
3265 mapping = {old.node(): (newid,)}
3267 obsmetadata = None
3266 obsmetadata = None
3268 if opts.get(b'note'):
3267 if opts.get(b'note'):
3269 obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
3268 obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
3270 backup = ui.configbool(b'rewrite', b'backup-bundle')
3269 backup = ui.configbool(b'rewrite', b'backup-bundle')
3271 scmutil.cleanupnodes(
3270 scmutil.cleanupnodes(
3272 repo,
3271 repo,
3273 mapping,
3272 mapping,
3274 b'amend',
3273 b'amend',
3275 metadata=obsmetadata,
3274 metadata=obsmetadata,
3276 fixphase=True,
3275 fixphase=True,
3277 targetphase=commitphase,
3276 targetphase=commitphase,
3278 backup=backup,
3277 backup=backup,
3279 )
3278 )
3280
3279
3281 # Fixing the dirstate because localrepo.commitctx does not update
3280 # Fixing the dirstate because localrepo.commitctx does not update
3282 # it. This is rather convenient because we did not need to update
3281 # it. This is rather convenient because we did not need to update
3283 # the dirstate for all the files in the new commit which commitctx
3282 # the dirstate for all the files in the new commit which commitctx
3284 # could have done if it updated the dirstate. Now, we can
3283 # could have done if it updated the dirstate. Now, we can
3285 # selectively update the dirstate only for the amended files.
3284 # selectively update the dirstate only for the amended files.
3286 dirstate = repo.dirstate
3285 dirstate = repo.dirstate
3287
3286
3288 # Update the state of the files which were added and modified in the
3287 # Update the state of the files which were added and modified in the
3289 # amend to "normal" in the dirstate. We need to use "normallookup" since
3288 # amend to "normal" in the dirstate. We need to use "normallookup" since
3290 # the files may have changed since the command started; using "normal"
3289 # the files may have changed since the command started; using "normal"
3291 # would mark them as clean but with uncommitted contents.
3290 # would mark them as clean but with uncommitted contents.
3292 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3291 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3293 for f in normalfiles:
3292 for f in normalfiles:
3294 dirstate.normallookup(f)
3293 dirstate.normallookup(f)
3295
3294
3296 # Update the state of files which were removed in the amend
3295 # Update the state of files which were removed in the amend
3297 # to "removed" in the dirstate.
3296 # to "removed" in the dirstate.
3298 removedfiles = set(wctx.removed()) & filestoamend
3297 removedfiles = set(wctx.removed()) & filestoamend
3299 for f in removedfiles:
3298 for f in removedfiles:
3300 dirstate.drop(f)
3299 dirstate.drop(f)
3301
3300
3302 return newid
3301 return newid
3303
3302
3304
3303
3305 def commiteditor(repo, ctx, subs, editform=b''):
3304 def commiteditor(repo, ctx, subs, editform=b''):
3306 if ctx.description():
3305 if ctx.description():
3307 return ctx.description()
3306 return ctx.description()
3308 return commitforceeditor(
3307 return commitforceeditor(
3309 repo, ctx, subs, editform=editform, unchangedmessagedetection=True
3308 repo, ctx, subs, editform=editform, unchangedmessagedetection=True
3310 )
3309 )
3311
3310
3312
3311
3313 def commitforceeditor(
3312 def commitforceeditor(
3314 repo,
3313 repo,
3315 ctx,
3314 ctx,
3316 subs,
3315 subs,
3317 finishdesc=None,
3316 finishdesc=None,
3318 extramsg=None,
3317 extramsg=None,
3319 editform=b'',
3318 editform=b'',
3320 unchangedmessagedetection=False,
3319 unchangedmessagedetection=False,
3321 ):
3320 ):
3322 if not extramsg:
3321 if not extramsg:
3323 extramsg = _(b"Leave message empty to abort commit.")
3322 extramsg = _(b"Leave message empty to abort commit.")
3324
3323
3325 forms = [e for e in editform.split(b'.') if e]
3324 forms = [e for e in editform.split(b'.') if e]
3326 forms.insert(0, b'changeset')
3325 forms.insert(0, b'changeset')
3327 templatetext = None
3326 templatetext = None
3328 while forms:
3327 while forms:
3329 ref = b'.'.join(forms)
3328 ref = b'.'.join(forms)
3330 if repo.ui.config(b'committemplate', ref):
3329 if repo.ui.config(b'committemplate', ref):
3331 templatetext = committext = buildcommittemplate(
3330 templatetext = committext = buildcommittemplate(
3332 repo, ctx, subs, extramsg, ref
3331 repo, ctx, subs, extramsg, ref
3333 )
3332 )
3334 break
3333 break
3335 forms.pop()
3334 forms.pop()
3336 else:
3335 else:
3337 committext = buildcommittext(repo, ctx, subs, extramsg)
3336 committext = buildcommittext(repo, ctx, subs, extramsg)
3338
3337
3339 # run editor in the repository root
3338 # run editor in the repository root
3340 olddir = encoding.getcwd()
3339 olddir = encoding.getcwd()
3341 os.chdir(repo.root)
3340 os.chdir(repo.root)
3342
3341
3343 # make in-memory changes visible to external process
3342 # make in-memory changes visible to external process
3344 tr = repo.currenttransaction()
3343 tr = repo.currenttransaction()
3345 repo.dirstate.write(tr)
3344 repo.dirstate.write(tr)
3346 pending = tr and tr.writepending() and repo.root
3345 pending = tr and tr.writepending() and repo.root
3347
3346
3348 editortext = repo.ui.edit(
3347 editortext = repo.ui.edit(
3349 committext,
3348 committext,
3350 ctx.user(),
3349 ctx.user(),
3351 ctx.extra(),
3350 ctx.extra(),
3352 editform=editform,
3351 editform=editform,
3353 pending=pending,
3352 pending=pending,
3354 repopath=repo.path,
3353 repopath=repo.path,
3355 action=b'commit',
3354 action=b'commit',
3356 )
3355 )
3357 text = editortext
3356 text = editortext
3358
3357
3359 # strip away anything below this special string (used for editors that want
3358 # strip away anything below this special string (used for editors that want
3360 # to display the diff)
3359 # to display the diff)
3361 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3360 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3362 if stripbelow:
3361 if stripbelow:
3363 text = text[: stripbelow.start()]
3362 text = text[: stripbelow.start()]
3364
3363
3365 text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
3364 text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
3366 os.chdir(olddir)
3365 os.chdir(olddir)
3367
3366
3368 if finishdesc:
3367 if finishdesc:
3369 text = finishdesc(text)
3368 text = finishdesc(text)
3370 if not text.strip():
3369 if not text.strip():
3371 raise error.Abort(_(b"empty commit message"))
3370 raise error.Abort(_(b"empty commit message"))
3372 if unchangedmessagedetection and editortext == templatetext:
3371 if unchangedmessagedetection and editortext == templatetext:
3373 raise error.Abort(_(b"commit message unchanged"))
3372 raise error.Abort(_(b"commit message unchanged"))
3374
3373
3375 return text
3374 return text
3376
3375
3377
3376
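# --- Illustrative sketch (not part of Mercurial's source): commitforceeditor()
# above strips helper lines from the editor buffer with
# re.sub(b"(?m)^HG:.*(\n|$)", b"", text).  The same regex in isolation:
import re

def strip_hg_lines(text):
    """Drop every line beginning with 'HG:' from an editor buffer (bytes)."""
    return re.sub(b"(?m)^HG:.*(\n|$)", b"", text)

sample = (
    b"fix the frobnicator\n"
    b"\n"
    b"HG: Enter commit message. Lines beginning with 'HG:' are removed.\n"
    b"HG: changed frob.py\n"
)
assert strip_hg_lines(sample) == b"fix the frobnicator\n\n"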
3378 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3377 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3379 ui = repo.ui
3378 ui = repo.ui
3380 spec = formatter.reference_templatespec(ref)
3379 spec = formatter.reference_templatespec(ref)
3381 t = logcmdutil.changesettemplater(ui, repo, spec)
3380 t = logcmdutil.changesettemplater(ui, repo, spec)
3382 t.t.cache.update(
3381 t.t.cache.update(
3383 (k, templater.unquotestring(v))
3382 (k, templater.unquotestring(v))
3384 for k, v in repo.ui.configitems(b'committemplate')
3383 for k, v in repo.ui.configitems(b'committemplate')
3385 )
3384 )
3386
3385
3387 if not extramsg:
3386 if not extramsg:
3388 extramsg = b'' # ensure that extramsg is string
3387 extramsg = b'' # ensure that extramsg is string
3389
3388
3390 ui.pushbuffer()
3389 ui.pushbuffer()
3391 t.show(ctx, extramsg=extramsg)
3390 t.show(ctx, extramsg=extramsg)
3392 return ui.popbuffer()
3391 return ui.popbuffer()
3393
3392
3394
3393
3395 def hgprefix(msg):
3394 def hgprefix(msg):
3396 return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a])
3395 return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a])
3397
3396
3398
3397
3399 def buildcommittext(repo, ctx, subs, extramsg):
3398 def buildcommittext(repo, ctx, subs, extramsg):
3400 edittext = []
3399 edittext = []
3401 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3400 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3402 if ctx.description():
3401 if ctx.description():
3403 edittext.append(ctx.description())
3402 edittext.append(ctx.description())
3404 edittext.append(b"")
3403 edittext.append(b"")
3405 edittext.append(b"") # Empty line between message and comments.
3404 edittext.append(b"") # Empty line between message and comments.
3406 edittext.append(
3405 edittext.append(
3407 hgprefix(
3406 hgprefix(
3408 _(
3407 _(
3409 b"Enter commit message."
3408 b"Enter commit message."
3410 b" Lines beginning with 'HG:' are removed."
3409 b" Lines beginning with 'HG:' are removed."
3411 )
3410 )
3412 )
3411 )
3413 )
3412 )
3414 edittext.append(hgprefix(extramsg))
3413 edittext.append(hgprefix(extramsg))
3415 edittext.append(b"HG: --")
3414 edittext.append(b"HG: --")
3416 edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
3415 edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
3417 if ctx.p2():
3416 if ctx.p2():
3418 edittext.append(hgprefix(_(b"branch merge")))
3417 edittext.append(hgprefix(_(b"branch merge")))
3419 if ctx.branch():
3418 if ctx.branch():
3420 edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
3419 edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
3421 if bookmarks.isactivewdirparent(repo):
3420 if bookmarks.isactivewdirparent(repo):
3422 edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
3421 edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
3423 edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs])
3422 edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs])
3424 edittext.extend([hgprefix(_(b"added %s") % f) for f in added])
3423 edittext.extend([hgprefix(_(b"added %s") % f) for f in added])
3425 edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified])
3424 edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified])
3426 edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed])
3425 edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed])
3427 if not added and not modified and not removed:
3426 if not added and not modified and not removed:
3428 edittext.append(hgprefix(_(b"no files changed")))
3427 edittext.append(hgprefix(_(b"no files changed")))
3429 edittext.append(b"")
3428 edittext.append(b"")
3430
3429
3431 return b"\n".join(edittext)
3430 return b"\n".join(edittext)
3432
3431
3433
3432
3434 def commitstatus(repo, node, branch, bheads=None, opts=None):
3433 def commitstatus(repo, node, branch, bheads=None, opts=None):
3435 if opts is None:
3434 if opts is None:
3436 opts = {}
3435 opts = {}
3437 ctx = repo[node]
3436 ctx = repo[node]
3438 parents = ctx.parents()
3437 parents = ctx.parents()
3439
3438
3440 if (
3439 if (
3441 not opts.get(b'amend')
3440 not opts.get(b'amend')
3442 and bheads
3441 and bheads
3443 and node not in bheads
3442 and node not in bheads
3444 and not any(
3443 and not any(
3445 p.node() in bheads and p.branch() == branch for p in parents
3444 p.node() in bheads and p.branch() == branch for p in parents
3446 )
3445 )
3447 ):
3446 ):
3448 repo.ui.status(_(b'created new head\n'))
3447 repo.ui.status(_(b'created new head\n'))
3449 # The message is not printed for initial roots. For the other
3448 # The message is not printed for initial roots. For the other
3450 # changesets, it is printed in the following situations:
3449 # changesets, it is printed in the following situations:
3451 #
3450 #
3452 # Par column: for the 2 parents with ...
3451 # Par column: for the 2 parents with ...
3453 # N: null or no parent
3452 # N: null or no parent
3454 # B: parent is on another named branch
3453 # B: parent is on another named branch
3455 # C: parent is a regular non-head changeset
3454 # C: parent is a regular non-head changeset
3456 # H: parent was a branch head of the current branch
3455 # H: parent was a branch head of the current branch
3457 # Msg column: whether we print "created new head" message
3456 # Msg column: whether we print "created new head" message
3458 # In the following, it is assumed that there already exist some
3457 # In the following, it is assumed that there already exist some
3459 # initial branch heads of the current branch, otherwise nothing is
3458 # initial branch heads of the current branch, otherwise nothing is
3460 # printed anyway.
3459 # printed anyway.
3461 #
3460 #
3462 # Par Msg Comment
3461 # Par Msg Comment
3463 # N N y additional topo root
3462 # N N y additional topo root
3464 #
3463 #
3465 # B N y additional branch root
3464 # B N y additional branch root
3466 # C N y additional topo head
3465 # C N y additional topo head
3467 # H N n usual case
3466 # H N n usual case
3468 #
3467 #
3469 # B B y weird additional branch root
3468 # B B y weird additional branch root
3470 # C B y branch merge
3469 # C B y branch merge
3471 # H B n merge with named branch
3470 # H B n merge with named branch
3472 #
3471 #
3473 # C C y additional head from merge
3472 # C C y additional head from merge
3474 # C H n merge with a head
3473 # C H n merge with a head
3475 #
3474 #
3476 # H H n head merge: head count decreases
3475 # H H n head merge: head count decreases
3477
3476
3478 if not opts.get(b'close_branch'):
3477 if not opts.get(b'close_branch'):
3479 for r in parents:
3478 for r in parents:
3480 if r.closesbranch() and r.branch() == branch:
3479 if r.closesbranch() and r.branch() == branch:
3481 repo.ui.status(
3480 repo.ui.status(
3482 _(b'reopening closed branch head %d\n') % r.rev()
3481 _(b'reopening closed branch head %d\n') % r.rev()
3483 )
3482 )
3484
3483
3485 if repo.ui.debugflag:
3484 if repo.ui.debugflag:
3486 repo.ui.write(
3485 repo.ui.write(
3487 _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
3486 _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
3488 )
3487 )
3489 elif repo.ui.verbose:
3488 elif repo.ui.verbose:
3490 repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3489 repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3491
3490
3492
3491
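# --- Illustrative sketch (not part of Mercurial's source): the "created new
# head" notice in commitstatus() above is driven by the decision table in its
# comments.  Reduced to plain data, the predicate looks roughly like this
# (amend and the no-pre-existing-heads cases are ignored here):
def creates_new_head(node, parents, branch, branch_heads):
    """parents is a list of (node, branchname) pairs; branch_heads is a set."""
    if not branch_heads or node in branch_heads:
        return False
    return not any(
        p_node in branch_heads and p_branch == branch
        for p_node, p_branch in parents
    )

# Committing on top of the current branch head: no message.
assert not creates_new_head('n2', [('n1', 'default')], 'default', {'n1'})
# Committing on top of an older, non-head changeset: a new head appears.
assert creates_new_head('n3', [('n0', 'default')], 'default', {'n1'})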
3493 def postcommitstatus(repo, pats, opts):
3492 def postcommitstatus(repo, pats, opts):
3494 return repo.status(match=scmutil.match(repo[None], pats, opts))
3493 return repo.status(match=scmutil.match(repo[None], pats, opts))
3495
3494
3496
3495
3497 def revert(ui, repo, ctx, parents, *pats, **opts):
3496 def revert(ui, repo, ctx, parents, *pats, **opts):
3498 opts = pycompat.byteskwargs(opts)
3497 opts = pycompat.byteskwargs(opts)
3499 parent, p2 = parents
3498 parent, p2 = parents
3500 node = ctx.node()
3499 node = ctx.node()
3501
3500
3502 mf = ctx.manifest()
3501 mf = ctx.manifest()
3503 if node == p2:
3502 if node == p2:
3504 parent = p2
3503 parent = p2
3505
3504
3506 # need all matching names in dirstate and manifest of target rev,
3505 # need all matching names in dirstate and manifest of target rev,
3507 # so have to walk both. do not print errors if files exist in one
3506 # so have to walk both. do not print errors if files exist in one
3508 # but not the other. in both cases, filesets should be evaluated against
3507 # but not the other. in both cases, filesets should be evaluated against
3509 # workingctx to get consistent result (issue4497). this means 'set:**'
3508 # workingctx to get consistent result (issue4497). this means 'set:**'
3510 # cannot be used to select missing files from target rev.
3509 # cannot be used to select missing files from target rev.
3511
3510
3512 # `names` is a mapping for all elements in working copy and target revision
3511 # `names` is a mapping for all elements in working copy and target revision
3513 # The mapping is in the form:
3512 # The mapping is in the form:
3514 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3513 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3515 names = {}
3514 names = {}
3516 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3515 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3517
3516
3518 with repo.wlock():
3517 with repo.wlock():
3519 ## filling of the `names` mapping
3518 ## filling of the `names` mapping
3520 # walk dirstate to fill `names`
3519 # walk dirstate to fill `names`
3521
3520
3522 interactive = opts.get(b'interactive', False)
3521 interactive = opts.get(b'interactive', False)
3523 wctx = repo[None]
3522 wctx = repo[None]
3524 m = scmutil.match(wctx, pats, opts)
3523 m = scmutil.match(wctx, pats, opts)
3525
3524
3526 # we'll need this later
3525 # we'll need this later
3527 targetsubs = sorted(s for s in wctx.substate if m(s))
3526 targetsubs = sorted(s for s in wctx.substate if m(s))
3528
3527
3529 if not m.always():
3528 if not m.always():
3530 matcher = matchmod.badmatch(m, lambda x, y: False)
3529 matcher = matchmod.badmatch(m, lambda x, y: False)
3531 for abs in wctx.walk(matcher):
3530 for abs in wctx.walk(matcher):
3532 names[abs] = m.exact(abs)
3531 names[abs] = m.exact(abs)
3533
3532
3534 # walk target manifest to fill `names`
3533 # walk target manifest to fill `names`
3535
3534
3536 def badfn(path, msg):
3535 def badfn(path, msg):
3537 if path in names:
3536 if path in names:
3538 return
3537 return
3539 if path in ctx.substate:
3538 if path in ctx.substate:
3540 return
3539 return
3541 path_ = path + b'/'
3540 path_ = path + b'/'
3542 for f in names:
3541 for f in names:
3543 if f.startswith(path_):
3542 if f.startswith(path_):
3544 return
3543 return
3545 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3544 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3546
3545
3547 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3546 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3548 if abs not in names:
3547 if abs not in names:
3549 names[abs] = m.exact(abs)
3548 names[abs] = m.exact(abs)
3550
3549
3551 # Find the status of all files in `names`.
3550 # Find the status of all files in `names`.
3552 m = scmutil.matchfiles(repo, names)
3551 m = scmutil.matchfiles(repo, names)
3553
3552
3554 changes = repo.status(
3553 changes = repo.status(
3555 node1=node, match=m, unknown=True, ignored=True, clean=True
3554 node1=node, match=m, unknown=True, ignored=True, clean=True
3556 )
3555 )
3557 else:
3556 else:
3558 changes = repo.status(node1=node, match=m)
3557 changes = repo.status(node1=node, match=m)
3559 for kind in changes:
3558 for kind in changes:
3560 for abs in kind:
3559 for abs in kind:
3561 names[abs] = m.exact(abs)
3560 names[abs] = m.exact(abs)
3562
3561
3563 m = scmutil.matchfiles(repo, names)
3562 m = scmutil.matchfiles(repo, names)
3564
3563
3565 modified = set(changes.modified)
3564 modified = set(changes.modified)
3566 added = set(changes.added)
3565 added = set(changes.added)
3567 removed = set(changes.removed)
3566 removed = set(changes.removed)
3568 _deleted = set(changes.deleted)
3567 _deleted = set(changes.deleted)
3569 unknown = set(changes.unknown)
3568 unknown = set(changes.unknown)
3570 unknown.update(changes.ignored)
3569 unknown.update(changes.ignored)
3571 clean = set(changes.clean)
3570 clean = set(changes.clean)
3572 modadded = set()
3571 modadded = set()
3573
3572
3574 # We need to account for the state of the file in the dirstate,
3573 # We need to account for the state of the file in the dirstate,
3575 # even when we revert against something other than the parent. This will
3574 # even when we revert against something other than the parent. This will
3576 # slightly alter the behavior of revert (doing a backup or not, delete
3575 # slightly alter the behavior of revert (doing a backup or not, delete
3577 # or just forget, etc).
3576 # or just forget, etc).
3578 if parent == node:
3577 if parent == node:
3579 dsmodified = modified
3578 dsmodified = modified
3580 dsadded = added
3579 dsadded = added
3581 dsremoved = removed
3580 dsremoved = removed
3582 # store all local modifications, useful later for rename detection
3581 # store all local modifications, useful later for rename detection
3583 localchanges = dsmodified | dsadded
3582 localchanges = dsmodified | dsadded
3584 modified, added, removed = set(), set(), set()
3583 modified, added, removed = set(), set(), set()
3585 else:
3584 else:
3586 changes = repo.status(node1=parent, match=m)
3585 changes = repo.status(node1=parent, match=m)
3587 dsmodified = set(changes.modified)
3586 dsmodified = set(changes.modified)
3588 dsadded = set(changes.added)
3587 dsadded = set(changes.added)
3589 dsremoved = set(changes.removed)
3588 dsremoved = set(changes.removed)
3590 # store all local modifications, useful later for rename detection
3589 # store all local modifications, useful later for rename detection
3591 localchanges = dsmodified | dsadded
3590 localchanges = dsmodified | dsadded
3592
3591
3593 # only take into account removes between wc and target
3592 # only take into account removes between wc and target
3594 clean |= dsremoved - removed
3593 clean |= dsremoved - removed
3595 dsremoved &= removed
3594 dsremoved &= removed
3596 # distinguish between dirstate removes and the others
3595 # distinguish between dirstate removes and the others
3597 removed -= dsremoved
3596 removed -= dsremoved
3598
3597
3599 modadded = added & dsmodified
3598 modadded = added & dsmodified
3600 added -= modadded
3599 added -= modadded
3601
3600
3602 # tell the newly modified files apart.
3601 # tell the newly modified files apart.
3603 dsmodified &= modified
3602 dsmodified &= modified
3604 dsmodified |= modified & dsadded # dirstate added may need backup
3603 dsmodified |= modified & dsadded # dirstate added may need backup
3605 modified -= dsmodified
3604 modified -= dsmodified
3606
3605
3607 # We need to wait for some post-processing to update this set
3606 # We need to wait for some post-processing to update this set
3608 # before making the distinction. The dirstate will be used for
3607 # before making the distinction. The dirstate will be used for
3609 # that purpose.
3608 # that purpose.
3610 dsadded = added
3609 dsadded = added
3611
3610
3612 # in case of merge, files that are actually added can be reported as
3611 # in case of merge, files that are actually added can be reported as
3613 # modified, we need to post process the result
3612 # modified, we need to post process the result
3614 if p2 != nullid:
3613 if p2 != nullid:
3615 mergeadd = set(dsmodified)
3614 mergeadd = set(dsmodified)
3616 for path in dsmodified:
3615 for path in dsmodified:
3617 if path in mf:
3616 if path in mf:
3618 mergeadd.remove(path)
3617 mergeadd.remove(path)
3619 dsadded |= mergeadd
3618 dsadded |= mergeadd
3620 dsmodified -= mergeadd
3619 dsmodified -= mergeadd
3621
3620
3622 # if f is a rename, update `names` to also revert the source
3621 # if f is a rename, update `names` to also revert the source
3623 for f in localchanges:
3622 for f in localchanges:
3624 src = repo.dirstate.copied(f)
3623 src = repo.dirstate.copied(f)
3625 # XXX should we check for rename down to target node?
3624 # XXX should we check for rename down to target node?
3626 if src and src not in names and repo.dirstate[src] == b'r':
3625 if src and src not in names and repo.dirstate[src] == b'r':
3627 dsremoved.add(src)
3626 dsremoved.add(src)
3628 names[src] = True
3627 names[src] = True
3629
3628
3630 # determine the exact nature of the deleted changesets
3629 # determine the exact nature of the deleted changesets
3631 deladded = set(_deleted)
3630 deladded = set(_deleted)
3632 for path in _deleted:
3631 for path in _deleted:
3633 if path in mf:
3632 if path in mf:
3634 deladded.remove(path)
3633 deladded.remove(path)
3635 deleted = _deleted - deladded
3634 deleted = _deleted - deladded
3636
3635
3637 # distinguish between file to forget and the other
3636 # distinguish between file to forget and the other
3638 added = set()
3637 added = set()
3639 for abs in dsadded:
3638 for abs in dsadded:
3640 if repo.dirstate[abs] != b'a':
3639 if repo.dirstate[abs] != b'a':
3641 added.add(abs)
3640 added.add(abs)
3642 dsadded -= added
3641 dsadded -= added
3643
3642
3644 for abs in deladded:
3643 for abs in deladded:
3645 if repo.dirstate[abs] == b'a':
3644 if repo.dirstate[abs] == b'a':
3646 dsadded.add(abs)
3645 dsadded.add(abs)
3647 deladded -= dsadded
3646 deladded -= dsadded
3648
3647
3649 # For files marked as removed, we check if an unknown file is present at
3648 # For files marked as removed, we check if an unknown file is present at
3650 # the same path. If such a file exists it may need to be backed up.
3649 # the same path. If such a file exists it may need to be backed up.
3651 # Making the distinction at this stage helps keep the backup
3650 # Making the distinction at this stage helps keep the backup
3652 # logic simpler.
3651 # logic simpler.
3653 removunk = set()
3652 removunk = set()
3654 for abs in removed:
3653 for abs in removed:
3655 target = repo.wjoin(abs)
3654 target = repo.wjoin(abs)
3656 if os.path.lexists(target):
3655 if os.path.lexists(target):
3657 removunk.add(abs)
3656 removunk.add(abs)
3658 removed -= removunk
3657 removed -= removunk
3659
3658
3660 dsremovunk = set()
3659 dsremovunk = set()
3661 for abs in dsremoved:
3660 for abs in dsremoved:
3662 target = repo.wjoin(abs)
3661 target = repo.wjoin(abs)
3663 if os.path.lexists(target):
3662 if os.path.lexists(target):
3664 dsremovunk.add(abs)
3663 dsremovunk.add(abs)
3665 dsremoved -= dsremovunk
3664 dsremoved -= dsremovunk
3666
3665
3667 # actions to be actually performed by revert
3666 # actions to be actually performed by revert
3668 # (<list of files>, <message>) tuples
3667 # (<list of files>, <message>) tuples
3669 actions = {
3668 actions = {
3670 b'revert': ([], _(b'reverting %s\n')),
3669 b'revert': ([], _(b'reverting %s\n')),
3671 b'add': ([], _(b'adding %s\n')),
3670 b'add': ([], _(b'adding %s\n')),
3672 b'remove': ([], _(b'removing %s\n')),
3671 b'remove': ([], _(b'removing %s\n')),
3673 b'drop': ([], _(b'removing %s\n')),
3672 b'drop': ([], _(b'removing %s\n')),
3674 b'forget': ([], _(b'forgetting %s\n')),
3673 b'forget': ([], _(b'forgetting %s\n')),
3675 b'undelete': ([], _(b'undeleting %s\n')),
3674 b'undelete': ([], _(b'undeleting %s\n')),
3676 b'noop': (None, _(b'no changes needed to %s\n')),
3675 b'noop': (None, _(b'no changes needed to %s\n')),
3677 b'unknown': (None, _(b'file not managed: %s\n')),
3676 b'unknown': (None, _(b'file not managed: %s\n')),
3678 }
3677 }
3679
3678
3680 # "constants" that convey the backup strategy.
3679 # "constants" that convey the backup strategy.
3681 # All are set to `discard` if `no-backup` is set, to avoid checking
3680 # All are set to `discard` if `no-backup` is set, to avoid checking
3682 # no_backup lower in the code.
3681 # no_backup lower in the code.
3683 # These values are ordered for comparison purposes
3682 # These values are ordered for comparison purposes
3684 backupinteractive = 3 # do backup if interactively modified
3683 backupinteractive = 3 # do backup if interactively modified
3685 backup = 2 # unconditionally do backup
3684 backup = 2 # unconditionally do backup
3686 check = 1 # check if the existing file differs from target
3685 check = 1 # check if the existing file differs from target
3687 discard = 0 # never do backup
3686 discard = 0 # never do backup
3688 if opts.get(b'no_backup'):
3687 if opts.get(b'no_backup'):
3689 backupinteractive = backup = check = discard
3688 backupinteractive = backup = check = discard
3690 if interactive:
3689 if interactive:
3691 dsmodifiedbackup = backupinteractive
3690 dsmodifiedbackup = backupinteractive
3692 else:
3691 else:
3693 dsmodifiedbackup = backup
3692 dsmodifiedbackup = backup
3694 tobackup = set()
3693 tobackup = set()
3695
3694
3696 backupanddel = actions[b'remove']
3695 backupanddel = actions[b'remove']
3697 if not opts.get(b'no_backup'):
3696 if not opts.get(b'no_backup'):
3698 backupanddel = actions[b'drop']
3697 backupanddel = actions[b'drop']
3699
3698
3700 disptable = (
3699 disptable = (
3701 # dispatch table:
3700 # dispatch table:
3702 # file state
3701 # file state
3703 # action
3702 # action
3704 # make backup
3703 # make backup
3705 ## Sets whose results will change files on disk
3704 ## Sets whose results will change files on disk
3706 # Modified compared to target, no local change
3705 # Modified compared to target, no local change
3707 (modified, actions[b'revert'], discard),
3706 (modified, actions[b'revert'], discard),
3708 # Modified compared to target, but local file is deleted
3707 # Modified compared to target, but local file is deleted
3709 (deleted, actions[b'revert'], discard),
3708 (deleted, actions[b'revert'], discard),
3710 # Modified compared to target, local change
3709 # Modified compared to target, local change
3711 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3710 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3712 # Added since target
3711 # Added since target
3713 (added, actions[b'remove'], discard),
3712 (added, actions[b'remove'], discard),
3714 # Added in working directory
3713 # Added in working directory
3715 (dsadded, actions[b'forget'], discard),
3714 (dsadded, actions[b'forget'], discard),
3716 # Added since target, have local modification
3715 # Added since target, have local modification
3717 (modadded, backupanddel, backup),
3716 (modadded, backupanddel, backup),
3718 # Added since target but file is missing in working directory
3717 # Added since target but file is missing in working directory
3719 (deladded, actions[b'drop'], discard),
3718 (deladded, actions[b'drop'], discard),
3720 # Removed since target, before working copy parent
3719 # Removed since target, before working copy parent
3721 (removed, actions[b'add'], discard),
3720 (removed, actions[b'add'], discard),
3722 # Same as `removed` but an unknown file exists at the same path
3721 # Same as `removed` but an unknown file exists at the same path
3723 (removunk, actions[b'add'], check),
3722 (removunk, actions[b'add'], check),
3724 # Removed since target, marked as such in working copy parent
3723 # Removed since target, marked as such in working copy parent
3725 (dsremoved, actions[b'undelete'], discard),
3724 (dsremoved, actions[b'undelete'], discard),
3726 # Same as `dsremoved` but an unknown file exists at the same path
3725 # Same as `dsremoved` but an unknown file exists at the same path
3727 (dsremovunk, actions[b'undelete'], check),
3726 (dsremovunk, actions[b'undelete'], check),
3728 ## the following sets do not result in any file changes
3727 ## the following sets do not result in any file changes
3729 # File with no modification
3728 # File with no modification
3730 (clean, actions[b'noop'], discard),
3729 (clean, actions[b'noop'], discard),
3731 # Existing file, not tracked anywhere
3730 # Existing file, not tracked anywhere
3732 (unknown, actions[b'unknown'], discard),
3731 (unknown, actions[b'unknown'], discard),
3733 )
3732 )
3734
3733
3735 for abs, exact in sorted(names.items()):
3734 for abs, exact in sorted(names.items()):
3736 # target file to be touched on disk (relative to cwd)
3735 # target file to be touched on disk (relative to cwd)
3737 target = repo.wjoin(abs)
3736 target = repo.wjoin(abs)
3738 # search the entry in the dispatch table.
3737 # search the entry in the dispatch table.
3739 # if the file is in any of these sets, it was touched in the working
3738 # if the file is in any of these sets, it was touched in the working
3740 # directory parent and we are sure it needs to be reverted.
3739 # directory parent and we are sure it needs to be reverted.
3741 for table, (xlist, msg), dobackup in disptable:
3740 for table, (xlist, msg), dobackup in disptable:
3742 if abs not in table:
3741 if abs not in table:
3743 continue
3742 continue
3744 if xlist is not None:
3743 if xlist is not None:
3745 xlist.append(abs)
3744 xlist.append(abs)
3746 if dobackup:
3745 if dobackup:
3747 # If in interactive mode, don't automatically create
3746 # If in interactive mode, don't automatically create
3748 # .orig files (issue4793)
3747 # .orig files (issue4793)
3749 if dobackup == backupinteractive:
3748 if dobackup == backupinteractive:
3750 tobackup.add(abs)
3749 tobackup.add(abs)
3751 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3750 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3752 absbakname = scmutil.backuppath(ui, repo, abs)
3751 absbakname = scmutil.backuppath(ui, repo, abs)
3753 bakname = os.path.relpath(
3752 bakname = os.path.relpath(
3754 absbakname, start=repo.root
3753 absbakname, start=repo.root
3755 )
3754 )
3756 ui.note(
3755 ui.note(
3757 _(b'saving current version of %s as %s\n')
3756 _(b'saving current version of %s as %s\n')
3758 % (uipathfn(abs), uipathfn(bakname))
3757 % (uipathfn(abs), uipathfn(bakname))
3759 )
3758 )
3760 if not opts.get(b'dry_run'):
3759 if not opts.get(b'dry_run'):
3761 if interactive:
3760 if interactive:
3762 util.copyfile(target, absbakname)
3761 util.copyfile(target, absbakname)
3763 else:
3762 else:
3764 util.rename(target, absbakname)
3763 util.rename(target, absbakname)
3765 if opts.get(b'dry_run'):
3764 if opts.get(b'dry_run'):
3766 if ui.verbose or not exact:
3765 if ui.verbose or not exact:
3767 ui.status(msg % uipathfn(abs))
3766 ui.status(msg % uipathfn(abs))
3768 elif exact:
3767 elif exact:
3769 ui.warn(msg % uipathfn(abs))
3768 ui.warn(msg % uipathfn(abs))
3770 break
3769 break
3771
3770
3772 if not opts.get(b'dry_run'):
3771 if not opts.get(b'dry_run'):
3773 needdata = (b'revert', b'add', b'undelete')
3772 needdata = (b'revert', b'add', b'undelete')
3774 oplist = [actions[name][0] for name in needdata]
3773 oplist = [actions[name][0] for name in needdata]
3775 prefetch = scmutil.prefetchfiles
3774 prefetch = scmutil.prefetchfiles
3776 matchfiles = scmutil.matchfiles(
3775 matchfiles = scmutil.matchfiles(
3777 repo, [f for sublist in oplist for f in sublist]
3776 repo, [f for sublist in oplist for f in sublist]
3778 )
3777 )
3779 prefetch(
3778 prefetch(
3780 repo, [(ctx.rev(), matchfiles)],
3779 repo, [(ctx.rev(), matchfiles)],
3781 )
3780 )
3782 match = scmutil.match(repo[None], pats)
3781 match = scmutil.match(repo[None], pats)
3783 _performrevert(
3782 _performrevert(
3784 repo,
3783 repo,
3785 parents,
3784 parents,
3786 ctx,
3785 ctx,
3787 names,
3786 names,
3788 uipathfn,
3787 uipathfn,
3789 actions,
3788 actions,
3790 match,
3789 match,
3791 interactive,
3790 interactive,
3792 tobackup,
3791 tobackup,
3793 )
3792 )
3794
3793
3795 if targetsubs:
3794 if targetsubs:
3796 # Revert the subrepos on the revert list
3795 # Revert the subrepos on the revert list
3797 for sub in targetsubs:
3796 for sub in targetsubs:
3798 try:
3797 try:
3799 wctx.sub(sub).revert(
3798 wctx.sub(sub).revert(
3800 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3799 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3801 )
3800 )
3802 except KeyError:
3801 except KeyError:
3803 raise error.Abort(
3802 raise error.Abort(
3804 b"subrepository '%s' does not exist in %s!"
3803 b"subrepository '%s' does not exist in %s!"
3805 % (sub, short(ctx.node()))
3804 % (sub, short(ctx.node()))
3806 )
3805 )
3807
3806
3808
3807
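# --- Illustrative sketch (not part of Mercurial's source): revert() above
# classifies each path into disjoint sets, then walks a dispatch table of
# (fileset, action, backup-level) rows and takes the first row that contains
# the path.  The pattern in miniature, with made-up set names:
DISCARD, CHECK, BACKUP = 0, 1, 2  # ordered backup levels, as in revert()

def classify(path, table):
    """Return (action, backup level) from the first matching table row."""
    for fileset, action, dobackup in table:
        if path in fileset:
            return action, dobackup
    return 'noop', DISCARD

modified_locally = {'a.txt'}
added_since_target = {'b.txt'}
table = (
    (modified_locally, 'revert', BACKUP),
    (added_since_target, 'remove', DISCARD),
)
assert classify('a.txt', table) == ('revert', BACKUP)
assert classify('b.txt', table) == ('remove', DISCARD)
assert classify('c.txt', table) == ('noop', DISCARD)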
3809 def _performrevert(
3808 def _performrevert(
3810 repo,
3809 repo,
3811 parents,
3810 parents,
3812 ctx,
3811 ctx,
3813 names,
3812 names,
3814 uipathfn,
3813 uipathfn,
3815 actions,
3814 actions,
3816 match,
3815 match,
3817 interactive=False,
3816 interactive=False,
3818 tobackup=None,
3817 tobackup=None,
3819 ):
3818 ):
3820 """function that actually performs all the actions computed for revert
3819 """function that actually performs all the actions computed for revert
3821
3820
3822 This is an independent function to let extensions plug in and react to
3821 This is an independent function to let extensions plug in and react to
3823 the imminent revert.
3822 the imminent revert.
3824
3823
3825 Make sure you have the working directory locked when calling this function.
3824 Make sure you have the working directory locked when calling this function.
3826 """
3825 """
3827 parent, p2 = parents
3826 parent, p2 = parents
3828 node = ctx.node()
3827 node = ctx.node()
3829 excluded_files = []
3828 excluded_files = []
3830
3829
3831 def checkout(f):
3830 def checkout(f):
3832 fc = ctx[f]
3831 fc = ctx[f]
3833 repo.wwrite(f, fc.data(), fc.flags())
3832 repo.wwrite(f, fc.data(), fc.flags())
3834
3833
3835 def doremove(f):
3834 def doremove(f):
3836 try:
3835 try:
3837 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
3836 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
3838 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3837 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3839 except OSError:
3838 except OSError:
3840 pass
3839 pass
3841 repo.dirstate.remove(f)
3840 repo.dirstate.remove(f)
3842
3841
3843 def prntstatusmsg(action, f):
3842 def prntstatusmsg(action, f):
3844 exact = names[f]
3843 exact = names[f]
3845 if repo.ui.verbose or not exact:
3844 if repo.ui.verbose or not exact:
3846 repo.ui.status(actions[action][1] % uipathfn(f))
3845 repo.ui.status(actions[action][1] % uipathfn(f))
3847
3846
3848 audit_path = pathutil.pathauditor(repo.root, cached=True)
3847 audit_path = pathutil.pathauditor(repo.root, cached=True)
3849 for f in actions[b'forget'][0]:
3848 for f in actions[b'forget'][0]:
3850 if interactive:
3849 if interactive:
3851 choice = repo.ui.promptchoice(
3850 choice = repo.ui.promptchoice(
3852 _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3851 _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3853 )
3852 )
3854 if choice == 0:
3853 if choice == 0:
3855 prntstatusmsg(b'forget', f)
3854 prntstatusmsg(b'forget', f)
3856 repo.dirstate.drop(f)
3855 repo.dirstate.drop(f)
3857 else:
3856 else:
3858 excluded_files.append(f)
3857 excluded_files.append(f)
3859 else:
3858 else:
3860 prntstatusmsg(b'forget', f)
3859 prntstatusmsg(b'forget', f)
3861 repo.dirstate.drop(f)
3860 repo.dirstate.drop(f)
3862 for f in actions[b'remove'][0]:
3861 for f in actions[b'remove'][0]:
3863 audit_path(f)
3862 audit_path(f)
3864 if interactive:
3863 if interactive:
3865 choice = repo.ui.promptchoice(
3864 choice = repo.ui.promptchoice(
3866 _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3865 _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3867 )
3866 )
3868 if choice == 0:
3867 if choice == 0:
3869 prntstatusmsg(b'remove', f)
3868 prntstatusmsg(b'remove', f)
3870 doremove(f)
3869 doremove(f)
3871 else:
3870 else:
3872 excluded_files.append(f)
3871 excluded_files.append(f)
3873 else:
3872 else:
3874 prntstatusmsg(b'remove', f)
3873 prntstatusmsg(b'remove', f)
3875 doremove(f)
3874 doremove(f)
3876 for f in actions[b'drop'][0]:
3875 for f in actions[b'drop'][0]:
3877 audit_path(f)
3876 audit_path(f)
3878 prntstatusmsg(b'drop', f)
3877 prntstatusmsg(b'drop', f)
3879 repo.dirstate.remove(f)
3878 repo.dirstate.remove(f)
3880
3879
3881 normal = None
3880 normal = None
3882 if node == parent:
3881 if node == parent:
3883 # We're reverting to our parent. If possible, we'd like status
3882 # We're reverting to our parent. If possible, we'd like status
3884 # to report the file as clean. We have to use normallookup for
3883 # to report the file as clean. We have to use normallookup for
3885 # merges to avoid losing information about merged/dirty files.
3884 # merges to avoid losing information about merged/dirty files.
3886 if p2 != nullid:
3885 if p2 != nullid:
3887 normal = repo.dirstate.normallookup
3886 normal = repo.dirstate.normallookup
3888 else:
3887 else:
3889 normal = repo.dirstate.normal
3888 normal = repo.dirstate.normal
3890
3889
3891 newlyaddedandmodifiedfiles = set()
3890 newlyaddedandmodifiedfiles = set()
3892 if interactive:
3891 if interactive:
3893 # Prompt the user for changes to revert
3892 # Prompt the user for changes to revert
3894 torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
3893 torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
3895 m = scmutil.matchfiles(repo, torevert)
3894 m = scmutil.matchfiles(repo, torevert)
3896 diffopts = patch.difffeatureopts(
3895 diffopts = patch.difffeatureopts(
3897 repo.ui,
3896 repo.ui,
3898 whitespace=True,
3897 whitespace=True,
3899 section=b'commands',
3898 section=b'commands',
3900 configprefix=b'revert.interactive.',
3899 configprefix=b'revert.interactive.',
3901 )
3900 )
3902 diffopts.nodates = True
3901 diffopts.nodates = True
3903 diffopts.git = True
3902 diffopts.git = True
3904 operation = b'apply'
3903 operation = b'apply'
3905 if node == parent:
3904 if node == parent:
3906 if repo.ui.configbool(
3905 if repo.ui.configbool(
3907 b'experimental', b'revert.interactive.select-to-keep'
3906 b'experimental', b'revert.interactive.select-to-keep'
3908 ):
3907 ):
3909 operation = b'keep'
3908 operation = b'keep'
3910 else:
3909 else:
3911 operation = b'discard'
3910 operation = b'discard'
3912
3911
3913 if operation == b'apply':
3912 if operation == b'apply':
3914 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3913 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3915 else:
3914 else:
3916 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3915 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3917 originalchunks = patch.parsepatch(diff)
3916 originalchunks = patch.parsepatch(diff)
3918
3917
3919 try:
3918 try:
3920
3919
3921 chunks, opts = recordfilter(
3920 chunks, opts = recordfilter(
3922 repo.ui, originalchunks, match, operation=operation
3921 repo.ui, originalchunks, match, operation=operation
3923 )
3922 )
3924 if operation == b'discard':
3923 if operation == b'discard':
3925 chunks = patch.reversehunks(chunks)
3924 chunks = patch.reversehunks(chunks)
3926
3925
3927 except error.PatchError as err:
3926 except error.PatchError as err:
3928 raise error.Abort(_(b'error parsing patch: %s') % err)
3927 raise error.Abort(_(b'error parsing patch: %s') % err)
3929
3928
3930 # FIXME: when doing an interactive revert of a copy, there's no way of
3929 # FIXME: when doing an interactive revert of a copy, there's no way of
3931 # performing a partial revert of the added file, the only option is
3930 # performing a partial revert of the added file, the only option is
3932 # "remove added file <name> (Yn)?", so we don't need to worry about the
3931 # "remove added file <name> (Yn)?", so we don't need to worry about the
3933 # alsorestore value. Ideally we'd be able to partially revert
3932 # alsorestore value. Ideally we'd be able to partially revert
3934 # copied/renamed files.
3933 # copied/renamed files.
3935 newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
3934 newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
3936 chunks, originalchunks
3935 chunks, originalchunks
3937 )
3936 )
3938 if tobackup is None:
3937 if tobackup is None:
3939 tobackup = set()
3938 tobackup = set()
3940 # Apply changes
3939 # Apply changes
3941 fp = stringio()
3940 fp = stringio()
3942 # chunks are serialized per file, but files aren't sorted
3941 # chunks are serialized per file, but files aren't sorted
3943 for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
3942 for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
3944 prntstatusmsg(b'revert', f)
3943 prntstatusmsg(b'revert', f)
3945 files = set()
3944 files = set()
3946 for c in chunks:
3945 for c in chunks:
3947 if ishunk(c):
3946 if ishunk(c):
3948 abs = c.header.filename()
3947 abs = c.header.filename()
3949 # Create a backup file only if this hunk should be backed up
3948 # Create a backup file only if this hunk should be backed up
3950 if c.header.filename() in tobackup:
3949 if c.header.filename() in tobackup:
3951 target = repo.wjoin(abs)
3950 target = repo.wjoin(abs)
3952 bakname = scmutil.backuppath(repo.ui, repo, abs)
3951 bakname = scmutil.backuppath(repo.ui, repo, abs)
3953 util.copyfile(target, bakname)
3952 util.copyfile(target, bakname)
3954 tobackup.remove(abs)
3953 tobackup.remove(abs)
3955 if abs not in files:
3954 if abs not in files:
3956 files.add(abs)
3955 files.add(abs)
3957 if operation == b'keep':
3956 if operation == b'keep':
3958 checkout(abs)
3957 checkout(abs)
3959 c.write(fp)
3958 c.write(fp)
3960 dopatch = fp.tell()
3959 dopatch = fp.tell()
3961 fp.seek(0)
3960 fp.seek(0)
3962 if dopatch:
3961 if dopatch:
3963 try:
3962 try:
3964 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3963 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3965 except error.PatchError as err:
3964 except error.PatchError as err:
3966 raise error.Abort(pycompat.bytestr(err))
3965 raise error.Abort(pycompat.bytestr(err))
3967 del fp
3966 del fp
3968 else:
3967 else:
3969 for f in actions[b'revert'][0]:
3968 for f in actions[b'revert'][0]:
3970 prntstatusmsg(b'revert', f)
3969 prntstatusmsg(b'revert', f)
3971 checkout(f)
3970 checkout(f)
3972 if normal:
3971 if normal:
3973 normal(f)
3972 normal(f)
3974
3973
3975 for f in actions[b'add'][0]:
3974 for f in actions[b'add'][0]:
3976 # Don't checkout modified files, they are already created by the diff
3975 # Don't checkout modified files, they are already created by the diff
3977 if f not in newlyaddedandmodifiedfiles:
3976 if f not in newlyaddedandmodifiedfiles:
3978 prntstatusmsg(b'add', f)
3977 prntstatusmsg(b'add', f)
3979 checkout(f)
3978 checkout(f)
3980 repo.dirstate.add(f)
3979 repo.dirstate.add(f)
3981
3980
3982 normal = repo.dirstate.normallookup
3981 normal = repo.dirstate.normallookup
3983 if node == parent and p2 == nullid:
3982 if node == parent and p2 == nullid:
3984 normal = repo.dirstate.normal
3983 normal = repo.dirstate.normal
3985 for f in actions[b'undelete'][0]:
3984 for f in actions[b'undelete'][0]:
3986 if interactive:
3985 if interactive:
3987 choice = repo.ui.promptchoice(
3986 choice = repo.ui.promptchoice(
3988 _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
3987 _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
3989 )
3988 )
3990 if choice == 0:
3989 if choice == 0:
3991 prntstatusmsg(b'undelete', f)
3990 prntstatusmsg(b'undelete', f)
3992 checkout(f)
3991 checkout(f)
3993 normal(f)
3992 normal(f)
3994 else:
3993 else:
3995 excluded_files.append(f)
3994 excluded_files.append(f)
3996 else:
3995 else:
3997 prntstatusmsg(b'undelete', f)
3996 prntstatusmsg(b'undelete', f)
3998 checkout(f)
3997 checkout(f)
3999 normal(f)
3998 normal(f)
4000
3999
4001 copied = copies.pathcopies(repo[parent], ctx)
4000 copied = copies.pathcopies(repo[parent], ctx)
4002
4001
4003 for f in (
4002 for f in (
4004 actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
4003 actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
4005 ):
4004 ):
4006 if f in copied:
4005 if f in copied:
4007 repo.dirstate.copy(copied[f], f)
4006 repo.dirstate.copy(copied[f], f)
4008
4007
4009
4008
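# --- Illustrative sketch (not part of Mercurial's source): the util.hooks()
# registries declared just below act, conceptually, as lists of callables that
# extensions register and that core commands invoke with a fixed argument
# tuple.  A minimal stand-in for that idea (not the real implementation):
class HookList(list):
    def __call__(self, *args):
        for fn in self:
            fn(*args)

messages = []
my_hooks = HookList()
my_hooks.append(lambda ui, repo: messages.append('extension A ran'))
my_hooks.append(lambda ui, repo: messages.append('extension B ran'))
my_hooks(None, None)  # stand-ins for (ui, repo)
assert messages == ['extension A ran', 'extension B ran']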
4010 # a list of (ui, repo, otherpeer, opts, missing) functions called by
4009 # a list of (ui, repo, otherpeer, opts, missing) functions called by
4011 # commands.outgoing. "missing" is the "missing" attribute of the result of
4010 # commands.outgoing. "missing" is the "missing" attribute of the result of
4012 # "findcommonoutgoing()"
4011 # "findcommonoutgoing()"
4013 outgoinghooks = util.hooks()
4012 outgoinghooks = util.hooks()
4014
4013
4015 # a list of (ui, repo) functions called by commands.summary
4014 # a list of (ui, repo) functions called by commands.summary
4016 summaryhooks = util.hooks()
4015 summaryhooks = util.hooks()
4017
4016
4018 # a list of (ui, repo, opts, changes) functions called by commands.summary.
4017 # a list of (ui, repo, opts, changes) functions called by commands.summary.
4019 #
4018 #
4020 # functions should return tuple of booleans below, if 'changes' is None:
4019 # functions should return tuple of booleans below, if 'changes' is None:
4021 # (whether-incomings-are-needed, whether-outgoings-are-needed)
4020 # (whether-incomings-are-needed, whether-outgoings-are-needed)
4022 #
4021 #
4023 # otherwise, 'changes' is a tuple of tuples below:
4022 # otherwise, 'changes' is a tuple of tuples below:
4024 # - (sourceurl, sourcebranch, sourcepeer, incoming)
4023 # - (sourceurl, sourcebranch, sourcepeer, incoming)
4025 # - (desturl, destbranch, destpeer, outgoing)
4024 # - (desturl, destbranch, destpeer, outgoing)
4026 summaryremotehooks = util.hooks()
4025 summaryremotehooks = util.hooks()
4027
4026
4028
4027
4029 def checkunfinished(repo, commit=False, skipmerge=False):
4028 def checkunfinished(repo, commit=False, skipmerge=False):
4030 '''Look for an unfinished multistep operation, like graft, and abort
4029 '''Look for an unfinished multistep operation, like graft, and abort
4031 if found. It's probably good to check this right before
4030 if found. It's probably good to check this right before
4032 bailifchanged().
4031 bailifchanged().
4033 '''
4032 '''
4034 # Check for non-clearable states first, so things like rebase will take
4033 # Check for non-clearable states first, so things like rebase will take
4035 # precedence over update.
4034 # precedence over update.
4036 for state in statemod._unfinishedstates:
4035 for state in statemod._unfinishedstates:
4037 if (
4036 if (
4038 state._clearable
4037 state._clearable
4039 or (commit and state._allowcommit)
4038 or (commit and state._allowcommit)
4040 or state._reportonly
4039 or state._reportonly
4041 ):
4040 ):
4042 continue
4041 continue
4043 if state.isunfinished(repo):
4042 if state.isunfinished(repo):
4044 raise error.Abort(state.msg(), hint=state.hint())
4043 raise error.Abort(state.msg(), hint=state.hint())
4045
4044
4046 for s in statemod._unfinishedstates:
4045 for s in statemod._unfinishedstates:
4047 if (
4046 if (
4048 not s._clearable
4047 not s._clearable
4049 or (commit and s._allowcommit)
4048 or (commit and s._allowcommit)
4050 or (s._opname == b'merge' and skipmerge)
4049 or (s._opname == b'merge' and skipmerge)
4051 or s._reportonly
4050 or s._reportonly
4052 ):
4051 ):
4053 continue
4052 continue
4054 if s.isunfinished(repo):
4053 if s.isunfinished(repo):
4055 raise error.Abort(s.msg(), hint=s.hint())
4054 raise error.Abort(s.msg(), hint=s.hint())
4056
4055
4057
4056
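# --- Illustrative sketch (not part of Mercurial's source): checkunfinished()
# above walks the registered states twice so that non-clearable operations
# (e.g. an interrupted rebase) take precedence over clearable ones, regardless
# of registration order.  The same precedence rule on plain tuples:
def first_blocking(states):
    """states: iterable of (name, unfinished, clearable); non-clearable win."""
    states = list(states)
    for name, unfinished, clearable in states:
        if unfinished and not clearable:
            return name
    for name, unfinished, clearable in states:
        if unfinished and clearable:
            return name
    return None

registered = [
    ('update', True, True),    # clearable, registered first
    ('rebase', True, False),   # non-clearable, registered later
]
assert first_blocking(registered) == 'rebase'  # precedence, not order, decides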
4058 def clearunfinished(repo):
4057 def clearunfinished(repo):
4059 '''Check for unfinished operations (as above), and clear the ones
4058 '''Check for unfinished operations (as above), and clear the ones
4060 that are clearable.
4059 that are clearable.
4061 '''
4060 '''
4062 for state in statemod._unfinishedstates:
4061 for state in statemod._unfinishedstates:
4063 if state._reportonly:
4062 if state._reportonly:
4064 continue
4063 continue
4065 if not state._clearable and state.isunfinished(repo):
4064 if not state._clearable and state.isunfinished(repo):
4066 raise error.Abort(state.msg(), hint=state.hint())
4065 raise error.Abort(state.msg(), hint=state.hint())
4067
4066
4068 for s in statemod._unfinishedstates:
4067 for s in statemod._unfinishedstates:
4069 if s._opname == b'merge' or s._reportonly:
4068 if s._opname == b'merge' or s._reportonly:
4070 continue
4069 continue
4071 if s._clearable and s.isunfinished(repo):
4070 if s._clearable and s.isunfinished(repo):
4072 util.unlink(repo.vfs.join(s._fname))
4071 util.unlink(repo.vfs.join(s._fname))
4073
4072
4074
4073
4075 def getunfinishedstate(repo):
4074 def getunfinishedstate(repo):
4076 ''' Checks for unfinished operations and returns the statecheck object
4075 ''' Checks for unfinished operations and returns the statecheck object
4077 for it'''
4076 for it'''
4078 for state in statemod._unfinishedstates:
4077 for state in statemod._unfinishedstates:
4079 if state.isunfinished(repo):
4078 if state.isunfinished(repo):
4080 return state
4079 return state
4081 return None
4080 return None
4082
4081
4083
4082
4084 def howtocontinue(repo):
4083 def howtocontinue(repo):
4085 '''Check for an unfinished operation and return the command to finish
4084 '''Check for an unfinished operation and return the command to finish
4086 it.
4085 it.
4087
4086
4088 statemod._unfinishedstates list is checked for an unfinished operation
4087 statemod._unfinishedstates list is checked for an unfinished operation
4089 and the corresponding message to finish it is generated if a method to
4088 and the corresponding message to finish it is generated if a method to
4090 continue is supported by the operation.
4089 continue is supported by the operation.
4091
4090
4092 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
4091 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
4093 a boolean.
4092 a boolean.
4094 '''
4093 '''
4095 contmsg = _(b"continue: %s")
4094 contmsg = _(b"continue: %s")
4096 for state in statemod._unfinishedstates:
4095 for state in statemod._unfinishedstates:
4097 if not state._continueflag:
4096 if not state._continueflag:
4098 continue
4097 continue
4099 if state.isunfinished(repo):
4098 if state.isunfinished(repo):
4100 return contmsg % state.continuemsg(), True
4099 return contmsg % state.continuemsg(), True
4101 if repo[None].dirty(missing=True, merge=False, branch=False):
4100 if repo[None].dirty(missing=True, merge=False, branch=False):
4102 return contmsg % _(b"hg commit"), False
4101 return contmsg % _(b"hg commit"), False
4103 return None, None
4102 return None, None
4104
4103
4105
4104
4106 def checkafterresolved(repo):
4105 def checkafterresolved(repo):
4107 '''Inform the user about the next action after completing hg resolve
4106 '''Inform the user about the next action after completing hg resolve
4108
4107
4109 If there's an unfinished operation that supports the continue flag,
4108 If there's an unfinished operation that supports the continue flag,
4110 howtocontinue will yield repo.ui.warn as the reporter.
4109 howtocontinue will yield repo.ui.warn as the reporter.
4111
4110
4112 Otherwise, it will yield repo.ui.note.
4111 Otherwise, it will yield repo.ui.note.
4113 '''
4112 '''
4114 msg, warning = howtocontinue(repo)
4113 msg, warning = howtocontinue(repo)
4115 if msg is not None:
4114 if msg is not None:
4116 if warning:
4115 if warning:
4117 repo.ui.warn(b"%s\n" % msg)
4116 repo.ui.warn(b"%s\n" % msg)
4118 else:
4117 else:
4119 repo.ui.note(b"%s\n" % msg)
4118 repo.ui.note(b"%s\n" % msg)
4120
4119
4121
4120
4122 def wrongtooltocontinue(repo, task):
4121 def wrongtooltocontinue(repo, task):
4123 '''Raise an abort suggesting how to properly continue if there is an
4122 '''Raise an abort suggesting how to properly continue if there is an
4124 active task.
4123 active task.
4125
4124
4126 Uses howtocontinue() to find the active task.
4125 Uses howtocontinue() to find the active task.
4127
4126
4128 If there's no task (repo.ui.note for 'hg commit'), it does not offer
4127 If there's no task (repo.ui.note for 'hg commit'), it does not offer
4129 a hint.
4128 a hint.
4130 '''
4129 '''
4131 after = howtocontinue(repo)
4130 after = howtocontinue(repo)
4132 hint = None
4131 hint = None
4133 if after[1]:
4132 if after[1]:
4134 hint = after[0]
4133 hint = after[0]
4135 raise error.Abort(_(b'no %s in progress') % task, hint=hint)
4134 raise error.Abort(_(b'no %s in progress') % task, hint=hint)
4136
4135
4137
4136
4138 def abortgraft(ui, repo, graftstate):
4137 def abortgraft(ui, repo, graftstate):
4139 """abort the interrupted graft and roll back to the state before the
4138 """abort the interrupted graft and roll back to the state before the
4140 interrupted graft"""
4139 interrupted graft"""
4141 if not graftstate.exists():
4140 if not graftstate.exists():
4142 raise error.Abort(_(b"no interrupted graft to abort"))
4141 raise error.Abort(_(b"no interrupted graft to abort"))
4143 statedata = readgraftstate(repo, graftstate)
4142 statedata = readgraftstate(repo, graftstate)
4144 newnodes = statedata.get(b'newnodes')
4143 newnodes = statedata.get(b'newnodes')
4145 if newnodes is None:
4144 if newnodes is None:
4146 # an old graft state which does not have all the data required to abort
4145 # an old graft state which does not have all the data required to abort
4147 # the graft
4146 # the graft
4148 raise error.Abort(_(b"cannot abort using an old graftstate"))
4147 raise error.Abort(_(b"cannot abort using an old graftstate"))
4149
4148
4150 # changeset from which graft operation was started
4149 # changeset from which graft operation was started
4151 if len(newnodes) > 0:
4150 if len(newnodes) > 0:
4152 startctx = repo[newnodes[0]].p1()
4151 startctx = repo[newnodes[0]].p1()
4153 else:
4152 else:
4154 startctx = repo[b'.']
4153 startctx = repo[b'.']
4155 # whether to strip or not
4154 # whether to strip or not
4156 cleanup = False
4155 cleanup = False
4157 from . import hg
4156 from . import hg
4158
4157
4159 if newnodes:
4158 if newnodes:
4160 newnodes = [repo[r].rev() for r in newnodes]
4159 newnodes = [repo[r].rev() for r in newnodes]
4161 cleanup = True
4160 cleanup = True
4162 # checking that none of the newnodes turned public or is public
4161 # checking that none of the newnodes turned public or is public
4163 immutable = [c for c in newnodes if not repo[c].mutable()]
4162 immutable = [c for c in newnodes if not repo[c].mutable()]
4164 if immutable:
4163 if immutable:
4165 repo.ui.warn(
4164 repo.ui.warn(
4166 _(b"cannot clean up public changesets %s\n")
4165 _(b"cannot clean up public changesets %s\n")
4167 % b', '.join(bytes(repo[r]) for r in immutable),
4166 % b', '.join(bytes(repo[r]) for r in immutable),
4168 hint=_(b"see 'hg help phases' for details"),
4167 hint=_(b"see 'hg help phases' for details"),
4169 )
4168 )
4170 cleanup = False
4169 cleanup = False
4171
4170
4172 # checking that no new nodes are created on top of grafted revs
4171 # checking that no new nodes are created on top of grafted revs
4173 desc = set(repo.changelog.descendants(newnodes))
4172 desc = set(repo.changelog.descendants(newnodes))
4174 if desc - set(newnodes):
4173 if desc - set(newnodes):
4175 repo.ui.warn(
4174 repo.ui.warn(
4176 _(
4175 _(
4177 b"new changesets detected on destination "
4176 b"new changesets detected on destination "
4178 b"branch, can't strip\n"
4177 b"branch, can't strip\n"
4179 )
4178 )
4180 )
4179 )
4181 cleanup = False
4180 cleanup = False
4182
4181
4183 if cleanup:
4182 if cleanup:
4184 with repo.wlock(), repo.lock():
4183 with repo.wlock(), repo.lock():
4185 hg.updaterepo(repo, startctx.node(), overwrite=True)
4184 hg.updaterepo(repo, startctx.node(), overwrite=True)
4186 # stripping the new nodes created
4185 # stripping the new nodes created
4187 strippoints = [
4186 strippoints = [
4188 c.node() for c in repo.set(b"roots(%ld)", newnodes)
4187 c.node() for c in repo.set(b"roots(%ld)", newnodes)
4189 ]
4188 ]
4190 repair.strip(repo.ui, repo, strippoints, backup=False)
4189 repair.strip(repo.ui, repo, strippoints, backup=False)
4191
4190
4192 if not cleanup:
4191 if not cleanup:
4193 # we don't update to the startnode if we can't strip
4192 # we don't update to the startnode if we can't strip
4194 startctx = repo[b'.']
4193 startctx = repo[b'.']
4195 hg.updaterepo(repo, startctx.node(), overwrite=True)
4194 hg.updaterepo(repo, startctx.node(), overwrite=True)
4196
4195
4197 ui.status(_(b"graft aborted\n"))
4196 ui.status(_(b"graft aborted\n"))
4198 ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
4197 ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
4199 graftstate.delete()
4198 graftstate.delete()
4200 return 0
4199 return 0
4201
4200
4202
4201
4203 def readgraftstate(repo, graftstate):
4202 def readgraftstate(repo, graftstate):
4204 # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
4203 # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
4205 """read the graft state file and return a dict of the data stored in it"""
4204 """read the graft state file and return a dict of the data stored in it"""
4206 try:
4205 try:
4207 return graftstate.read()
4206 return graftstate.read()
4208 except error.CorruptedState:
4207 except error.CorruptedState:
4209 nodes = repo.vfs.read(b'graftstate').splitlines()
4208 nodes = repo.vfs.read(b'graftstate').splitlines()
4210 return {b'nodes': nodes}
4209 return {b'nodes': nodes}
4211
4210
4212
4211
4213 def hgabortgraft(ui, repo):
4212 def hgabortgraft(ui, repo):
4214 """ abort logic for aborting graft using 'hg abort'"""
4213 """ abort logic for aborting graft using 'hg abort'"""
4215 with repo.wlock():
4214 with repo.wlock():
4216 graftstate = statemod.cmdstate(repo, b'graftstate')
4215 graftstate = statemod.cmdstate(repo, b'graftstate')
4217 return abortgraft(ui, repo, graftstate)
4216 return abortgraft(ui, repo, graftstate)
@@ -1,3157 +1,3157 @@
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import weakref
11 import weakref
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 nullrev,
17 nullrev,
18 )
18 )
19 from .thirdparty import attr
19 from .thirdparty import attr
20 from . import (
20 from . import (
21 bookmarks as bookmod,
21 bookmarks as bookmod,
22 bundle2,
22 bundle2,
23 changegroup,
23 changegroup,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchangev2,
26 exchangev2,
27 lock as lockmod,
27 lock as lockmod,
28 logexchange,
28 logexchange,
29 narrowspec,
29 narrowspec,
30 obsolete,
30 obsolete,
31 obsutil,
31 obsutil,
32 phases,
32 phases,
33 pushkey,
33 pushkey,
34 pycompat,
34 pycompat,
35 requirements,
35 scmutil,
36 scmutil,
36 sslutil,
37 sslutil,
37 streamclone,
38 streamclone,
38 url as urlmod,
39 url as urlmod,
39 util,
40 util,
40 wireprototypes,
41 wireprototypes,
41 )
42 )
42 from .interfaces import repository
43 from .utils import (
43 from .utils import (
44 hashutil,
44 hashutil,
45 stringutil,
45 stringutil,
46 )
46 )
47
47
48 urlerr = util.urlerr
48 urlerr = util.urlerr
49 urlreq = util.urlreq
49 urlreq = util.urlreq
50
50
51 _NARROWACL_SECTION = b'narrowacl'
51 _NARROWACL_SECTION = b'narrowacl'
52
52
53 # Maps bundle version human names to changegroup versions.
53 # Maps bundle version human names to changegroup versions.
54 _bundlespeccgversions = {
54 _bundlespeccgversions = {
55 b'v1': b'01',
55 b'v1': b'01',
56 b'v2': b'02',
56 b'v2': b'02',
57 b'packed1': b's1',
57 b'packed1': b's1',
58 b'bundle2': b'02', # legacy
58 b'bundle2': b'02', # legacy
59 }
59 }
60
60
61 # Maps bundle version with content opts to choose which part to bundle
61 # Maps bundle version with content opts to choose which part to bundle
62 _bundlespeccontentopts = {
62 _bundlespeccontentopts = {
63 b'v1': {
63 b'v1': {
64 b'changegroup': True,
64 b'changegroup': True,
65 b'cg.version': b'01',
65 b'cg.version': b'01',
66 b'obsolescence': False,
66 b'obsolescence': False,
67 b'phases': False,
67 b'phases': False,
68 b'tagsfnodescache': False,
68 b'tagsfnodescache': False,
69 b'revbranchcache': False,
69 b'revbranchcache': False,
70 },
70 },
71 b'v2': {
71 b'v2': {
72 b'changegroup': True,
72 b'changegroup': True,
73 b'cg.version': b'02',
73 b'cg.version': b'02',
74 b'obsolescence': False,
74 b'obsolescence': False,
75 b'phases': False,
75 b'phases': False,
76 b'tagsfnodescache': True,
76 b'tagsfnodescache': True,
77 b'revbranchcache': True,
77 b'revbranchcache': True,
78 },
78 },
79 b'packed1': {b'cg.version': b's1'},
79 b'packed1': {b'cg.version': b's1'},
80 }
80 }
81 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
81 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
82
82
83 _bundlespecvariants = {
83 _bundlespecvariants = {
84 b"streamv2": {
84 b"streamv2": {
85 b"changegroup": False,
85 b"changegroup": False,
86 b"streamv2": True,
86 b"streamv2": True,
87 b"tagsfnodescache": False,
87 b"tagsfnodescache": False,
88 b"revbranchcache": False,
88 b"revbranchcache": False,
89 }
89 }
90 }
90 }
91
91
92 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
92 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
93 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
93 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
94
94
95
95
96 @attr.s
96 @attr.s
97 class bundlespec(object):
97 class bundlespec(object):
98 compression = attr.ib()
98 compression = attr.ib()
99 wirecompression = attr.ib()
99 wirecompression = attr.ib()
100 version = attr.ib()
100 version = attr.ib()
101 wireversion = attr.ib()
101 wireversion = attr.ib()
102 params = attr.ib()
102 params = attr.ib()
103 contentopts = attr.ib()
103 contentopts = attr.ib()
104
104
105
105
106 def parsebundlespec(repo, spec, strict=True):
106 def parsebundlespec(repo, spec, strict=True):
107 """Parse a bundle string specification into parts.
107 """Parse a bundle string specification into parts.
108
108
109 Bundle specifications denote a well-defined bundle/exchange format.
109 Bundle specifications denote a well-defined bundle/exchange format.
110 The content of a given specification should not change over time in
110 The content of a given specification should not change over time in
111 order to ensure that bundles produced by a newer version of Mercurial are
111 order to ensure that bundles produced by a newer version of Mercurial are
112 readable from an older version.
112 readable from an older version.
113
113
114 The string currently has the form:
114 The string currently has the form:
115
115
116 <compression>-<type>[;<parameter0>[;<parameter1>]]
116 <compression>-<type>[;<parameter0>[;<parameter1>]]
117
117
118 Where <compression> is one of the supported compression formats
118 Where <compression> is one of the supported compression formats
119 and <type> is (currently) a version string. A ";" can follow the type and
119 and <type> is (currently) a version string. A ";" can follow the type and
120 all text afterwards is interpreted as URI encoded, ";" delimited key=value
120 all text afterwards is interpreted as URI encoded, ";" delimited key=value
121 pairs.
121 pairs.
122
122
123 If ``strict`` is True (the default) <compression> is required. Otherwise,
123 If ``strict`` is True (the default) <compression> is required. Otherwise,
124 it is optional.
124 it is optional.
125
125
126 Returns a bundlespec object of (compression, version, parameters).
126 Returns a bundlespec object of (compression, version, parameters).
127 Compression will be ``None`` if not in strict mode and a compression isn't
127 Compression will be ``None`` if not in strict mode and a compression isn't
128 defined.
128 defined.
129
129
130 An ``InvalidBundleSpecification`` is raised when the specification is
130 An ``InvalidBundleSpecification`` is raised when the specification is
131 not syntactically well formed.
131 not syntactically well formed.
132
132
133 An ``UnsupportedBundleSpecification`` is raised when the compression or
133 An ``UnsupportedBundleSpecification`` is raised when the compression or
134 bundle type/version is not recognized.
134 bundle type/version is not recognized.
135
135
136 Note: this function will likely eventually return a more complex data
136 Note: this function will likely eventually return a more complex data
137 structure, including bundle2 part information.
137 structure, including bundle2 part information.
138 """
138 """
139
139
140 def parseparams(s):
140 def parseparams(s):
141 if b';' not in s:
141 if b';' not in s:
142 return s, {}
142 return s, {}
143
143
144 params = {}
144 params = {}
145 version, paramstr = s.split(b';', 1)
145 version, paramstr = s.split(b';', 1)
146
146
147 for p in paramstr.split(b';'):
147 for p in paramstr.split(b';'):
148 if b'=' not in p:
148 if b'=' not in p:
149 raise error.InvalidBundleSpecification(
149 raise error.InvalidBundleSpecification(
150 _(
150 _(
151 b'invalid bundle specification: '
151 b'invalid bundle specification: '
152 b'missing "=" in parameter: %s'
152 b'missing "=" in parameter: %s'
153 )
153 )
154 % p
154 % p
155 )
155 )
156
156
157 key, value = p.split(b'=', 1)
157 key, value = p.split(b'=', 1)
158 key = urlreq.unquote(key)
158 key = urlreq.unquote(key)
159 value = urlreq.unquote(value)
159 value = urlreq.unquote(value)
160 params[key] = value
160 params[key] = value
161
161
162 return version, params
162 return version, params
163
163
164 if strict and b'-' not in spec:
164 if strict and b'-' not in spec:
165 raise error.InvalidBundleSpecification(
165 raise error.InvalidBundleSpecification(
166 _(
166 _(
167 b'invalid bundle specification; '
167 b'invalid bundle specification; '
168 b'must be prefixed with compression: %s'
168 b'must be prefixed with compression: %s'
169 )
169 )
170 % spec
170 % spec
171 )
171 )
172
172
173 if b'-' in spec:
173 if b'-' in spec:
174 compression, version = spec.split(b'-', 1)
174 compression, version = spec.split(b'-', 1)
175
175
176 if compression not in util.compengines.supportedbundlenames:
176 if compression not in util.compengines.supportedbundlenames:
177 raise error.UnsupportedBundleSpecification(
177 raise error.UnsupportedBundleSpecification(
178 _(b'%s compression is not supported') % compression
178 _(b'%s compression is not supported') % compression
179 )
179 )
180
180
181 version, params = parseparams(version)
181 version, params = parseparams(version)
182
182
183 if version not in _bundlespeccgversions:
183 if version not in _bundlespeccgversions:
184 raise error.UnsupportedBundleSpecification(
184 raise error.UnsupportedBundleSpecification(
185 _(b'%s is not a recognized bundle version') % version
185 _(b'%s is not a recognized bundle version') % version
186 )
186 )
187 else:
187 else:
188 # Value could be just the compression or just the version, in which
188 # Value could be just the compression or just the version, in which
189 # case some defaults are assumed (but only when not in strict mode).
189 # case some defaults are assumed (but only when not in strict mode).
190 assert not strict
190 assert not strict
191
191
192 spec, params = parseparams(spec)
192 spec, params = parseparams(spec)
193
193
194 if spec in util.compengines.supportedbundlenames:
194 if spec in util.compengines.supportedbundlenames:
195 compression = spec
195 compression = spec
196 version = b'v1'
196 version = b'v1'
197 # Generaldelta repos require v2.
197 # Generaldelta repos require v2.
198 if b'generaldelta' in repo.requirements:
198 if b'generaldelta' in repo.requirements:
199 version = b'v2'
199 version = b'v2'
200 # Modern compression engines require v2.
200 # Modern compression engines require v2.
201 if compression not in _bundlespecv1compengines:
201 if compression not in _bundlespecv1compengines:
202 version = b'v2'
202 version = b'v2'
203 elif spec in _bundlespeccgversions:
203 elif spec in _bundlespeccgversions:
204 if spec == b'packed1':
204 if spec == b'packed1':
205 compression = b'none'
205 compression = b'none'
206 else:
206 else:
207 compression = b'bzip2'
207 compression = b'bzip2'
208 version = spec
208 version = spec
209 else:
209 else:
210 raise error.UnsupportedBundleSpecification(
210 raise error.UnsupportedBundleSpecification(
211 _(b'%s is not a recognized bundle specification') % spec
211 _(b'%s is not a recognized bundle specification') % spec
212 )
212 )
213
213
214 # Bundle version 1 only supports a known set of compression engines.
214 # Bundle version 1 only supports a known set of compression engines.
215 if version == b'v1' and compression not in _bundlespecv1compengines:
215 if version == b'v1' and compression not in _bundlespecv1compengines:
216 raise error.UnsupportedBundleSpecification(
216 raise error.UnsupportedBundleSpecification(
217 _(b'compression engine %s is not supported on v1 bundles')
217 _(b'compression engine %s is not supported on v1 bundles')
218 % compression
218 % compression
219 )
219 )
220
220
221 # The specification for packed1 can optionally declare the data formats
221 # The specification for packed1 can optionally declare the data formats
222 # required to apply it. If we see this metadata, compare against what the
222 # required to apply it. If we see this metadata, compare against what the
223 # repo supports and error if the bundle isn't compatible.
223 # repo supports and error if the bundle isn't compatible.
224 if version == b'packed1' and b'requirements' in params:
224 if version == b'packed1' and b'requirements' in params:
225 requirements = set(params[b'requirements'].split(b','))
225 requirements = set(params[b'requirements'].split(b','))
226 missingreqs = requirements - repo.supportedformats
226 missingreqs = requirements - repo.supportedformats
227 if missingreqs:
227 if missingreqs:
228 raise error.UnsupportedBundleSpecification(
228 raise error.UnsupportedBundleSpecification(
229 _(b'missing support for repository features: %s')
229 _(b'missing support for repository features: %s')
230 % b', '.join(sorted(missingreqs))
230 % b', '.join(sorted(missingreqs))
231 )
231 )
232
232
233 # Compute contentopts based on the version
233 # Compute contentopts based on the version
234 contentopts = _bundlespeccontentopts.get(version, {}).copy()
234 contentopts = _bundlespeccontentopts.get(version, {}).copy()
235
235
236 # Process the variants
236 # Process the variants
237 if b"stream" in params and params[b"stream"] == b"v2":
237 if b"stream" in params and params[b"stream"] == b"v2":
238 variant = _bundlespecvariants[b"streamv2"]
238 variant = _bundlespecvariants[b"streamv2"]
239 contentopts.update(variant)
239 contentopts.update(variant)
240
240
241 engine = util.compengines.forbundlename(compression)
241 engine = util.compengines.forbundlename(compression)
242 compression, wirecompression = engine.bundletype()
242 compression, wirecompression = engine.bundletype()
243 wireversion = _bundlespeccgversions[version]
243 wireversion = _bundlespeccgversions[version]
244
244
245 return bundlespec(
245 return bundlespec(
246 compression, wirecompression, version, wireversion, params, contentopts
246 compression, wirecompression, version, wireversion, params, contentopts
247 )
247 )
248
248
249
249
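# A minimal standalone sketch (for illustration only, not part of this change)
# of how a bundle specification string decomposes according to the grammar
# documented in parsebundlespec() above:
# <compression>-<type>[;<param0>[;<param1>]].  The helper name is hypothetical
# and URI-decoding of keys/values is omitted; real parsing should go through
# parsebundlespec() with a repo at hand.

def _splitbundlespec_sketch(spec):
    # b'zstd-v2;stream=v2' -> (b'zstd', b'v2', {b'stream': b'v2'})
    compression, rest = spec.split(b'-', 1)
    params = {}
    if b';' in rest:
        version, paramstr = rest.split(b';', 1)
        for p in paramstr.split(b';'):
            key, value = p.split(b'=', 1)
            params[key] = value
    else:
        version = rest
    return compression, version, params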
250 def readbundle(ui, fh, fname, vfs=None):
250 def readbundle(ui, fh, fname, vfs=None):
251 header = changegroup.readexactly(fh, 4)
251 header = changegroup.readexactly(fh, 4)
252
252
253 alg = None
253 alg = None
254 if not fname:
254 if not fname:
255 fname = b"stream"
255 fname = b"stream"
256 if not header.startswith(b'HG') and header.startswith(b'\0'):
256 if not header.startswith(b'HG') and header.startswith(b'\0'):
257 fh = changegroup.headerlessfixup(fh, header)
257 fh = changegroup.headerlessfixup(fh, header)
258 header = b"HG10"
258 header = b"HG10"
259 alg = b'UN'
259 alg = b'UN'
260 elif vfs:
260 elif vfs:
261 fname = vfs.join(fname)
261 fname = vfs.join(fname)
262
262
263 magic, version = header[0:2], header[2:4]
263 magic, version = header[0:2], header[2:4]
264
264
265 if magic != b'HG':
265 if magic != b'HG':
266 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
266 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
267 if version == b'10':
267 if version == b'10':
268 if alg is None:
268 if alg is None:
269 alg = changegroup.readexactly(fh, 2)
269 alg = changegroup.readexactly(fh, 2)
270 return changegroup.cg1unpacker(fh, alg)
270 return changegroup.cg1unpacker(fh, alg)
271 elif version.startswith(b'2'):
271 elif version.startswith(b'2'):
272 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
272 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
273 elif version == b'S1':
273 elif version == b'S1':
274 return streamclone.streamcloneapplier(fh)
274 return streamclone.streamcloneapplier(fh)
275 else:
275 else:
276 raise error.Abort(
276 raise error.Abort(
277 _(b'%s: unknown bundle version %s') % (fname, version)
277 _(b'%s: unknown bundle version %s') % (fname, version)
278 )
278 )
279
279
280
280
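# An illustrative sketch (assumption: for exposition only, not part of this
# change) of the header dispatch performed by readbundle() above: the first
# two bytes are the b'HG' magic and the next two select the unbundler.

def _bundleformat_sketch(header):
    magic, version = header[0:2], header[2:4]
    if magic != b'HG':
        return None                  # not a Mercurial bundle header
    if version == b'10':
        return b'changegroup-01'     # handled by changegroup.cg1unpacker
    if version.startswith(b'2'):
        return b'bundle2'            # handled by bundle2.getunbundler
    if version == b'S1':
        return b'streamclone'        # handled by streamclone.streamcloneapplier
    return b'unknown'

# _bundleformat_sketch(b'HG20') == b'bundle2'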
281 def getbundlespec(ui, fh):
281 def getbundlespec(ui, fh):
282 """Infer the bundlespec from a bundle file handle.
282 """Infer the bundlespec from a bundle file handle.
283
283
284 The input file handle is seeked and the original seek position is not
284 The input file handle is seeked and the original seek position is not
285 restored.
285 restored.
286 """
286 """
287
287
288 def speccompression(alg):
288 def speccompression(alg):
289 try:
289 try:
290 return util.compengines.forbundletype(alg).bundletype()[0]
290 return util.compengines.forbundletype(alg).bundletype()[0]
291 except KeyError:
291 except KeyError:
292 return None
292 return None
293
293
294 b = readbundle(ui, fh, None)
294 b = readbundle(ui, fh, None)
295 if isinstance(b, changegroup.cg1unpacker):
295 if isinstance(b, changegroup.cg1unpacker):
296 alg = b._type
296 alg = b._type
297 if alg == b'_truncatedBZ':
297 if alg == b'_truncatedBZ':
298 alg = b'BZ'
298 alg = b'BZ'
299 comp = speccompression(alg)
299 comp = speccompression(alg)
300 if not comp:
300 if not comp:
301 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
301 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
302 return b'%s-v1' % comp
302 return b'%s-v1' % comp
303 elif isinstance(b, bundle2.unbundle20):
303 elif isinstance(b, bundle2.unbundle20):
304 if b'Compression' in b.params:
304 if b'Compression' in b.params:
305 comp = speccompression(b.params[b'Compression'])
305 comp = speccompression(b.params[b'Compression'])
306 if not comp:
306 if not comp:
307 raise error.Abort(
307 raise error.Abort(
308 _(b'unknown compression algorithm: %s') % comp
308 _(b'unknown compression algorithm: %s') % comp
309 )
309 )
310 else:
310 else:
311 comp = b'none'
311 comp = b'none'
312
312
313 version = None
313 version = None
314 for part in b.iterparts():
314 for part in b.iterparts():
315 if part.type == b'changegroup':
315 if part.type == b'changegroup':
316 version = part.params[b'version']
316 version = part.params[b'version']
317 if version in (b'01', b'02'):
317 if version in (b'01', b'02'):
318 version = b'v2'
318 version = b'v2'
319 else:
319 else:
320 raise error.Abort(
320 raise error.Abort(
321 _(
321 _(
322 b'changegroup version %s does not have '
322 b'changegroup version %s does not have '
323 b'a known bundlespec'
323 b'a known bundlespec'
324 )
324 )
325 % version,
325 % version,
326 hint=_(b'try upgrading your Mercurial client'),
326 hint=_(b'try upgrading your Mercurial client'),
327 )
327 )
328 elif part.type == b'stream2' and version is None:
328 elif part.type == b'stream2' and version is None:
329 # A stream2 part has to be part of a v2 bundle
329 # A stream2 part has to be part of a v2 bundle
330 requirements = urlreq.unquote(part.params[b'requirements'])
330 requirements = urlreq.unquote(part.params[b'requirements'])
331 splitted = requirements.split()
331 splitted = requirements.split()
332 params = bundle2._formatrequirementsparams(splitted)
332 params = bundle2._formatrequirementsparams(splitted)
333 return b'none-v2;stream=v2;%s' % params
333 return b'none-v2;stream=v2;%s' % params
334
334
335 if not version:
335 if not version:
336 raise error.Abort(
336 raise error.Abort(
337 _(b'could not identify changegroup version in bundle')
337 _(b'could not identify changegroup version in bundle')
338 )
338 )
339
339
340 return b'%s-%s' % (comp, version)
340 return b'%s-%s' % (comp, version)
341 elif isinstance(b, streamclone.streamcloneapplier):
341 elif isinstance(b, streamclone.streamcloneapplier):
342 requirements = streamclone.readbundle1header(fh)[2]
342 requirements = streamclone.readbundle1header(fh)[2]
343 formatted = bundle2._formatrequirementsparams(requirements)
343 formatted = bundle2._formatrequirementsparams(requirements)
344 return b'none-packed1;%s' % formatted
344 return b'none-packed1;%s' % formatted
345 else:
345 else:
346 raise error.Abort(_(b'unknown bundle type: %s') % b)
346 raise error.Abort(_(b'unknown bundle type: %s') % b)
347
347
348
348
349 def _computeoutgoing(repo, heads, common):
349 def _computeoutgoing(repo, heads, common):
350 """Computes which revs are outgoing given a set of common
350 """Computes which revs are outgoing given a set of common
351 and a set of heads.
351 and a set of heads.
352
352
353 This is a separate function so extensions can have access to
353 This is a separate function so extensions can have access to
354 the logic.
354 the logic.
355
355
356 Returns a discovery.outgoing object.
356 Returns a discovery.outgoing object.
357 """
357 """
358 cl = repo.changelog
358 cl = repo.changelog
359 if common:
359 if common:
360 hasnode = cl.hasnode
360 hasnode = cl.hasnode
361 common = [n for n in common if hasnode(n)]
361 common = [n for n in common if hasnode(n)]
362 else:
362 else:
363 common = [nullid]
363 common = [nullid]
364 if not heads:
364 if not heads:
365 heads = cl.heads()
365 heads = cl.heads()
366 return discovery.outgoing(repo, common, heads)
366 return discovery.outgoing(repo, common, heads)
367
367
368
368
369 def _checkpublish(pushop):
369 def _checkpublish(pushop):
370 repo = pushop.repo
370 repo = pushop.repo
371 ui = repo.ui
371 ui = repo.ui
372 behavior = ui.config(b'experimental', b'auto-publish')
372 behavior = ui.config(b'experimental', b'auto-publish')
373 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
373 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
374 return
374 return
375 remotephases = listkeys(pushop.remote, b'phases')
375 remotephases = listkeys(pushop.remote, b'phases')
376 if not remotephases.get(b'publishing', False):
376 if not remotephases.get(b'publishing', False):
377 return
377 return
378
378
379 if pushop.revs is None:
379 if pushop.revs is None:
380 published = repo.filtered(b'served').revs(b'not public()')
380 published = repo.filtered(b'served').revs(b'not public()')
381 else:
381 else:
382 published = repo.revs(b'::%ln - public()', pushop.revs)
382 published = repo.revs(b'::%ln - public()', pushop.revs)
383 if published:
383 if published:
384 if behavior == b'warn':
384 if behavior == b'warn':
385 ui.warn(
385 ui.warn(
386 _(b'%i changesets about to be published\n') % len(published)
386 _(b'%i changesets about to be published\n') % len(published)
387 )
387 )
388 elif behavior == b'confirm':
388 elif behavior == b'confirm':
389 if ui.promptchoice(
389 if ui.promptchoice(
390 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
390 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
391 % len(published)
391 % len(published)
392 ):
392 ):
393 raise error.Abort(_(b'user quit'))
393 raise error.Abort(_(b'user quit'))
394 elif behavior == b'abort':
394 elif behavior == b'abort':
395 msg = _(b'push would publish %i changesets') % len(published)
395 msg = _(b'push would publish %i changesets') % len(published)
396 hint = _(
396 hint = _(
397 b"use --publish or adjust 'experimental.auto-publish'"
397 b"use --publish or adjust 'experimental.auto-publish'"
398 b" config"
398 b" config"
399 )
399 )
400 raise error.Abort(msg, hint=hint)
400 raise error.Abort(msg, hint=hint)
401
401
402
402
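# A hedged configuration example (assumption: standard hgrc syntax) for the
# behaviour checked by _checkpublish() above; the value names come from the
# code reading experimental.auto-publish:
#
#   [experimental]
#   auto-publish = confirm    # alternatives: warn, abort
#
# With 'confirm' the push prompts before publishing draft changesets on a
# publishing remote; with 'abort' it stops and hints at using --publish.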
403 def _forcebundle1(op):
403 def _forcebundle1(op):
404 """return true if a pull/push must use bundle1
404 """return true if a pull/push must use bundle1
405
405
406 This function is used to allow testing of the older bundle version"""
406 This function is used to allow testing of the older bundle version"""
407 ui = op.repo.ui
407 ui = op.repo.ui
408 # The goal of this config is to allow developers to choose the bundle
408 # The goal of this config is to allow developers to choose the bundle
409 # version used during exchange. This is especially handy during tests.
409 # version used during exchange. This is especially handy during tests.
410 # Value is a list of bundle versions to pick from; the highest version
410 # Value is a list of bundle versions to pick from; the highest version
411 # should be used.
411 # should be used.
412 #
412 #
413 # developer config: devel.legacy.exchange
413 # developer config: devel.legacy.exchange
414 exchange = ui.configlist(b'devel', b'legacy.exchange')
414 exchange = ui.configlist(b'devel', b'legacy.exchange')
415 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
415 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
416 return forcebundle1 or not op.remote.capable(b'bundle2')
416 return forcebundle1 or not op.remote.capable(b'bundle2')
417
417
418
418
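# A hedged example (assumption: standard hgrc syntax) of the developer knob
# read by _forcebundle1() above; listing only bundle1 in devel.legacy.exchange
# forces the legacy exchange path, which is mainly useful in tests:
#
#   [devel]
#   legacy.exchange = bundle1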
419 class pushoperation(object):
419 class pushoperation(object):
420 """An object that represents a single push operation
420 """An object that represents a single push operation
421
421
422 Its purpose is to carry push-related state and very common operations.
422 Its purpose is to carry push-related state and very common operations.
423
423
424 A new pushoperation should be created at the beginning of each push and
424 A new pushoperation should be created at the beginning of each push and
425 discarded afterward.
425 discarded afterward.
426 """
426 """
427
427
428 def __init__(
428 def __init__(
429 self,
429 self,
430 repo,
430 repo,
431 remote,
431 remote,
432 force=False,
432 force=False,
433 revs=None,
433 revs=None,
434 newbranch=False,
434 newbranch=False,
435 bookmarks=(),
435 bookmarks=(),
436 publish=False,
436 publish=False,
437 pushvars=None,
437 pushvars=None,
438 ):
438 ):
439 # repo we push from
439 # repo we push from
440 self.repo = repo
440 self.repo = repo
441 self.ui = repo.ui
441 self.ui = repo.ui
442 # repo we push to
442 # repo we push to
443 self.remote = remote
443 self.remote = remote
444 # force option provided
444 # force option provided
445 self.force = force
445 self.force = force
446 # revs to be pushed (None is "all")
446 # revs to be pushed (None is "all")
447 self.revs = revs
447 self.revs = revs
448 # bookmark explicitly pushed
448 # bookmark explicitly pushed
449 self.bookmarks = bookmarks
449 self.bookmarks = bookmarks
450 # allow push of new branch
450 # allow push of new branch
451 self.newbranch = newbranch
451 self.newbranch = newbranch
452 # steps already performed
452 # steps already performed
453 # (used to check what steps have been already performed through bundle2)
453 # (used to check what steps have been already performed through bundle2)
454 self.stepsdone = set()
454 self.stepsdone = set()
455 # Integer version of the changegroup push result
455 # Integer version of the changegroup push result
456 # - None means nothing to push
456 # - None means nothing to push
457 # - 0 means HTTP error
457 # - 0 means HTTP error
458 # - 1 means we pushed and remote head count is unchanged *or*
458 # - 1 means we pushed and remote head count is unchanged *or*
459 # we have outgoing changesets but refused to push
459 # we have outgoing changesets but refused to push
460 # - other values as described by addchangegroup()
460 # - other values as described by addchangegroup()
461 self.cgresult = None
461 self.cgresult = None
462 # Boolean value for the bookmark push
462 # Boolean value for the bookmark push
463 self.bkresult = None
463 self.bkresult = None
464 # discovery.outgoing object (contains common and outgoing data)
464 # discovery.outgoing object (contains common and outgoing data)
465 self.outgoing = None
465 self.outgoing = None
466 # all remote topological heads before the push
466 # all remote topological heads before the push
467 self.remoteheads = None
467 self.remoteheads = None
468 # Details of the remote branch pre and post push
468 # Details of the remote branch pre and post push
469 #
469 #
470 # mapping: {'branch': ([remoteheads],
470 # mapping: {'branch': ([remoteheads],
471 # [newheads],
471 # [newheads],
472 # [unsyncedheads],
472 # [unsyncedheads],
473 # [discardedheads])}
473 # [discardedheads])}
474 # - branch: the branch name
474 # - branch: the branch name
475 # - remoteheads: the list of remote heads known locally
475 # - remoteheads: the list of remote heads known locally
476 # None if the branch is new
476 # None if the branch is new
477 # - newheads: the new remote heads (known locally) with outgoing pushed
477 # - newheads: the new remote heads (known locally) with outgoing pushed
478 # - unsyncedheads: the list of remote heads unknown locally.
478 # - unsyncedheads: the list of remote heads unknown locally.
479 # - discardedheads: the list of remote heads made obsolete by the push
479 # - discardedheads: the list of remote heads made obsolete by the push
480 self.pushbranchmap = None
480 self.pushbranchmap = None
481 # testable as a boolean indicating if any nodes are missing locally.
481 # testable as a boolean indicating if any nodes are missing locally.
482 self.incoming = None
482 self.incoming = None
483 # summary of the remote phase situation
483 # summary of the remote phase situation
484 self.remotephases = None
484 self.remotephases = None
485 # phase changes that must be pushed alongside the changesets
485 # phase changes that must be pushed alongside the changesets
486 self.outdatedphases = None
486 self.outdatedphases = None
487 # phase changes that must be pushed if the changeset push fails
487 # phase changes that must be pushed if the changeset push fails
488 self.fallbackoutdatedphases = None
488 self.fallbackoutdatedphases = None
489 # outgoing obsmarkers
489 # outgoing obsmarkers
490 self.outobsmarkers = set()
490 self.outobsmarkers = set()
491 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
491 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
492 self.outbookmarks = []
492 self.outbookmarks = []
493 # transaction manager
493 # transaction manager
494 self.trmanager = None
494 self.trmanager = None
495 # map { pushkey partid -> callback handling failure}
495 # map { pushkey partid -> callback handling failure}
496 # used to handle exception from mandatory pushkey part failure
496 # used to handle exception from mandatory pushkey part failure
497 self.pkfailcb = {}
497 self.pkfailcb = {}
498 # an iterable of pushvars or None
498 # an iterable of pushvars or None
499 self.pushvars = pushvars
499 self.pushvars = pushvars
500 # publish pushed changesets
500 # publish pushed changesets
501 self.publish = publish
501 self.publish = publish
502
502
503 @util.propertycache
503 @util.propertycache
504 def futureheads(self):
504 def futureheads(self):
505 """future remote heads if the changeset push succeeds"""
505 """future remote heads if the changeset push succeeds"""
506 return self.outgoing.ancestorsof
506 return self.outgoing.ancestorsof
507
507
508 @util.propertycache
508 @util.propertycache
509 def fallbackheads(self):
509 def fallbackheads(self):
510 """future remote heads if the changeset push fails"""
510 """future remote heads if the changeset push fails"""
511 if self.revs is None:
511 if self.revs is None:
512 # no revs targeted for push, so all common heads are relevant
512 # no revs targeted for push, so all common heads are relevant
513 return self.outgoing.commonheads
513 return self.outgoing.commonheads
514 unfi = self.repo.unfiltered()
514 unfi = self.repo.unfiltered()
515 # I want cheads = heads(::ancestorsof and ::commonheads)
515 # I want cheads = heads(::ancestorsof and ::commonheads)
516 # (ancestorsof is revs with secret changeset filtered out)
516 # (ancestorsof is revs with secret changeset filtered out)
517 #
517 #
518 # This can be expressed as:
518 # This can be expressed as:
519 # cheads = ( (ancestorsof and ::commonheads)
519 # cheads = ( (ancestorsof and ::commonheads)
520 # + (commonheads and ::ancestorsof))"
520 # + (commonheads and ::ancestorsof))"
521 # )
521 # )
522 #
522 #
523 # while trying to push we already computed the following:
523 # while trying to push we already computed the following:
524 # common = (::commonheads)
524 # common = (::commonheads)
525 # missing = ((commonheads::ancestorsof) - commonheads)
525 # missing = ((commonheads::ancestorsof) - commonheads)
526 #
526 #
527 # We can pick:
527 # We can pick:
528 # * ancestorsof part of common (::commonheads)
528 # * ancestorsof part of common (::commonheads)
529 common = self.outgoing.common
529 common = self.outgoing.common
530 rev = self.repo.changelog.index.rev
530 rev = self.repo.changelog.index.rev
531 cheads = [node for node in self.revs if rev(node) in common]
531 cheads = [node for node in self.revs if rev(node) in common]
532 # and
532 # and
533 # * commonheads parents on missing
533 # * commonheads parents on missing
534 revset = unfi.set(
534 revset = unfi.set(
535 b'%ln and parents(roots(%ln))',
535 b'%ln and parents(roots(%ln))',
536 self.outgoing.commonheads,
536 self.outgoing.commonheads,
537 self.outgoing.missing,
537 self.outgoing.missing,
538 )
538 )
539 cheads.extend(c.node() for c in revset)
539 cheads.extend(c.node() for c in revset)
540 return cheads
540 return cheads
541
541
542 @property
542 @property
543 def commonheads(self):
543 def commonheads(self):
544 """set of all common heads after changeset bundle push"""
544 """set of all common heads after changeset bundle push"""
545 if self.cgresult:
545 if self.cgresult:
546 return self.futureheads
546 return self.futureheads
547 else:
547 else:
548 return self.fallbackheads
548 return self.fallbackheads
549
549
550
550
551 # mapping of message used when pushing bookmark
551 # mapping of message used when pushing bookmark
552 bookmsgmap = {
552 bookmsgmap = {
553 b'update': (
553 b'update': (
554 _(b"updating bookmark %s\n"),
554 _(b"updating bookmark %s\n"),
555 _(b'updating bookmark %s failed!\n'),
555 _(b'updating bookmark %s failed!\n'),
556 ),
556 ),
557 b'export': (
557 b'export': (
558 _(b"exporting bookmark %s\n"),
558 _(b"exporting bookmark %s\n"),
559 _(b'exporting bookmark %s failed!\n'),
559 _(b'exporting bookmark %s failed!\n'),
560 ),
560 ),
561 b'delete': (
561 b'delete': (
562 _(b"deleting remote bookmark %s\n"),
562 _(b"deleting remote bookmark %s\n"),
563 _(b'deleting remote bookmark %s failed!\n'),
563 _(b'deleting remote bookmark %s failed!\n'),
564 ),
564 ),
565 }
565 }
566
566
567
567
568 def push(
568 def push(
569 repo,
569 repo,
570 remote,
570 remote,
571 force=False,
571 force=False,
572 revs=None,
572 revs=None,
573 newbranch=False,
573 newbranch=False,
574 bookmarks=(),
574 bookmarks=(),
575 publish=False,
575 publish=False,
576 opargs=None,
576 opargs=None,
577 ):
577 ):
578 '''Push outgoing changesets (limited by revs) from a local
578 '''Push outgoing changesets (limited by revs) from a local
579 repository to remote. Return an integer:
579 repository to remote. Return an integer:
580 - None means nothing to push
580 - None means nothing to push
581 - 0 means HTTP error
581 - 0 means HTTP error
582 - 1 means we pushed and remote head count is unchanged *or*
582 - 1 means we pushed and remote head count is unchanged *or*
583 we have outgoing changesets but refused to push
583 we have outgoing changesets but refused to push
584 - other values as described by addchangegroup()
584 - other values as described by addchangegroup()
585 '''
585 '''
586 if opargs is None:
586 if opargs is None:
587 opargs = {}
587 opargs = {}
588 pushop = pushoperation(
588 pushop = pushoperation(
589 repo,
589 repo,
590 remote,
590 remote,
591 force,
591 force,
592 revs,
592 revs,
593 newbranch,
593 newbranch,
594 bookmarks,
594 bookmarks,
595 publish,
595 publish,
596 **pycompat.strkwargs(opargs)
596 **pycompat.strkwargs(opargs)
597 )
597 )
598 if pushop.remote.local():
598 if pushop.remote.local():
599 missing = (
599 missing = (
600 set(pushop.repo.requirements) - pushop.remote.local().supported
600 set(pushop.repo.requirements) - pushop.remote.local().supported
601 )
601 )
602 if missing:
602 if missing:
603 msg = _(
603 msg = _(
604 b"required features are not"
604 b"required features are not"
605 b" supported in the destination:"
605 b" supported in the destination:"
606 b" %s"
606 b" %s"
607 ) % (b', '.join(sorted(missing)))
607 ) % (b', '.join(sorted(missing)))
608 raise error.Abort(msg)
608 raise error.Abort(msg)
609
609
610 if not pushop.remote.canpush():
610 if not pushop.remote.canpush():
611 raise error.Abort(_(b"destination does not support push"))
611 raise error.Abort(_(b"destination does not support push"))
612
612
613 if not pushop.remote.capable(b'unbundle'):
613 if not pushop.remote.capable(b'unbundle'):
614 raise error.Abort(
614 raise error.Abort(
615 _(
615 _(
616 b'cannot push: destination does not support the '
616 b'cannot push: destination does not support the '
617 b'unbundle wire protocol command'
617 b'unbundle wire protocol command'
618 )
618 )
619 )
619 )
620
620
621 # get lock as we might write phase data
621 # get lock as we might write phase data
622 wlock = lock = None
622 wlock = lock = None
623 try:
623 try:
624 # bundle2 push may receive a reply bundle touching bookmarks
624 # bundle2 push may receive a reply bundle touching bookmarks
625 # requiring the wlock. Take it now to ensure proper ordering.
625 # requiring the wlock. Take it now to ensure proper ordering.
626 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
626 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
627 if (
627 if (
628 (not _forcebundle1(pushop))
628 (not _forcebundle1(pushop))
629 and maypushback
629 and maypushback
630 and not bookmod.bookmarksinstore(repo)
630 and not bookmod.bookmarksinstore(repo)
631 ):
631 ):
632 wlock = pushop.repo.wlock()
632 wlock = pushop.repo.wlock()
633 lock = pushop.repo.lock()
633 lock = pushop.repo.lock()
634 pushop.trmanager = transactionmanager(
634 pushop.trmanager = transactionmanager(
635 pushop.repo, b'push-response', pushop.remote.url()
635 pushop.repo, b'push-response', pushop.remote.url()
636 )
636 )
637 except error.LockUnavailable as err:
637 except error.LockUnavailable as err:
638 # source repo cannot be locked.
638 # source repo cannot be locked.
639 # We do not abort the push, but just disable the local phase
639 # We do not abort the push, but just disable the local phase
640 # synchronisation.
640 # synchronisation.
641 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
641 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
642 err
642 err
643 )
643 )
644 pushop.ui.debug(msg)
644 pushop.ui.debug(msg)
645
645
646 with wlock or util.nullcontextmanager():
646 with wlock or util.nullcontextmanager():
647 with lock or util.nullcontextmanager():
647 with lock or util.nullcontextmanager():
648 with pushop.trmanager or util.nullcontextmanager():
648 with pushop.trmanager or util.nullcontextmanager():
649 pushop.repo.checkpush(pushop)
649 pushop.repo.checkpush(pushop)
650 _checkpublish(pushop)
650 _checkpublish(pushop)
651 _pushdiscovery(pushop)
651 _pushdiscovery(pushop)
652 if not pushop.force:
652 if not pushop.force:
653 _checksubrepostate(pushop)
653 _checksubrepostate(pushop)
654 if not _forcebundle1(pushop):
654 if not _forcebundle1(pushop):
655 _pushbundle2(pushop)
655 _pushbundle2(pushop)
656 _pushchangeset(pushop)
656 _pushchangeset(pushop)
657 _pushsyncphase(pushop)
657 _pushsyncphase(pushop)
658 _pushobsolete(pushop)
658 _pushobsolete(pushop)
659 _pushbookmark(pushop)
659 _pushbookmark(pushop)
660
660
661 if repo.ui.configbool(b'experimental', b'remotenames'):
661 if repo.ui.configbool(b'experimental', b'remotenames'):
662 logexchange.pullremotenames(repo, remote)
662 logexchange.pullremotenames(repo, remote)
663
663
664 return pushop
664 return pushop
665
665
666
666
667 # list of steps to perform discovery before push
667 # list of steps to perform discovery before push
668 pushdiscoveryorder = []
668 pushdiscoveryorder = []
669
669
670 # Mapping between step name and function
670 # Mapping between step name and function
671 #
671 #
672 # This exists to help extensions wrap steps if necessary
672 # This exists to help extensions wrap steps if necessary
673 pushdiscoverymapping = {}
673 pushdiscoverymapping = {}
674
674
675
675
676 def pushdiscovery(stepname):
676 def pushdiscovery(stepname):
677 """decorator for function performing discovery before push
677 """decorator for function performing discovery before push
678
678
679 The function is added to the step -> function mapping and appended to the
679 The function is added to the step -> function mapping and appended to the
680 list of steps. Beware that decorated functions will be added in order (this
680 list of steps. Beware that decorated functions will be added in order (this
681 may matter).
681 may matter).
682
682
683 You can only use this decorator for a new step; if you want to wrap a step
683 You can only use this decorator for a new step; if you want to wrap a step
684 from an extension, change the pushdiscoverymapping dictionary directly."""
684 from an extension, change the pushdiscoverymapping dictionary directly."""
685
685
686 def dec(func):
686 def dec(func):
687 assert stepname not in pushdiscoverymapping
687 assert stepname not in pushdiscoverymapping
688 pushdiscoverymapping[stepname] = func
688 pushdiscoverymapping[stepname] = func
689 pushdiscoveryorder.append(stepname)
689 pushdiscoveryorder.append(stepname)
690 return func
690 return func
691
691
692 return dec
692 return dec
693
693
694
694
695 def _pushdiscovery(pushop):
695 def _pushdiscovery(pushop):
696 """Run all discovery steps"""
696 """Run all discovery steps"""
697 for stepname in pushdiscoveryorder:
697 for stepname in pushdiscoveryorder:
698 step = pushdiscoverymapping[stepname]
698 step = pushdiscoverymapping[stepname]
699 step(pushop)
699 step(pushop)
700
700
701
701
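# A minimal sketch (for illustration only, not part of this change) of how an
# extension could hook into the discovery steps run by _pushdiscovery() above.
# The step name b'example' and the function body are hypothetical; a real
# extension should pick a unique name, since pushdiscovery() asserts the name
# is not already registered.

@pushdiscovery(b'example')
def _pushdiscoveryexample(pushop):
    """hypothetical extra discovery step, run in registration order"""
    pushop.ui.debug(b'example push discovery step ran\n')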
702 def _checksubrepostate(pushop):
702 def _checksubrepostate(pushop):
703 """Ensure all outgoing referenced subrepo revisions are present locally"""
703 """Ensure all outgoing referenced subrepo revisions are present locally"""
704 for n in pushop.outgoing.missing:
704 for n in pushop.outgoing.missing:
705 ctx = pushop.repo[n]
705 ctx = pushop.repo[n]
706
706
707 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
707 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
708 for subpath in sorted(ctx.substate):
708 for subpath in sorted(ctx.substate):
709 sub = ctx.sub(subpath)
709 sub = ctx.sub(subpath)
710 sub.verify(onpush=True)
710 sub.verify(onpush=True)
711
711
712
712
713 @pushdiscovery(b'changeset')
713 @pushdiscovery(b'changeset')
714 def _pushdiscoverychangeset(pushop):
714 def _pushdiscoverychangeset(pushop):
715 """discover the changesets that need to be pushed"""
715 """discover the changesets that need to be pushed"""
716 fci = discovery.findcommonincoming
716 fci = discovery.findcommonincoming
717 if pushop.revs:
717 if pushop.revs:
718 commoninc = fci(
718 commoninc = fci(
719 pushop.repo,
719 pushop.repo,
720 pushop.remote,
720 pushop.remote,
721 force=pushop.force,
721 force=pushop.force,
722 ancestorsof=pushop.revs,
722 ancestorsof=pushop.revs,
723 )
723 )
724 else:
724 else:
725 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
725 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
726 common, inc, remoteheads = commoninc
726 common, inc, remoteheads = commoninc
727 fco = discovery.findcommonoutgoing
727 fco = discovery.findcommonoutgoing
728 outgoing = fco(
728 outgoing = fco(
729 pushop.repo,
729 pushop.repo,
730 pushop.remote,
730 pushop.remote,
731 onlyheads=pushop.revs,
731 onlyheads=pushop.revs,
732 commoninc=commoninc,
732 commoninc=commoninc,
733 force=pushop.force,
733 force=pushop.force,
734 )
734 )
735 pushop.outgoing = outgoing
735 pushop.outgoing = outgoing
736 pushop.remoteheads = remoteheads
736 pushop.remoteheads = remoteheads
737 pushop.incoming = inc
737 pushop.incoming = inc
738
738
739
739
740 @pushdiscovery(b'phase')
740 @pushdiscovery(b'phase')
741 def _pushdiscoveryphase(pushop):
741 def _pushdiscoveryphase(pushop):
742 """discover the phase that needs to be pushed
742 """discover the phase that needs to be pushed
743
743
744 (computed for both the success and failure cases of the changeset push)"""
744 (computed for both the success and failure cases of the changeset push)"""
745 outgoing = pushop.outgoing
745 outgoing = pushop.outgoing
746 unfi = pushop.repo.unfiltered()
746 unfi = pushop.repo.unfiltered()
747 remotephases = listkeys(pushop.remote, b'phases')
747 remotephases = listkeys(pushop.remote, b'phases')
748
748
749 if (
749 if (
750 pushop.ui.configbool(b'ui', b'_usedassubrepo')
750 pushop.ui.configbool(b'ui', b'_usedassubrepo')
751 and remotephases # server supports phases
751 and remotephases # server supports phases
752 and not pushop.outgoing.missing # no changesets to be pushed
752 and not pushop.outgoing.missing # no changesets to be pushed
753 and remotephases.get(b'publishing', False)
753 and remotephases.get(b'publishing', False)
754 ):
754 ):
755 # When:
755 # When:
756 # - this is a subrepo push
756 # - this is a subrepo push
757 # - and the remote supports phases
757 # - and the remote supports phases
758 # - and no changesets are to be pushed
758 # - and no changesets are to be pushed
759 # - and remote is publishing
759 # - and remote is publishing
760 # We may be in issue 3781 case!
760 # We may be in issue 3781 case!
761 # We drop the phase synchronisation normally done as a courtesy,
761 # We drop the phase synchronisation normally done as a courtesy,
762 # to avoid publishing changesets that may still be draft locally
762 # to avoid publishing changesets that may still be draft locally
763 # just because the remote is publishing.
763 # just because the remote is publishing.
764 pushop.outdatedphases = []
764 pushop.outdatedphases = []
765 pushop.fallbackoutdatedphases = []
765 pushop.fallbackoutdatedphases = []
766 return
766 return
767
767
768 pushop.remotephases = phases.remotephasessummary(
768 pushop.remotephases = phases.remotephasessummary(
769 pushop.repo, pushop.fallbackheads, remotephases
769 pushop.repo, pushop.fallbackheads, remotephases
770 )
770 )
771 droots = pushop.remotephases.draftroots
771 droots = pushop.remotephases.draftroots
772
772
773 extracond = b''
773 extracond = b''
774 if not pushop.remotephases.publishing:
774 if not pushop.remotephases.publishing:
775 extracond = b' and public()'
775 extracond = b' and public()'
776 revset = b'heads((%%ln::%%ln) %s)' % extracond
776 revset = b'heads((%%ln::%%ln) %s)' % extracond
777 # Get the list of all revs that are draft on the remote but public here.
777 # Get the list of all revs that are draft on the remote but public here.
778 # XXX Beware that the revset breaks if droots is not strictly made of
778 # XXX Beware that the revset breaks if droots is not strictly made of
779 # XXX roots; we may want to ensure it is, but that is costly
779 # XXX roots; we may want to ensure it is, but that is costly
780 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
780 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
781 if not pushop.remotephases.publishing and pushop.publish:
781 if not pushop.remotephases.publishing and pushop.publish:
782 future = list(
782 future = list(
783 unfi.set(
783 unfi.set(
784 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
784 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
785 )
785 )
786 )
786 )
787 elif not outgoing.missing:
787 elif not outgoing.missing:
788 future = fallback
788 future = fallback
789 else:
789 else:
790 # adds changeset we are going to push as draft
790 # adds changeset we are going to push as draft
791 #
791 #
792 # should not be necessary for a publishing server, but because of an
792 # should not be necessary for a publishing server, but because of an
793 # issue fixed in xxxxx we have to do it anyway.
793 # issue fixed in xxxxx we have to do it anyway.
794 fdroots = list(
794 fdroots = list(
795 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
795 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
796 )
796 )
797 fdroots = [f.node() for f in fdroots]
797 fdroots = [f.node() for f in fdroots]
798 future = list(unfi.set(revset, fdroots, pushop.futureheads))
798 future = list(unfi.set(revset, fdroots, pushop.futureheads))
799 pushop.outdatedphases = future
799 pushop.outdatedphases = future
800 pushop.fallbackoutdatedphases = fallback
800 pushop.fallbackoutdatedphases = fallback
801
801
802
802
803 @pushdiscovery(b'obsmarker')
803 @pushdiscovery(b'obsmarker')
804 def _pushdiscoveryobsmarkers(pushop):
804 def _pushdiscoveryobsmarkers(pushop):
805 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
805 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
806 return
806 return
807
807
808 if not pushop.repo.obsstore:
808 if not pushop.repo.obsstore:
809 return
809 return
810
810
811 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
811 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
812 return
812 return
813
813
814 repo = pushop.repo
814 repo = pushop.repo
815 # very naive computation that can be quite expensive on big repos.
815 # very naive computation that can be quite expensive on big repos.
816 # However, evolution is currently slow on them anyway.
816 # However, evolution is currently slow on them anyway.
817 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
817 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
818 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
818 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
819
819
820
820
821 @pushdiscovery(b'bookmarks')
821 @pushdiscovery(b'bookmarks')
822 def _pushdiscoverybookmarks(pushop):
822 def _pushdiscoverybookmarks(pushop):
823 ui = pushop.ui
823 ui = pushop.ui
824 repo = pushop.repo.unfiltered()
824 repo = pushop.repo.unfiltered()
825 remote = pushop.remote
825 remote = pushop.remote
826 ui.debug(b"checking for updated bookmarks\n")
826 ui.debug(b"checking for updated bookmarks\n")
827 ancestors = ()
827 ancestors = ()
828 if pushop.revs:
828 if pushop.revs:
829 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
829 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
830 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
830 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
831
831
832 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
832 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
833
833
834 explicit = {
834 explicit = {
835 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
835 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
836 }
836 }
837
837
838 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
838 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
839 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
839 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
840
840
841
841
842 def _processcompared(pushop, pushed, explicit, remotebms, comp):
842 def _processcompared(pushop, pushed, explicit, remotebms, comp):
843 """decide which bookmarks to push to the remote repo
843 """decide which bookmarks to push to the remote repo
844
844
845 Exists to help extensions alter this behavior.
845 Exists to help extensions alter this behavior.
846 """
846 """
847 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
847 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
848
848
849 repo = pushop.repo
849 repo = pushop.repo
850
850
851 for b, scid, dcid in advsrc:
851 for b, scid, dcid in advsrc:
852 if b in explicit:
852 if b in explicit:
853 explicit.remove(b)
853 explicit.remove(b)
854 if not pushed or repo[scid].rev() in pushed:
854 if not pushed or repo[scid].rev() in pushed:
855 pushop.outbookmarks.append((b, dcid, scid))
855 pushop.outbookmarks.append((b, dcid, scid))
856 # search added bookmark
856 # search added bookmark
857 for b, scid, dcid in addsrc:
857 for b, scid, dcid in addsrc:
858 if b in explicit:
858 if b in explicit:
859 explicit.remove(b)
859 explicit.remove(b)
860 if bookmod.isdivergent(b):
860 if bookmod.isdivergent(b):
861 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
861 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
862 pushop.bkresult = 2
862 pushop.bkresult = 2
863 else:
863 else:
864 pushop.outbookmarks.append((b, b'', scid))
864 pushop.outbookmarks.append((b, b'', scid))
865 # search for overwritten bookmark
865 # search for overwritten bookmark
866 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
866 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
867 if b in explicit:
867 if b in explicit:
868 explicit.remove(b)
868 explicit.remove(b)
869 pushop.outbookmarks.append((b, dcid, scid))
869 pushop.outbookmarks.append((b, dcid, scid))
870 # search for bookmark to delete
870 # search for bookmark to delete
871 for b, scid, dcid in adddst:
871 for b, scid, dcid in adddst:
872 if b in explicit:
872 if b in explicit:
873 explicit.remove(b)
873 explicit.remove(b)
874 # treat as "deleted locally"
874 # treat as "deleted locally"
875 pushop.outbookmarks.append((b, dcid, b''))
875 pushop.outbookmarks.append((b, dcid, b''))
876 # identical bookmarks shouldn't get reported
876 # identical bookmarks shouldn't get reported
877 for b, scid, dcid in same:
877 for b, scid, dcid in same:
878 if b in explicit:
878 if b in explicit:
879 explicit.remove(b)
879 explicit.remove(b)
880
880
881 if explicit:
881 if explicit:
882 explicit = sorted(explicit)
882 explicit = sorted(explicit)
883 # we should probably list all of them
883 # we should probably list all of them
884 pushop.ui.warn(
884 pushop.ui.warn(
885 _(
885 _(
886 b'bookmark %s does not exist on the local '
886 b'bookmark %s does not exist on the local '
887 b'or remote repository!\n'
887 b'or remote repository!\n'
888 )
888 )
889 % explicit[0]
889 % explicit[0]
890 )
890 )
891 pushop.bkresult = 2
891 pushop.bkresult = 2
892
892
893 pushop.outbookmarks.sort()
893 pushop.outbookmarks.sort()
894
894
895
895
896 def _pushcheckoutgoing(pushop):
896 def _pushcheckoutgoing(pushop):
897 outgoing = pushop.outgoing
897 outgoing = pushop.outgoing
898 unfi = pushop.repo.unfiltered()
898 unfi = pushop.repo.unfiltered()
899 if not outgoing.missing:
899 if not outgoing.missing:
900 # nothing to push
900 # nothing to push
901 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
901 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
902 return False
902 return False
903 # something to push
903 # something to push
904 if not pushop.force:
904 if not pushop.force:
905 # if repo.obsstore == False --> no obsolete
905 # if repo.obsstore == False --> no obsolete
906 # then, save the iteration
906 # then, save the iteration
907 if unfi.obsstore:
907 if unfi.obsstore:
908 # these messages are here for 80 char limit reasons
908 # these messages are here for 80 char limit reasons
909 mso = _(b"push includes obsolete changeset: %s!")
909 mso = _(b"push includes obsolete changeset: %s!")
910 mspd = _(b"push includes phase-divergent changeset: %s!")
910 mspd = _(b"push includes phase-divergent changeset: %s!")
911 mscd = _(b"push includes content-divergent changeset: %s!")
911 mscd = _(b"push includes content-divergent changeset: %s!")
912 mst = {
912 mst = {
913 b"orphan": _(b"push includes orphan changeset: %s!"),
913 b"orphan": _(b"push includes orphan changeset: %s!"),
914 b"phase-divergent": mspd,
914 b"phase-divergent": mspd,
915 b"content-divergent": mscd,
915 b"content-divergent": mscd,
916 }
916 }
917 # If we are about to push and there is at least one
917 # If we are about to push and there is at least one
918 # obsolete or unstable changeset in missing, then at
918 # obsolete or unstable changeset in missing, then at
919 # least one of the missing heads will be obsolete or
919 # least one of the missing heads will be obsolete or
920 # unstable. So checking only the heads is ok.
920 # unstable. So checking only the heads is ok.
921 for node in outgoing.ancestorsof:
921 for node in outgoing.ancestorsof:
922 ctx = unfi[node]
922 ctx = unfi[node]
923 if ctx.obsolete():
923 if ctx.obsolete():
924 raise error.Abort(mso % ctx)
924 raise error.Abort(mso % ctx)
925 elif ctx.isunstable():
925 elif ctx.isunstable():
926 # TODO print more than one instability in the abort
926 # TODO print more than one instability in the abort
927 # message
927 # message
928 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
928 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
929
929
930 discovery.checkheads(pushop)
930 discovery.checkheads(pushop)
931 return True
931 return True
932
932
933
933
934 # List of names of steps to perform for an outgoing bundle2, order matters.
934 # List of names of steps to perform for an outgoing bundle2, order matters.
935 b2partsgenorder = []
935 b2partsgenorder = []
936
936
937 # Mapping between step name and function
937 # Mapping between step name and function
938 #
938 #
939 # This exists to help extensions wrap steps if necessary
939 # This exists to help extensions wrap steps if necessary
940 b2partsgenmapping = {}
940 b2partsgenmapping = {}
941
941
942
942
943 def b2partsgenerator(stepname, idx=None):
943 def b2partsgenerator(stepname, idx=None):
944 """decorator for function generating bundle2 part
944 """decorator for function generating bundle2 part
945
945
946 The function is added to the step -> function mapping and appended to the
946 The function is added to the step -> function mapping and appended to the
947 list of steps. Beware that decorated functions will be added in order
947 list of steps. Beware that decorated functions will be added in order
948 (this may matter).
948 (this may matter).
949
949
950 You can only use this decorator for new steps; if you want to wrap a step
950 You can only use this decorator for new steps; if you want to wrap a step
951 from an extension, modify the b2partsgenmapping dictionary directly."""
951 from an extension, modify the b2partsgenmapping dictionary directly."""
952
952
953 def dec(func):
953 def dec(func):
954 assert stepname not in b2partsgenmapping
954 assert stepname not in b2partsgenmapping
955 b2partsgenmapping[stepname] = func
955 b2partsgenmapping[stepname] = func
956 if idx is None:
956 if idx is None:
957 b2partsgenorder.append(stepname)
957 b2partsgenorder.append(stepname)
958 else:
958 else:
959 b2partsgenorder.insert(idx, stepname)
959 b2partsgenorder.insert(idx, stepname)
960 return func
960 return func
961
961
962 return dec
962 return dec
963
963
964
964
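# A minimal sketch of how an extension might use this decorator; the step
# name b'mydata' and the part type b'x-myext-mydata' are hypothetical, not
# part of this change:
#
#     @b2partsgenerator(b'mydata')
#     def _pushb2mydata(pushop, bundler):
#         if b'mydata' in pushop.stepsdone:
#             return
#         pushop.stepsdone.add(b'mydata')
#         bundler.newpart(b'x-myext-mydata', data=b'payload')
#
# To wrap an existing step instead (as the docstring suggests), replace its
# entry in b2partsgenmapping:
#
#     origgen = b2partsgenmapping[b'changeset']
#     def wrappedgen(pushop, bundler):
#         # extension-specific work could happen here
#         return origgen(pushop, bundler)
#     b2partsgenmapping[b'changeset'] = wrappedgen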
965 def _pushb2ctxcheckheads(pushop, bundler):
965 def _pushb2ctxcheckheads(pushop, bundler):
966 """Generate race condition checking parts
966 """Generate race condition checking parts
967
967
968 Exists as an independent function to aid extensions
968 Exists as an independent function to aid extensions
969 """
969 """
970 # * 'force' does not check for push races,
970 # * 'force' does not check for push races,
971 # * if we don't push anything, there is nothing to check.
971 # * if we don't push anything, there is nothing to check.
972 if not pushop.force and pushop.outgoing.ancestorsof:
972 if not pushop.force and pushop.outgoing.ancestorsof:
973 allowunrelated = b'related' in bundler.capabilities.get(
973 allowunrelated = b'related' in bundler.capabilities.get(
974 b'checkheads', ()
974 b'checkheads', ()
975 )
975 )
976 emptyremote = pushop.pushbranchmap is None
976 emptyremote = pushop.pushbranchmap is None
977 if not allowunrelated or emptyremote:
977 if not allowunrelated or emptyremote:
978 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
978 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
979 else:
979 else:
980 affected = set()
980 affected = set()
981 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
981 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
982 remoteheads, newheads, unsyncedheads, discardedheads = heads
982 remoteheads, newheads, unsyncedheads, discardedheads = heads
983 if remoteheads is not None:
983 if remoteheads is not None:
984 remote = set(remoteheads)
984 remote = set(remoteheads)
985 affected |= set(discardedheads) & remote
985 affected |= set(discardedheads) & remote
986 affected |= remote - set(newheads)
986 affected |= remote - set(newheads)
987 if affected:
987 if affected:
988 data = iter(sorted(affected))
988 data = iter(sorted(affected))
989 bundler.newpart(b'check:updated-heads', data=data)
989 bundler.newpart(b'check:updated-heads', data=data)
990
990
991
991
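# A small worked example of the 'affected' computation above, using
# hypothetical single-letter node names:
#
#     remoteheads    = {a, b}
#     newheads       = {b, c}     # b is kept, c is added by this push
#     discardedheads = {a}        # a is being replaced
#     affected       = ({a} & {a, b}) | ({a, b} - {b, c}) = {a}
#
# so the check:updated-heads part only lists the remote heads this push
# intends to replace, letting the server detect a race on exactly those.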
992 def _pushing(pushop):
992 def _pushing(pushop):
993 """return True if we are pushing anything"""
993 """return True if we are pushing anything"""
994 return bool(
994 return bool(
995 pushop.outgoing.missing
995 pushop.outgoing.missing
996 or pushop.outdatedphases
996 or pushop.outdatedphases
997 or pushop.outobsmarkers
997 or pushop.outobsmarkers
998 or pushop.outbookmarks
998 or pushop.outbookmarks
999 )
999 )
1000
1000
1001
1001
1002 @b2partsgenerator(b'check-bookmarks')
1002 @b2partsgenerator(b'check-bookmarks')
1003 def _pushb2checkbookmarks(pushop, bundler):
1003 def _pushb2checkbookmarks(pushop, bundler):
1004 """insert bookmark move checking"""
1004 """insert bookmark move checking"""
1005 if not _pushing(pushop) or pushop.force:
1005 if not _pushing(pushop) or pushop.force:
1006 return
1006 return
1007 b2caps = bundle2.bundle2caps(pushop.remote)
1007 b2caps = bundle2.bundle2caps(pushop.remote)
1008 hasbookmarkcheck = b'bookmarks' in b2caps
1008 hasbookmarkcheck = b'bookmarks' in b2caps
1009 if not (pushop.outbookmarks and hasbookmarkcheck):
1009 if not (pushop.outbookmarks and hasbookmarkcheck):
1010 return
1010 return
1011 data = []
1011 data = []
1012 for book, old, new in pushop.outbookmarks:
1012 for book, old, new in pushop.outbookmarks:
1013 data.append((book, old))
1013 data.append((book, old))
1014 checkdata = bookmod.binaryencode(data)
1014 checkdata = bookmod.binaryencode(data)
1015 bundler.newpart(b'check:bookmarks', data=checkdata)
1015 bundler.newpart(b'check:bookmarks', data=checkdata)
1016
1016
1017
1017
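# The check:bookmarks part built above carries (bookmark, expected node)
# pairs, e.g. [(b'@', oldnode)] for a hypothetical bookmark b'@'; the
# server compares them against its current bookmark values so the push can
# fail early if a bookmark moved remotely since discovery.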
1018 @b2partsgenerator(b'check-phases')
1018 @b2partsgenerator(b'check-phases')
1019 def _pushb2checkphases(pushop, bundler):
1019 def _pushb2checkphases(pushop, bundler):
1020 """insert phase move checking"""
1020 """insert phase move checking"""
1021 if not _pushing(pushop) or pushop.force:
1021 if not _pushing(pushop) or pushop.force:
1022 return
1022 return
1023 b2caps = bundle2.bundle2caps(pushop.remote)
1023 b2caps = bundle2.bundle2caps(pushop.remote)
1024 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1024 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1025 if pushop.remotephases is not None and hasphaseheads:
1025 if pushop.remotephases is not None and hasphaseheads:
1026 # check that the remote phase has not changed
1026 # check that the remote phase has not changed
1027 checks = {p: [] for p in phases.allphases}
1027 checks = {p: [] for p in phases.allphases}
1028 checks[phases.public].extend(pushop.remotephases.publicheads)
1028 checks[phases.public].extend(pushop.remotephases.publicheads)
1029 checks[phases.draft].extend(pushop.remotephases.draftroots)
1029 checks[phases.draft].extend(pushop.remotephases.draftroots)
1030 if any(pycompat.itervalues(checks)):
1030 if any(pycompat.itervalues(checks)):
1031 for phase in checks:
1031 for phase in checks:
1032 checks[phase].sort()
1032 checks[phase].sort()
1033 checkdata = phases.binaryencode(checks)
1033 checkdata = phases.binaryencode(checks)
1034 bundler.newpart(b'check:phases', data=checkdata)
1034 bundler.newpart(b'check:phases', data=checkdata)
1035
1035
1036
1036
1037 @b2partsgenerator(b'changeset')
1037 @b2partsgenerator(b'changeset')
1038 def _pushb2ctx(pushop, bundler):
1038 def _pushb2ctx(pushop, bundler):
1039 """handle changegroup push through bundle2
1039 """handle changegroup push through bundle2
1040
1040
1041 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1041 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1042 """
1042 """
1043 if b'changesets' in pushop.stepsdone:
1043 if b'changesets' in pushop.stepsdone:
1044 return
1044 return
1045 pushop.stepsdone.add(b'changesets')
1045 pushop.stepsdone.add(b'changesets')
1046 # Send known heads to the server for race detection.
1046 # Send known heads to the server for race detection.
1047 if not _pushcheckoutgoing(pushop):
1047 if not _pushcheckoutgoing(pushop):
1048 return
1048 return
1049 pushop.repo.prepushoutgoinghooks(pushop)
1049 pushop.repo.prepushoutgoinghooks(pushop)
1050
1050
1051 _pushb2ctxcheckheads(pushop, bundler)
1051 _pushb2ctxcheckheads(pushop, bundler)
1052
1052
1053 b2caps = bundle2.bundle2caps(pushop.remote)
1053 b2caps = bundle2.bundle2caps(pushop.remote)
1054 version = b'01'
1054 version = b'01'
1055 cgversions = b2caps.get(b'changegroup')
1055 cgversions = b2caps.get(b'changegroup')
1056 if cgversions: # 3.1 and 3.2 ship with an empty value
1056 if cgversions: # 3.1 and 3.2 ship with an empty value
1057 cgversions = [
1057 cgversions = [
1058 v
1058 v
1059 for v in cgversions
1059 for v in cgversions
1060 if v in changegroup.supportedoutgoingversions(pushop.repo)
1060 if v in changegroup.supportedoutgoingversions(pushop.repo)
1061 ]
1061 ]
1062 if not cgversions:
1062 if not cgversions:
1063 raise error.Abort(_(b'no common changegroup version'))
1063 raise error.Abort(_(b'no common changegroup version'))
1064 version = max(cgversions)
1064 version = max(cgversions)
1065 cgstream = changegroup.makestream(
1065 cgstream = changegroup.makestream(
1066 pushop.repo, pushop.outgoing, version, b'push'
1066 pushop.repo, pushop.outgoing, version, b'push'
1067 )
1067 )
1068 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1068 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1069 if cgversions:
1069 if cgversions:
1070 cgpart.addparam(b'version', version)
1070 cgpart.addparam(b'version', version)
1071 if repository.TREEMANIFEST_REQUIREMENT in pushop.repo.requirements:
1071 if requirements.TREEMANIFEST_REQUIREMENT in pushop.repo.requirements:
1072 cgpart.addparam(b'treemanifest', b'1')
1072 cgpart.addparam(b'treemanifest', b'1')
1073 if b'exp-sidedata-flag' in pushop.repo.requirements:
1073 if b'exp-sidedata-flag' in pushop.repo.requirements:
1074 cgpart.addparam(b'exp-sidedata', b'1')
1074 cgpart.addparam(b'exp-sidedata', b'1')
1075
1075
1076 def handlereply(op):
1076 def handlereply(op):
1077 """extract addchangegroup returns from server reply"""
1077 """extract addchangegroup returns from server reply"""
1078 cgreplies = op.records.getreplies(cgpart.id)
1078 cgreplies = op.records.getreplies(cgpart.id)
1079 assert len(cgreplies[b'changegroup']) == 1
1079 assert len(cgreplies[b'changegroup']) == 1
1080 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1080 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1081
1081
1082 return handlereply
1082 return handlereply
1083
1083
1084
1084
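# Version negotiation sketch for _pushb2ctx above, with made-up capability
# values: if the remote advertises changegroup versions [b'01', b'02'] and
# the local repo supports {b'01', b'02', b'03'}, the filtered list is
# [b'01', b'02'] and max() selects b'02'; an empty advertisement (3.1/3.2
# servers) keeps the default b'01' and no 'version' parameter is added.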
1085 @b2partsgenerator(b'phase')
1085 @b2partsgenerator(b'phase')
1086 def _pushb2phases(pushop, bundler):
1086 def _pushb2phases(pushop, bundler):
1087 """handle phase push through bundle2"""
1087 """handle phase push through bundle2"""
1088 if b'phases' in pushop.stepsdone:
1088 if b'phases' in pushop.stepsdone:
1089 return
1089 return
1090 b2caps = bundle2.bundle2caps(pushop.remote)
1090 b2caps = bundle2.bundle2caps(pushop.remote)
1091 ui = pushop.repo.ui
1091 ui = pushop.repo.ui
1092
1092
1093 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1093 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1094 haspushkey = b'pushkey' in b2caps
1094 haspushkey = b'pushkey' in b2caps
1095 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1095 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1096
1096
1097 if hasphaseheads and not legacyphase:
1097 if hasphaseheads and not legacyphase:
1098 return _pushb2phaseheads(pushop, bundler)
1098 return _pushb2phaseheads(pushop, bundler)
1099 elif haspushkey:
1099 elif haspushkey:
1100 return _pushb2phasespushkey(pushop, bundler)
1100 return _pushb2phasespushkey(pushop, bundler)
1101
1101
1102
1102
1103 def _pushb2phaseheads(pushop, bundler):
1103 def _pushb2phaseheads(pushop, bundler):
1104 """push phase information through a bundle2 - binary part"""
1104 """push phase information through a bundle2 - binary part"""
1105 pushop.stepsdone.add(b'phases')
1105 pushop.stepsdone.add(b'phases')
1106 if pushop.outdatedphases:
1106 if pushop.outdatedphases:
1107 updates = {p: [] for p in phases.allphases}
1107 updates = {p: [] for p in phases.allphases}
1108 updates[0].extend(h.node() for h in pushop.outdatedphases)
1108 updates[0].extend(h.node() for h in pushop.outdatedphases)
1109 phasedata = phases.binaryencode(updates)
1109 phasedata = phases.binaryencode(updates)
1110 bundler.newpart(b'phase-heads', data=phasedata)
1110 bundler.newpart(b'phase-heads', data=phasedata)
1111
1111
1112
1112
1113 def _pushb2phasespushkey(pushop, bundler):
1113 def _pushb2phasespushkey(pushop, bundler):
1114 """push phase information through a bundle2 - pushkey part"""
1114 """push phase information through a bundle2 - pushkey part"""
1115 pushop.stepsdone.add(b'phases')
1115 pushop.stepsdone.add(b'phases')
1116 part2node = []
1116 part2node = []
1117
1117
1118 def handlefailure(pushop, exc):
1118 def handlefailure(pushop, exc):
1119 targetid = int(exc.partid)
1119 targetid = int(exc.partid)
1120 for partid, node in part2node:
1120 for partid, node in part2node:
1121 if partid == targetid:
1121 if partid == targetid:
1122 raise error.Abort(_(b'updating %s to public failed') % node)
1122 raise error.Abort(_(b'updating %s to public failed') % node)
1123
1123
1124 enc = pushkey.encode
1124 enc = pushkey.encode
1125 for newremotehead in pushop.outdatedphases:
1125 for newremotehead in pushop.outdatedphases:
1126 part = bundler.newpart(b'pushkey')
1126 part = bundler.newpart(b'pushkey')
1127 part.addparam(b'namespace', enc(b'phases'))
1127 part.addparam(b'namespace', enc(b'phases'))
1128 part.addparam(b'key', enc(newremotehead.hex()))
1128 part.addparam(b'key', enc(newremotehead.hex()))
1129 part.addparam(b'old', enc(b'%d' % phases.draft))
1129 part.addparam(b'old', enc(b'%d' % phases.draft))
1130 part.addparam(b'new', enc(b'%d' % phases.public))
1130 part.addparam(b'new', enc(b'%d' % phases.public))
1131 part2node.append((part.id, newremotehead))
1131 part2node.append((part.id, newremotehead))
1132 pushop.pkfailcb[part.id] = handlefailure
1132 pushop.pkfailcb[part.id] = handlefailure
1133
1133
1134 def handlereply(op):
1134 def handlereply(op):
1135 for partid, node in part2node:
1135 for partid, node in part2node:
1136 partrep = op.records.getreplies(partid)
1136 partrep = op.records.getreplies(partid)
1137 results = partrep[b'pushkey']
1137 results = partrep[b'pushkey']
1138 assert len(results) <= 1
1138 assert len(results) <= 1
1139 msg = None
1139 msg = None
1140 if not results:
1140 if not results:
1141 msg = _(b'server ignored update of %s to public!\n') % node
1141 msg = _(b'server ignored update of %s to public!\n') % node
1142 elif not int(results[0][b'return']):
1142 elif not int(results[0][b'return']):
1143 msg = _(b'updating %s to public failed!\n') % node
1143 msg = _(b'updating %s to public failed!\n') % node
1144 if msg is not None:
1144 if msg is not None:
1145 pushop.ui.warn(msg)
1145 pushop.ui.warn(msg)
1146
1146
1147 return handlereply
1147 return handlereply
1148
1148
1149
1149
1150 @b2partsgenerator(b'obsmarkers')
1150 @b2partsgenerator(b'obsmarkers')
1151 def _pushb2obsmarkers(pushop, bundler):
1151 def _pushb2obsmarkers(pushop, bundler):
1152 if b'obsmarkers' in pushop.stepsdone:
1152 if b'obsmarkers' in pushop.stepsdone:
1153 return
1153 return
1154 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1154 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1155 if obsolete.commonversion(remoteversions) is None:
1155 if obsolete.commonversion(remoteversions) is None:
1156 return
1156 return
1157 pushop.stepsdone.add(b'obsmarkers')
1157 pushop.stepsdone.add(b'obsmarkers')
1158 if pushop.outobsmarkers:
1158 if pushop.outobsmarkers:
1159 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1159 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1160 bundle2.buildobsmarkerspart(bundler, markers)
1160 bundle2.buildobsmarkerspart(bundler, markers)
1161
1161
1162
1162
1163 @b2partsgenerator(b'bookmarks')
1163 @b2partsgenerator(b'bookmarks')
1164 def _pushb2bookmarks(pushop, bundler):
1164 def _pushb2bookmarks(pushop, bundler):
1165 """handle bookmark push through bundle2"""
1165 """handle bookmark push through bundle2"""
1166 if b'bookmarks' in pushop.stepsdone:
1166 if b'bookmarks' in pushop.stepsdone:
1167 return
1167 return
1168 b2caps = bundle2.bundle2caps(pushop.remote)
1168 b2caps = bundle2.bundle2caps(pushop.remote)
1169
1169
1170 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1170 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1171 legacybooks = b'bookmarks' in legacy
1171 legacybooks = b'bookmarks' in legacy
1172
1172
1173 if not legacybooks and b'bookmarks' in b2caps:
1173 if not legacybooks and b'bookmarks' in b2caps:
1174 return _pushb2bookmarkspart(pushop, bundler)
1174 return _pushb2bookmarkspart(pushop, bundler)
1175 elif b'pushkey' in b2caps:
1175 elif b'pushkey' in b2caps:
1176 return _pushb2bookmarkspushkey(pushop, bundler)
1176 return _pushb2bookmarkspushkey(pushop, bundler)
1177
1177
1178
1178
1179 def _bmaction(old, new):
1179 def _bmaction(old, new):
1180 """small utility for bookmark pushing"""
1180 """small utility for bookmark pushing"""
1181 if not old:
1181 if not old:
1182 return b'export'
1182 return b'export'
1183 elif not new:
1183 elif not new:
1184 return b'delete'
1184 return b'delete'
1185 return b'update'
1185 return b'update'
1186
1186
1187
1187
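# _bmaction summarized (old/new are the remote's previous node and the node
# being pushed, or an empty value when absent):
#
#     old       new       action
#     (empty)   node      b'export'   bookmark is new on the remote
#     node      (empty)   b'delete'
#     node      node      b'update'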
1188 def _abortonsecretctx(pushop, node, b):
1188 def _abortonsecretctx(pushop, node, b):
1189 """abort if a given bookmark points to a secret changeset"""
1189 """abort if a given bookmark points to a secret changeset"""
1190 if node and pushop.repo[node].phase() == phases.secret:
1190 if node and pushop.repo[node].phase() == phases.secret:
1191 raise error.Abort(
1191 raise error.Abort(
1192 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1192 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1193 )
1193 )
1194
1194
1195
1195
1196 def _pushb2bookmarkspart(pushop, bundler):
1196 def _pushb2bookmarkspart(pushop, bundler):
1197 pushop.stepsdone.add(b'bookmarks')
1197 pushop.stepsdone.add(b'bookmarks')
1198 if not pushop.outbookmarks:
1198 if not pushop.outbookmarks:
1199 return
1199 return
1200
1200
1201 allactions = []
1201 allactions = []
1202 data = []
1202 data = []
1203 for book, old, new in pushop.outbookmarks:
1203 for book, old, new in pushop.outbookmarks:
1204 _abortonsecretctx(pushop, new, book)
1204 _abortonsecretctx(pushop, new, book)
1205 data.append((book, new))
1205 data.append((book, new))
1206 allactions.append((book, _bmaction(old, new)))
1206 allactions.append((book, _bmaction(old, new)))
1207 checkdata = bookmod.binaryencode(data)
1207 checkdata = bookmod.binaryencode(data)
1208 bundler.newpart(b'bookmarks', data=checkdata)
1208 bundler.newpart(b'bookmarks', data=checkdata)
1209
1209
1210 def handlereply(op):
1210 def handlereply(op):
1211 ui = pushop.ui
1211 ui = pushop.ui
1212 # if success
1212 # if success
1213 for book, action in allactions:
1213 for book, action in allactions:
1214 ui.status(bookmsgmap[action][0] % book)
1214 ui.status(bookmsgmap[action][0] % book)
1215
1215
1216 return handlereply
1216 return handlereply
1217
1217
1218
1218
1219 def _pushb2bookmarkspushkey(pushop, bundler):
1219 def _pushb2bookmarkspushkey(pushop, bundler):
1220 pushop.stepsdone.add(b'bookmarks')
1220 pushop.stepsdone.add(b'bookmarks')
1221 part2book = []
1221 part2book = []
1222 enc = pushkey.encode
1222 enc = pushkey.encode
1223
1223
1224 def handlefailure(pushop, exc):
1224 def handlefailure(pushop, exc):
1225 targetid = int(exc.partid)
1225 targetid = int(exc.partid)
1226 for partid, book, action in part2book:
1226 for partid, book, action in part2book:
1227 if partid == targetid:
1227 if partid == targetid:
1228 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1228 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1229 # we should not be called for parts we did not generate
1229 # we should not be called for parts we did not generate
1230 assert False
1230 assert False
1231
1231
1232 for book, old, new in pushop.outbookmarks:
1232 for book, old, new in pushop.outbookmarks:
1233 _abortonsecretctx(pushop, new, book)
1233 _abortonsecretctx(pushop, new, book)
1234 part = bundler.newpart(b'pushkey')
1234 part = bundler.newpart(b'pushkey')
1235 part.addparam(b'namespace', enc(b'bookmarks'))
1235 part.addparam(b'namespace', enc(b'bookmarks'))
1236 part.addparam(b'key', enc(book))
1236 part.addparam(b'key', enc(book))
1237 part.addparam(b'old', enc(hex(old)))
1237 part.addparam(b'old', enc(hex(old)))
1238 part.addparam(b'new', enc(hex(new)))
1238 part.addparam(b'new', enc(hex(new)))
1239 action = b'update'
1239 action = b'update'
1240 if not old:
1240 if not old:
1241 action = b'export'
1241 action = b'export'
1242 elif not new:
1242 elif not new:
1243 action = b'delete'
1243 action = b'delete'
1244 part2book.append((part.id, book, action))
1244 part2book.append((part.id, book, action))
1245 pushop.pkfailcb[part.id] = handlefailure
1245 pushop.pkfailcb[part.id] = handlefailure
1246
1246
1247 def handlereply(op):
1247 def handlereply(op):
1248 ui = pushop.ui
1248 ui = pushop.ui
1249 for partid, book, action in part2book:
1249 for partid, book, action in part2book:
1250 partrep = op.records.getreplies(partid)
1250 partrep = op.records.getreplies(partid)
1251 results = partrep[b'pushkey']
1251 results = partrep[b'pushkey']
1252 assert len(results) <= 1
1252 assert len(results) <= 1
1253 if not results:
1253 if not results:
1254 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1254 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1255 else:
1255 else:
1256 ret = int(results[0][b'return'])
1256 ret = int(results[0][b'return'])
1257 if ret:
1257 if ret:
1258 ui.status(bookmsgmap[action][0] % book)
1258 ui.status(bookmsgmap[action][0] % book)
1259 else:
1259 else:
1260 ui.warn(bookmsgmap[action][1] % book)
1260 ui.warn(bookmsgmap[action][1] % book)
1261 if pushop.bkresult is not None:
1261 if pushop.bkresult is not None:
1262 pushop.bkresult = 1
1262 pushop.bkresult = 1
1263
1263
1264 return handlereply
1264 return handlereply
1265
1265
1266
1266
1267 @b2partsgenerator(b'pushvars', idx=0)
1267 @b2partsgenerator(b'pushvars', idx=0)
1268 def _getbundlesendvars(pushop, bundler):
1268 def _getbundlesendvars(pushop, bundler):
1269 '''send shellvars via bundle2'''
1269 '''send shellvars via bundle2'''
1270 pushvars = pushop.pushvars
1270 pushvars = pushop.pushvars
1271 if pushvars:
1271 if pushvars:
1272 shellvars = {}
1272 shellvars = {}
1273 for raw in pushvars:
1273 for raw in pushvars:
1274 if b'=' not in raw:
1274 if b'=' not in raw:
1275 msg = (
1275 msg = (
1276 b"unable to parse variable '%s', should follow "
1276 b"unable to parse variable '%s', should follow "
1277 b"'KEY=VALUE' or 'KEY=' format"
1277 b"'KEY=VALUE' or 'KEY=' format"
1278 )
1278 )
1279 raise error.Abort(msg % raw)
1279 raise error.Abort(msg % raw)
1280 k, v = raw.split(b'=', 1)
1280 k, v = raw.split(b'=', 1)
1281 shellvars[k] = v
1281 shellvars[k] = v
1282
1282
1283 part = bundler.newpart(b'pushvars')
1283 part = bundler.newpart(b'pushvars')
1284
1284
1285 for key, value in pycompat.iteritems(shellvars):
1285 for key, value in pycompat.iteritems(shellvars):
1286 part.addparam(key, value, mandatory=False)
1286 part.addparam(key, value, mandatory=False)
1287
1287
1288
1288
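# Example of the KEY=VALUE parsing above, with hypothetical variables: a
# push carrying [b'DEBUG=1', b'REASON=hotfix'] produces shellvars
# {b'DEBUG': b'1', b'REASON': b'hotfix'}, each added to the pushvars part
# as an advisory (mandatory=False) parameter; b'DEBUG' alone is rejected
# with the parse error above, while b'DEBUG=' maps to an empty value.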
1289 def _pushbundle2(pushop):
1289 def _pushbundle2(pushop):
1290 """push data to the remote using bundle2
1290 """push data to the remote using bundle2
1291
1291
1292 The only currently supported type of data is changegroup but this will
1292 The only currently supported type of data is changegroup but this will
1293 evolve in the future."""
1293 evolve in the future."""
1294 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1294 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1295 pushback = pushop.trmanager and pushop.ui.configbool(
1295 pushback = pushop.trmanager and pushop.ui.configbool(
1296 b'experimental', b'bundle2.pushback'
1296 b'experimental', b'bundle2.pushback'
1297 )
1297 )
1298
1298
1299 # create reply capability
1299 # create reply capability
1300 capsblob = bundle2.encodecaps(
1300 capsblob = bundle2.encodecaps(
1301 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1301 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1302 )
1302 )
1303 bundler.newpart(b'replycaps', data=capsblob)
1303 bundler.newpart(b'replycaps', data=capsblob)
1304 replyhandlers = []
1304 replyhandlers = []
1305 for partgenname in b2partsgenorder:
1305 for partgenname in b2partsgenorder:
1306 partgen = b2partsgenmapping[partgenname]
1306 partgen = b2partsgenmapping[partgenname]
1307 ret = partgen(pushop, bundler)
1307 ret = partgen(pushop, bundler)
1308 if callable(ret):
1308 if callable(ret):
1309 replyhandlers.append(ret)
1309 replyhandlers.append(ret)
1310 # do not push if nothing to push
1310 # do not push if nothing to push
1311 if bundler.nbparts <= 1:
1311 if bundler.nbparts <= 1:
1312 return
1312 return
1313 stream = util.chunkbuffer(bundler.getchunks())
1313 stream = util.chunkbuffer(bundler.getchunks())
1314 try:
1314 try:
1315 try:
1315 try:
1316 with pushop.remote.commandexecutor() as e:
1316 with pushop.remote.commandexecutor() as e:
1317 reply = e.callcommand(
1317 reply = e.callcommand(
1318 b'unbundle',
1318 b'unbundle',
1319 {
1319 {
1320 b'bundle': stream,
1320 b'bundle': stream,
1321 b'heads': [b'force'],
1321 b'heads': [b'force'],
1322 b'url': pushop.remote.url(),
1322 b'url': pushop.remote.url(),
1323 },
1323 },
1324 ).result()
1324 ).result()
1325 except error.BundleValueError as exc:
1325 except error.BundleValueError as exc:
1326 raise error.Abort(_(b'missing support for %s') % exc)
1326 raise error.Abort(_(b'missing support for %s') % exc)
1327 try:
1327 try:
1328 trgetter = None
1328 trgetter = None
1329 if pushback:
1329 if pushback:
1330 trgetter = pushop.trmanager.transaction
1330 trgetter = pushop.trmanager.transaction
1331 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1331 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1332 except error.BundleValueError as exc:
1332 except error.BundleValueError as exc:
1333 raise error.Abort(_(b'missing support for %s') % exc)
1333 raise error.Abort(_(b'missing support for %s') % exc)
1334 except bundle2.AbortFromPart as exc:
1334 except bundle2.AbortFromPart as exc:
1335 pushop.ui.status(_(b'remote: %s\n') % exc)
1335 pushop.ui.status(_(b'remote: %s\n') % exc)
1336 if exc.hint is not None:
1336 if exc.hint is not None:
1337 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1337 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1338 raise error.Abort(_(b'push failed on remote'))
1338 raise error.Abort(_(b'push failed on remote'))
1339 except error.PushkeyFailed as exc:
1339 except error.PushkeyFailed as exc:
1340 partid = int(exc.partid)
1340 partid = int(exc.partid)
1341 if partid not in pushop.pkfailcb:
1341 if partid not in pushop.pkfailcb:
1342 raise
1342 raise
1343 pushop.pkfailcb[partid](pushop, exc)
1343 pushop.pkfailcb[partid](pushop, exc)
1344 for rephand in replyhandlers:
1344 for rephand in replyhandlers:
1345 rephand(op)
1345 rephand(op)
1346
1346
1347
1347
1348 def _pushchangeset(pushop):
1348 def _pushchangeset(pushop):
1349 """Make the actual push of changeset bundle to remote repo"""
1349 """Make the actual push of changeset bundle to remote repo"""
1350 if b'changesets' in pushop.stepsdone:
1350 if b'changesets' in pushop.stepsdone:
1351 return
1351 return
1352 pushop.stepsdone.add(b'changesets')
1352 pushop.stepsdone.add(b'changesets')
1353 if not _pushcheckoutgoing(pushop):
1353 if not _pushcheckoutgoing(pushop):
1354 return
1354 return
1355
1355
1356 # Should have verified this in push().
1356 # Should have verified this in push().
1357 assert pushop.remote.capable(b'unbundle')
1357 assert pushop.remote.capable(b'unbundle')
1358
1358
1359 pushop.repo.prepushoutgoinghooks(pushop)
1359 pushop.repo.prepushoutgoinghooks(pushop)
1360 outgoing = pushop.outgoing
1360 outgoing = pushop.outgoing
1361 # TODO: get bundlecaps from remote
1361 # TODO: get bundlecaps from remote
1362 bundlecaps = None
1362 bundlecaps = None
1363 # create a changegroup from local
1363 # create a changegroup from local
1364 if pushop.revs is None and not (
1364 if pushop.revs is None and not (
1365 outgoing.excluded or pushop.repo.changelog.filteredrevs
1365 outgoing.excluded or pushop.repo.changelog.filteredrevs
1366 ):
1366 ):
1367 # push everything,
1367 # push everything,
1368 # use the fast path, no race possible on push
1368 # use the fast path, no race possible on push
1369 cg = changegroup.makechangegroup(
1369 cg = changegroup.makechangegroup(
1370 pushop.repo,
1370 pushop.repo,
1371 outgoing,
1371 outgoing,
1372 b'01',
1372 b'01',
1373 b'push',
1373 b'push',
1374 fastpath=True,
1374 fastpath=True,
1375 bundlecaps=bundlecaps,
1375 bundlecaps=bundlecaps,
1376 )
1376 )
1377 else:
1377 else:
1378 cg = changegroup.makechangegroup(
1378 cg = changegroup.makechangegroup(
1379 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1379 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1380 )
1380 )
1381
1381
1382 # apply changegroup to remote
1382 # apply changegroup to remote
1383 # local repo finds heads on server, finds out what
1383 # local repo finds heads on server, finds out what
1384 # revs it must push. once revs transferred, if server
1384 # revs it must push. once revs transferred, if server
1385 # finds it has different heads (someone else won
1385 # finds it has different heads (someone else won
1386 # commit/push race), server aborts.
1386 # commit/push race), server aborts.
1387 if pushop.force:
1387 if pushop.force:
1388 remoteheads = [b'force']
1388 remoteheads = [b'force']
1389 else:
1389 else:
1390 remoteheads = pushop.remoteheads
1390 remoteheads = pushop.remoteheads
1391 # ssh: return remote's addchangegroup()
1391 # ssh: return remote's addchangegroup()
1392 # http: return remote's addchangegroup() or 0 for error
1392 # http: return remote's addchangegroup() or 0 for error
1393 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1393 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1394
1394
1395
1395
1396 def _pushsyncphase(pushop):
1396 def _pushsyncphase(pushop):
1397 """synchronise phase information locally and remotely"""
1397 """synchronise phase information locally and remotely"""
1398 cheads = pushop.commonheads
1398 cheads = pushop.commonheads
1399 # even when we don't push, exchanging phase data is useful
1399 # even when we don't push, exchanging phase data is useful
1400 remotephases = listkeys(pushop.remote, b'phases')
1400 remotephases = listkeys(pushop.remote, b'phases')
1401 if (
1401 if (
1402 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1402 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1403 and remotephases # server supports phases
1403 and remotephases # server supports phases
1404 and pushop.cgresult is None # nothing was pushed
1404 and pushop.cgresult is None # nothing was pushed
1405 and remotephases.get(b'publishing', False)
1405 and remotephases.get(b'publishing', False)
1406 ):
1406 ):
1407 # When:
1407 # When:
1408 # - this is a subrepo push
1408 # - this is a subrepo push
1409 # - and the remote supports phases
1409 # - and the remote supports phases
1410 # - and no changeset was pushed
1410 # - and no changeset was pushed
1411 # - and remote is publishing
1411 # - and remote is publishing
1412 # We may be in issue 3871 case!
1412 # We may be in issue 3871 case!
1413 # We drop the possible phase synchronisation done by
1413 # We drop the possible phase synchronisation done by
1414 # courtesy to publish changesets possibly locally draft
1414 # courtesy to publish changesets possibly locally draft
1415 # on the remote.
1415 # on the remote.
1416 remotephases = {b'publishing': b'True'}
1416 remotephases = {b'publishing': b'True'}
1417 if not remotephases: # old server or public only reply from non-publishing
1417 if not remotephases: # old server or public only reply from non-publishing
1418 _localphasemove(pushop, cheads)
1418 _localphasemove(pushop, cheads)
1419 # don't push any phase data as there is nothing to push
1419 # don't push any phase data as there is nothing to push
1420 else:
1420 else:
1421 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1421 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1422 pheads, droots = ana
1422 pheads, droots = ana
1423 ### Apply remote phase on local
1423 ### Apply remote phase on local
1424 if remotephases.get(b'publishing', False):
1424 if remotephases.get(b'publishing', False):
1425 _localphasemove(pushop, cheads)
1425 _localphasemove(pushop, cheads)
1426 else: # publish = False
1426 else: # publish = False
1427 _localphasemove(pushop, pheads)
1427 _localphasemove(pushop, pheads)
1428 _localphasemove(pushop, cheads, phases.draft)
1428 _localphasemove(pushop, cheads, phases.draft)
1429 ### Apply local phase on remote
1429 ### Apply local phase on remote
1430
1430
1431 if pushop.cgresult:
1431 if pushop.cgresult:
1432 if b'phases' in pushop.stepsdone:
1432 if b'phases' in pushop.stepsdone:
1433 # phases already pushed through bundle2
1433 # phases already pushed through bundle2
1434 return
1434 return
1435 outdated = pushop.outdatedphases
1435 outdated = pushop.outdatedphases
1436 else:
1436 else:
1437 outdated = pushop.fallbackoutdatedphases
1437 outdated = pushop.fallbackoutdatedphases
1438
1438
1439 pushop.stepsdone.add(b'phases')
1439 pushop.stepsdone.add(b'phases')
1440
1440
1441 # filter heads already turned public by the push
1441 # filter heads already turned public by the push
1442 outdated = [c for c in outdated if c.node() not in pheads]
1442 outdated = [c for c in outdated if c.node() not in pheads]
1443 # fallback to independent pushkey command
1443 # fallback to independent pushkey command
1444 for newremotehead in outdated:
1444 for newremotehead in outdated:
1445 with pushop.remote.commandexecutor() as e:
1445 with pushop.remote.commandexecutor() as e:
1446 r = e.callcommand(
1446 r = e.callcommand(
1447 b'pushkey',
1447 b'pushkey',
1448 {
1448 {
1449 b'namespace': b'phases',
1449 b'namespace': b'phases',
1450 b'key': newremotehead.hex(),
1450 b'key': newremotehead.hex(),
1451 b'old': b'%d' % phases.draft,
1451 b'old': b'%d' % phases.draft,
1452 b'new': b'%d' % phases.public,
1452 b'new': b'%d' % phases.public,
1453 },
1453 },
1454 ).result()
1454 ).result()
1455
1455
1456 if not r:
1456 if not r:
1457 pushop.ui.warn(
1457 pushop.ui.warn(
1458 _(b'updating %s to public failed!\n') % newremotehead
1458 _(b'updating %s to public failed!\n') % newremotehead
1459 )
1459 )
1460
1460
1461
1461
1462 def _localphasemove(pushop, nodes, phase=phases.public):
1462 def _localphasemove(pushop, nodes, phase=phases.public):
1463 """move <nodes> to <phase> in the local source repo"""
1463 """move <nodes> to <phase> in the local source repo"""
1464 if pushop.trmanager:
1464 if pushop.trmanager:
1465 phases.advanceboundary(
1465 phases.advanceboundary(
1466 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1466 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1467 )
1467 )
1468 else:
1468 else:
1469 # repo is not locked, do not change any phases!
1469 # repo is not locked, do not change any phases!
1470 # Informs the user that phases should have been moved when
1470 # Informs the user that phases should have been moved when
1471 # applicable.
1471 # applicable.
1472 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1472 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1473 phasestr = phases.phasenames[phase]
1473 phasestr = phases.phasenames[phase]
1474 if actualmoves:
1474 if actualmoves:
1475 pushop.ui.status(
1475 pushop.ui.status(
1476 _(
1476 _(
1477 b'cannot lock source repo, skipping '
1477 b'cannot lock source repo, skipping '
1478 b'local %s phase update\n'
1478 b'local %s phase update\n'
1479 )
1479 )
1480 % phasestr
1480 % phasestr
1481 )
1481 )
1482
1482
1483
1483
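# Phase values are ordered public (0) < draft (1) < secret (2), so the
# 'phase < pushop.repo[n].phase()' filter above keeps exactly the nodes
# whose phase would actually change, e.g. draft or secret nodes when the
# requested phase is public; nodes already at or below the target phase
# are ignored.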
1484 def _pushobsolete(pushop):
1484 def _pushobsolete(pushop):
1485 """utility function to push obsolete markers to a remote"""
1485 """utility function to push obsolete markers to a remote"""
1486 if b'obsmarkers' in pushop.stepsdone:
1486 if b'obsmarkers' in pushop.stepsdone:
1487 return
1487 return
1488 repo = pushop.repo
1488 repo = pushop.repo
1489 remote = pushop.remote
1489 remote = pushop.remote
1490 pushop.stepsdone.add(b'obsmarkers')
1490 pushop.stepsdone.add(b'obsmarkers')
1491 if pushop.outobsmarkers:
1491 if pushop.outobsmarkers:
1492 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1492 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1493 rslts = []
1493 rslts = []
1494 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1494 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1495 remotedata = obsolete._pushkeyescape(markers)
1495 remotedata = obsolete._pushkeyescape(markers)
1496 for key in sorted(remotedata, reverse=True):
1496 for key in sorted(remotedata, reverse=True):
1497 # reverse sort to ensure we end with dump0
1497 # reverse sort to ensure we end with dump0
1498 data = remotedata[key]
1498 data = remotedata[key]
1499 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1499 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1500 if [r for r in rslts if not r]:
1500 if [r for r in rslts if not r]:
1501 msg = _(b'failed to push some obsolete markers!\n')
1501 msg = _(b'failed to push some obsolete markers!\n')
1502 repo.ui.warn(msg)
1502 repo.ui.warn(msg)
1503
1503
1504
1504
1505 def _pushbookmark(pushop):
1505 def _pushbookmark(pushop):
1506 """Update bookmark position on remote"""
1506 """Update bookmark position on remote"""
1507 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1507 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1508 return
1508 return
1509 pushop.stepsdone.add(b'bookmarks')
1509 pushop.stepsdone.add(b'bookmarks')
1510 ui = pushop.ui
1510 ui = pushop.ui
1511 remote = pushop.remote
1511 remote = pushop.remote
1512
1512
1513 for b, old, new in pushop.outbookmarks:
1513 for b, old, new in pushop.outbookmarks:
1514 action = b'update'
1514 action = b'update'
1515 if not old:
1515 if not old:
1516 action = b'export'
1516 action = b'export'
1517 elif not new:
1517 elif not new:
1518 action = b'delete'
1518 action = b'delete'
1519
1519
1520 with remote.commandexecutor() as e:
1520 with remote.commandexecutor() as e:
1521 r = e.callcommand(
1521 r = e.callcommand(
1522 b'pushkey',
1522 b'pushkey',
1523 {
1523 {
1524 b'namespace': b'bookmarks',
1524 b'namespace': b'bookmarks',
1525 b'key': b,
1525 b'key': b,
1526 b'old': hex(old),
1526 b'old': hex(old),
1527 b'new': hex(new),
1527 b'new': hex(new),
1528 },
1528 },
1529 ).result()
1529 ).result()
1530
1530
1531 if r:
1531 if r:
1532 ui.status(bookmsgmap[action][0] % b)
1532 ui.status(bookmsgmap[action][0] % b)
1533 else:
1533 else:
1534 ui.warn(bookmsgmap[action][1] % b)
1534 ui.warn(bookmsgmap[action][1] % b)
1535 # discovery can have set the value from an invalid entry
1535 # discovery can have set the value from an invalid entry
1536 if pushop.bkresult is not None:
1536 if pushop.bkresult is not None:
1537 pushop.bkresult = 1
1537 pushop.bkresult = 1
1538
1538
1539
1539
1540 class pulloperation(object):
1540 class pulloperation(object):
1541 """A object that represent a single pull operation
1541 """A object that represent a single pull operation
1542
1542
1543 Its purpose is to carry pull-related state and very common operations.
1543 Its purpose is to carry pull-related state and very common operations.
1544
1544
1545 A new one should be created at the beginning of each pull and discarded
1545 A new one should be created at the beginning of each pull and discarded
1546 afterward.
1546 afterward.
1547 """
1547 """
1548
1548
1549 def __init__(
1549 def __init__(
1550 self,
1550 self,
1551 repo,
1551 repo,
1552 remote,
1552 remote,
1553 heads=None,
1553 heads=None,
1554 force=False,
1554 force=False,
1555 bookmarks=(),
1555 bookmarks=(),
1556 remotebookmarks=None,
1556 remotebookmarks=None,
1557 streamclonerequested=None,
1557 streamclonerequested=None,
1558 includepats=None,
1558 includepats=None,
1559 excludepats=None,
1559 excludepats=None,
1560 depth=None,
1560 depth=None,
1561 ):
1561 ):
1562 # repo we pull into
1562 # repo we pull into
1563 self.repo = repo
1563 self.repo = repo
1564 # repo we pull from
1564 # repo we pull from
1565 self.remote = remote
1565 self.remote = remote
1566 # revisions we try to pull (None means "all")
1566 # revisions we try to pull (None means "all")
1567 self.heads = heads
1567 self.heads = heads
1568 # bookmarks pulled explicitly
1568 # bookmarks pulled explicitly
1569 self.explicitbookmarks = [
1569 self.explicitbookmarks = [
1570 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1570 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1571 ]
1571 ]
1572 # do we force pull?
1572 # do we force pull?
1573 self.force = force
1573 self.force = force
1574 # whether a streaming clone was requested
1574 # whether a streaming clone was requested
1575 self.streamclonerequested = streamclonerequested
1575 self.streamclonerequested = streamclonerequested
1576 # transaction manager
1576 # transaction manager
1577 self.trmanager = None
1577 self.trmanager = None
1578 # set of common changesets between local and remote before pull
1578 # set of common changesets between local and remote before pull
1579 self.common = None
1579 self.common = None
1580 # set of pulled heads
1580 # set of pulled heads
1581 self.rheads = None
1581 self.rheads = None
1582 # list of missing changesets to fetch remotely
1582 # list of missing changesets to fetch remotely
1583 self.fetch = None
1583 self.fetch = None
1584 # remote bookmarks data
1584 # remote bookmarks data
1585 self.remotebookmarks = remotebookmarks
1585 self.remotebookmarks = remotebookmarks
1586 # result of changegroup pulling (used as return code by pull)
1586 # result of changegroup pulling (used as return code by pull)
1587 self.cgresult = None
1587 self.cgresult = None
1588 # list of steps already done
1588 # list of steps already done
1589 self.stepsdone = set()
1589 self.stepsdone = set()
1590 # Whether we attempted a clone from pre-generated bundles.
1590 # Whether we attempted a clone from pre-generated bundles.
1591 self.clonebundleattempted = False
1591 self.clonebundleattempted = False
1592 # Set of file patterns to include.
1592 # Set of file patterns to include.
1593 self.includepats = includepats
1593 self.includepats = includepats
1594 # Set of file patterns to exclude.
1594 # Set of file patterns to exclude.
1595 self.excludepats = excludepats
1595 self.excludepats = excludepats
1596 # Number of ancestor changesets to pull from each pulled head.
1596 # Number of ancestor changesets to pull from each pulled head.
1597 self.depth = depth
1597 self.depth = depth
1598
1598
1599 @util.propertycache
1599 @util.propertycache
1600 def pulledsubset(self):
1600 def pulledsubset(self):
1601 """heads of the set of changeset target by the pull"""
1601 """heads of the set of changeset target by the pull"""
1602 # compute target subset
1602 # compute target subset
1603 if self.heads is None:
1603 if self.heads is None:
1604 # We pulled everything possible
1604 # We pulled everything possible
1605 # sync on everything common
1605 # sync on everything common
1606 c = set(self.common)
1606 c = set(self.common)
1607 ret = list(self.common)
1607 ret = list(self.common)
1608 for n in self.rheads:
1608 for n in self.rheads:
1609 if n not in c:
1609 if n not in c:
1610 ret.append(n)
1610 ret.append(n)
1611 return ret
1611 return ret
1612 else:
1612 else:
1613 # We pulled a specific subset
1613 # We pulled a specific subset
1614 # sync on this subset
1614 # sync on this subset
1615 return self.heads
1615 return self.heads
1616
1616
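# pulledsubset illustrated with hypothetical nodes: after pulling
# everything (heads is None) with common = [a, b] and rheads = [b, c], the
# property returns [a, b, c] (everything common plus the remote heads not
# already in common); for a pull limited to specific heads it is simply
# those heads.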
1617 @util.propertycache
1617 @util.propertycache
1618 def canusebundle2(self):
1618 def canusebundle2(self):
1619 return not _forcebundle1(self)
1619 return not _forcebundle1(self)
1620
1620
1621 @util.propertycache
1621 @util.propertycache
1622 def remotebundle2caps(self):
1622 def remotebundle2caps(self):
1623 return bundle2.bundle2caps(self.remote)
1623 return bundle2.bundle2caps(self.remote)
1624
1624
1625 def gettransaction(self):
1625 def gettransaction(self):
1626 # deprecated; talk to trmanager directly
1626 # deprecated; talk to trmanager directly
1627 return self.trmanager.transaction()
1627 return self.trmanager.transaction()
1628
1628
1629
1629
1630 class transactionmanager(util.transactional):
1630 class transactionmanager(util.transactional):
1631 """An object to manage the life cycle of a transaction
1631 """An object to manage the life cycle of a transaction
1632
1632
1633 It creates the transaction on demand and calls the appropriate hooks when
1633 It creates the transaction on demand and calls the appropriate hooks when
1634 closing the transaction."""
1634 closing the transaction."""
1635
1635
1636 def __init__(self, repo, source, url):
1636 def __init__(self, repo, source, url):
1637 self.repo = repo
1637 self.repo = repo
1638 self.source = source
1638 self.source = source
1639 self.url = url
1639 self.url = url
1640 self._tr = None
1640 self._tr = None
1641
1641
1642 def transaction(self):
1642 def transaction(self):
1643 """Return an open transaction object, constructing if necessary"""
1643 """Return an open transaction object, constructing if necessary"""
1644 if not self._tr:
1644 if not self._tr:
1645 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1645 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1646 self._tr = self.repo.transaction(trname)
1646 self._tr = self.repo.transaction(trname)
1647 self._tr.hookargs[b'source'] = self.source
1647 self._tr.hookargs[b'source'] = self.source
1648 self._tr.hookargs[b'url'] = self.url
1648 self._tr.hookargs[b'url'] = self.url
1649 return self._tr
1649 return self._tr
1650
1650
1651 def close(self):
1651 def close(self):
1652 """close transaction if created"""
1652 """close transaction if created"""
1653 if self._tr is not None:
1653 if self._tr is not None:
1654 self._tr.close()
1654 self._tr.close()
1655
1655
1656 def release(self):
1656 def release(self):
1657 """release transaction if created"""
1657 """release transaction if created"""
1658 if self._tr is not None:
1658 if self._tr is not None:
1659 self._tr.release()
1659 self._tr.release()
1660
1660
1661
1661
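# Typical use, sketched from pull() below: the manager is created up front
# and used as a context manager, so the underlying transaction is only
# opened on demand by transaction() and then closed or released when the
# block exits.
#
#     trmanager = transactionmanager(repo, b'pull', remote.url())
#     with repo.lock(), trmanager:
#         tr = trmanager.transaction()  # created lazily on first call
#         ...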
1662 def listkeys(remote, namespace):
1662 def listkeys(remote, namespace):
1663 with remote.commandexecutor() as e:
1663 with remote.commandexecutor() as e:
1664 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1664 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1665
1665
1666
1666
1667 def _fullpullbundle2(repo, pullop):
1667 def _fullpullbundle2(repo, pullop):
1668 # The server may send a partial reply, i.e. when inlining
1668 # The server may send a partial reply, i.e. when inlining
1669 # pre-computed bundles. In that case, update the common
1669 # pre-computed bundles. In that case, update the common
1670 # set based on the results and pull another bundle.
1670 # set based on the results and pull another bundle.
1671 #
1671 #
1672 # There are two indicators that the process is finished:
1672 # There are two indicators that the process is finished:
1673 # - no changeset has been added, or
1673 # - no changeset has been added, or
1674 # - all remote heads are known locally.
1674 # - all remote heads are known locally.
1675 # The head check must use the unfiltered view as obsoletion
1675 # The head check must use the unfiltered view as obsoletion
1676 # markers can hide heads.
1676 # markers can hide heads.
1677 unfi = repo.unfiltered()
1677 unfi = repo.unfiltered()
1678 unficl = unfi.changelog
1678 unficl = unfi.changelog
1679
1679
1680 def headsofdiff(h1, h2):
1680 def headsofdiff(h1, h2):
1681 """Returns heads(h1 % h2)"""
1681 """Returns heads(h1 % h2)"""
1682 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1682 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1683 return {ctx.node() for ctx in res}
1683 return {ctx.node() for ctx in res}
1684
1684
1685 def headsofunion(h1, h2):
1685 def headsofunion(h1, h2):
1686 """Returns heads((h1 + h2) - null)"""
1686 """Returns heads((h1 + h2) - null)"""
1687 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1687 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1688 return {ctx.node() for ctx in res}
1688 return {ctx.node() for ctx in res}
1689
1689
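# In revset terms, with hypothetical head sets: headsofdiff(h1, h2) returns
# the heads of 'h1 % h2', i.e. changesets reachable from h1 but not from
# h2, which is what this round of pulling added; headsofunion then folds
# those new heads into pullop.common so the next request advertises them as
# already known.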
1690 while True:
1690 while True:
1691 old_heads = unficl.heads()
1691 old_heads = unficl.heads()
1692 clstart = len(unficl)
1692 clstart = len(unficl)
1693 _pullbundle2(pullop)
1693 _pullbundle2(pullop)
1694 if repository.NARROW_REQUIREMENT in repo.requirements:
1694 if requirements.NARROW_REQUIREMENT in repo.requirements:
1695 # XXX narrow clones filter the heads on the server side during
1695 # XXX narrow clones filter the heads on the server side during
1696 # XXX getbundle and result in partial replies as well.
1696 # XXX getbundle and result in partial replies as well.
1697 # XXX Disable pull bundles in this case as band aid to avoid
1697 # XXX Disable pull bundles in this case as band aid to avoid
1698 # XXX extra round trips.
1698 # XXX extra round trips.
1699 break
1699 break
1700 if clstart == len(unficl):
1700 if clstart == len(unficl):
1701 break
1701 break
1702 if all(unficl.hasnode(n) for n in pullop.rheads):
1702 if all(unficl.hasnode(n) for n in pullop.rheads):
1703 break
1703 break
1704 new_heads = headsofdiff(unficl.heads(), old_heads)
1704 new_heads = headsofdiff(unficl.heads(), old_heads)
1705 pullop.common = headsofunion(new_heads, pullop.common)
1705 pullop.common = headsofunion(new_heads, pullop.common)
1706 pullop.rheads = set(pullop.rheads) - pullop.common
1706 pullop.rheads = set(pullop.rheads) - pullop.common
1707
1707
1708
1708
1709 def add_confirm_callback(repo, pullop):
1709 def add_confirm_callback(repo, pullop):
1710 """ adds a finalize callback to transaction which can be used to show stats
1710 """ adds a finalize callback to transaction which can be used to show stats
1711 to user and confirm the pull before committing transaction """
1711 to user and confirm the pull before committing transaction """
1712
1712
1713 tr = pullop.trmanager.transaction()
1713 tr = pullop.trmanager.transaction()
1714 scmutil.registersummarycallback(
1714 scmutil.registersummarycallback(
1715 repo, tr, txnname=b'pull', as_validator=True
1715 repo, tr, txnname=b'pull', as_validator=True
1716 )
1716 )
1717 reporef = weakref.ref(repo.unfiltered())
1717 reporef = weakref.ref(repo.unfiltered())
1718
1718
1719 def prompt(tr):
1719 def prompt(tr):
1720 repo = reporef()
1720 repo = reporef()
1721 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1721 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1722 if repo.ui.promptchoice(cm):
1722 if repo.ui.promptchoice(cm):
1723 raise error.Abort("user aborted")
1723 raise error.Abort("user aborted")
1724
1724
1725 tr.addvalidator(b'900-pull-prompt', prompt)
1725 tr.addvalidator(b'900-pull-prompt', prompt)
1726
1726
1727
1727
1728 def pull(
1728 def pull(
1729 repo,
1729 repo,
1730 remote,
1730 remote,
1731 heads=None,
1731 heads=None,
1732 force=False,
1732 force=False,
1733 bookmarks=(),
1733 bookmarks=(),
1734 opargs=None,
1734 opargs=None,
1735 streamclonerequested=None,
1735 streamclonerequested=None,
1736 includepats=None,
1736 includepats=None,
1737 excludepats=None,
1737 excludepats=None,
1738 depth=None,
1738 depth=None,
1739 confirm=None,
1739 confirm=None,
1740 ):
1740 ):
1741 """Fetch repository data from a remote.
1741 """Fetch repository data from a remote.
1742
1742
1743 This is the main function used to retrieve data from a remote repository.
1743 This is the main function used to retrieve data from a remote repository.
1744
1744
1745 ``repo`` is the local repository to clone into.
1745 ``repo`` is the local repository to clone into.
1746 ``remote`` is a peer instance.
1746 ``remote`` is a peer instance.
1747 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1747 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1748 default) means to pull everything from the remote.
1748 default) means to pull everything from the remote.
1749 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1749 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1750 default, all remote bookmarks are pulled.
1750 default, all remote bookmarks are pulled.
1751 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1751 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1752 initialization.
1752 initialization.
1753 ``streamclonerequested`` is a boolean indicating whether a "streaming
1753 ``streamclonerequested`` is a boolean indicating whether a "streaming
1754 clone" is requested. A "streaming clone" is essentially a raw file copy
1754 clone" is requested. A "streaming clone" is essentially a raw file copy
1755 of revlogs from the server. This only works when the local repository is
1755 of revlogs from the server. This only works when the local repository is
1756 empty. The default value of ``None`` means to respect the server
1756 empty. The default value of ``None`` means to respect the server
1757 configuration for preferring stream clones.
1757 configuration for preferring stream clones.
1758 ``includepats`` and ``excludepats`` define explicit file patterns to
1758 ``includepats`` and ``excludepats`` define explicit file patterns to
1759 include and exclude in storage, respectively. If not defined, narrow
1759 include and exclude in storage, respectively. If not defined, narrow
1760 patterns from the repo instance are used, if available.
1760 patterns from the repo instance are used, if available.
1761 ``depth`` is an integer indicating the DAG depth of history we're
1761 ``depth`` is an integer indicating the DAG depth of history we're
1762 interested in. If defined, for each revision specified in ``heads``, we
1762 interested in. If defined, for each revision specified in ``heads``, we
1763 will fetch up to this many of its ancestors and data associated with them.
1763 will fetch up to this many of its ancestors and data associated with them.
1764 ``confirm`` is a boolean indicating whether the pull should be confirmed
1764 ``confirm`` is a boolean indicating whether the pull should be confirmed
1765 before committing the transaction. This overrides HGPLAIN.
1765 before committing the transaction. This overrides HGPLAIN.
1766
1766
1767 Returns the ``pulloperation`` created for this pull.
1767 Returns the ``pulloperation`` created for this pull.
1768 """
1768 """
1769 if opargs is None:
1769 if opargs is None:
1770 opargs = {}
1770 opargs = {}
1771
1771
1772 # We allow the narrow patterns to be passed in explicitly to provide more
1772 # We allow the narrow patterns to be passed in explicitly to provide more
1773 # flexibility for API consumers.
1773 # flexibility for API consumers.
1774 if includepats or excludepats:
1774 if includepats or excludepats:
1775 includepats = includepats or set()
1775 includepats = includepats or set()
1776 excludepats = excludepats or set()
1776 excludepats = excludepats or set()
1777 else:
1777 else:
1778 includepats, excludepats = repo.narrowpats
1778 includepats, excludepats = repo.narrowpats
1779
1779
1780 narrowspec.validatepatterns(includepats)
1780 narrowspec.validatepatterns(includepats)
1781 narrowspec.validatepatterns(excludepats)
1781 narrowspec.validatepatterns(excludepats)
1782
1782
1783 pullop = pulloperation(
1783 pullop = pulloperation(
1784 repo,
1784 repo,
1785 remote,
1785 remote,
1786 heads,
1786 heads,
1787 force,
1787 force,
1788 bookmarks=bookmarks,
1788 bookmarks=bookmarks,
1789 streamclonerequested=streamclonerequested,
1789 streamclonerequested=streamclonerequested,
1790 includepats=includepats,
1790 includepats=includepats,
1791 excludepats=excludepats,
1791 excludepats=excludepats,
1792 depth=depth,
1792 depth=depth,
1793 **pycompat.strkwargs(opargs)
1793 **pycompat.strkwargs(opargs)
1794 )
1794 )
1795
1795
1796 peerlocal = pullop.remote.local()
1796 peerlocal = pullop.remote.local()
1797 if peerlocal:
1797 if peerlocal:
1798 missing = set(peerlocal.requirements) - pullop.repo.supported
1798 missing = set(peerlocal.requirements) - pullop.repo.supported
1799 if missing:
1799 if missing:
1800 msg = _(
1800 msg = _(
1801 b"required features are not"
1801 b"required features are not"
1802 b" supported in the destination:"
1802 b" supported in the destination:"
1803 b" %s"
1803 b" %s"
1804 ) % (b', '.join(sorted(missing)))
1804 ) % (b', '.join(sorted(missing)))
1805 raise error.Abort(msg)
1805 raise error.Abort(msg)
1806
1806
1807 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1807 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1808 wlock = util.nullcontextmanager()
1808 wlock = util.nullcontextmanager()
1809 if not bookmod.bookmarksinstore(repo):
1809 if not bookmod.bookmarksinstore(repo):
1810 wlock = repo.wlock()
1810 wlock = repo.wlock()
1811 with wlock, repo.lock(), pullop.trmanager:
1811 with wlock, repo.lock(), pullop.trmanager:
1812 if confirm or (
1812 if confirm or (
1813 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1813 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1814 ):
1814 ):
1815 add_confirm_callback(repo, pullop)
1815 add_confirm_callback(repo, pullop)
1816
1816
1817 # Use the modern wire protocol, if available.
1817 # Use the modern wire protocol, if available.
1818 if remote.capable(b'command-changesetdata'):
1818 if remote.capable(b'command-changesetdata'):
1819 exchangev2.pull(pullop)
1819 exchangev2.pull(pullop)
1820 else:
1820 else:
1821 # This should ideally be in _pullbundle2(). However, it needs to run
1821 # This should ideally be in _pullbundle2(). However, it needs to run
1822 # before discovery to avoid extra work.
1822 # before discovery to avoid extra work.
1823 _maybeapplyclonebundle(pullop)
1823 _maybeapplyclonebundle(pullop)
1824 streamclone.maybeperformlegacystreamclone(pullop)
1824 streamclone.maybeperformlegacystreamclone(pullop)
1825 _pulldiscovery(pullop)
1825 _pulldiscovery(pullop)
1826 if pullop.canusebundle2:
1826 if pullop.canusebundle2:
1827 _fullpullbundle2(repo, pullop)
1827 _fullpullbundle2(repo, pullop)
1828 _pullchangeset(pullop)
1828 _pullchangeset(pullop)
1829 _pullphase(pullop)
1829 _pullphase(pullop)
1830 _pullbookmarks(pullop)
1830 _pullbookmarks(pullop)
1831 _pullobsolete(pullop)
1831 _pullobsolete(pullop)
1832
1832
1833 # storing remotenames
1833 # storing remotenames
1834 if repo.ui.configbool(b'experimental', b'remotenames'):
1834 if repo.ui.configbool(b'experimental', b'remotenames'):
1835 logexchange.pullremotenames(repo, remote)
1835 logexchange.pullremotenames(repo, remote)
1836
1836
1837 return pullop
1837 return pullop
1838
1838
1839
1839
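# A hypothetical usage sketch of the API described above: a separate
# extension command driving pull() directly with explicit narrow patterns and
# a history depth. The command name, the b'path:foo' pattern and the depth
# value are invented for illustration; depth and narrow patterns only take
# effect against servers that support narrow/ellipsis pulls.

from mercurial import exchange, hg

def shallowpull(ui, repo, source=b'default'):
    """pull only foo/ history, at most 10 changesets deep per head (sketch)"""
    path = ui.expandpath(source)
    remote = hg.peer(repo, {}, path)
    try:
        pullop = exchange.pull(
            repo,
            remote,
            includepats={b'path:foo'},
            depth=10,
        )
        # the returned pulloperation carries the changegroup result code
        return pullop.cgresult
    finally:
        remote.close()
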
1840 # list of steps to perform discovery before pull
1840 # list of steps to perform discovery before pull
1841 pulldiscoveryorder = []
1841 pulldiscoveryorder = []
1842
1842
1843 # Mapping between step name and function
1843 # Mapping between step name and function
1844 #
1844 #
1845 # This exists to help extensions wrap steps if necessary
1845 # This exists to help extensions wrap steps if necessary
1846 pulldiscoverymapping = {}
1846 pulldiscoverymapping = {}
1847
1847
1848
1848
1849 def pulldiscovery(stepname):
1849 def pulldiscovery(stepname):
1850 """decorator for function performing discovery before pull
1850 """decorator for function performing discovery before pull
1851
1851
1852 The function is added to the step -> function mapping and appended to the
1852 The function is added to the step -> function mapping and appended to the
1853 list of steps. Beware that decorated functions will be added in order (this
1853 list of steps. Beware that decorated functions will be added in order (this
1854 may matter).
1854 may matter).
1855
1855
1856 You can only use this decorator for a new step; if you want to wrap a step
1856 You can only use this decorator for a new step; if you want to wrap a step
1857 from an extension, change the pulldiscoverymapping dictionary directly."""
1857 from an extension, change the pulldiscoverymapping dictionary directly."""
1858
1858
1859 def dec(func):
1859 def dec(func):
1860 assert stepname not in pulldiscoverymapping
1860 assert stepname not in pulldiscoverymapping
1861 pulldiscoverymapping[stepname] = func
1861 pulldiscoverymapping[stepname] = func
1862 pulldiscoveryorder.append(stepname)
1862 pulldiscoveryorder.append(stepname)
1863 return func
1863 return func
1864
1864
1865 return dec
1865 return dec
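
# A hypothetical sketch of an extension registering its own discovery step
# with the decorator above. The step name, the capability string and the
# attribute stashed on the pull operation are invented for illustration.

from mercurial import exchange

@exchange.pulldiscovery(b'example-extra-capability')
def _discoverextracapability(pullop):
    """record whether the remote advertises a hypothetical capability"""
    # pulloperation instances tolerate extra attributes, so later steps added
    # by the same extension can read the flag back
    pullop.example_has_extra = pullop.remote.capable(b'example-extra')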
1866
1866
1867
1867
1868 def _pulldiscovery(pullop):
1868 def _pulldiscovery(pullop):
1869 """Run all discovery steps"""
1869 """Run all discovery steps"""
1870 for stepname in pulldiscoveryorder:
1870 for stepname in pulldiscoveryorder:
1871 step = pulldiscoverymapping[stepname]
1871 step = pulldiscoverymapping[stepname]
1872 step(pullop)
1872 step(pullop)
1873
1873
1874
1874
1875 @pulldiscovery(b'b1:bookmarks')
1875 @pulldiscovery(b'b1:bookmarks')
1876 def _pullbookmarkbundle1(pullop):
1876 def _pullbookmarkbundle1(pullop):
1877 """fetch bookmark data in bundle1 case
1877 """fetch bookmark data in bundle1 case
1878
1878
1879 If not using bundle2, we have to fetch bookmarks before changeset
1879 If not using bundle2, we have to fetch bookmarks before changeset
1880 discovery to reduce the chance and impact of race conditions."""
1880 discovery to reduce the chance and impact of race conditions."""
1881 if pullop.remotebookmarks is not None:
1881 if pullop.remotebookmarks is not None:
1882 return
1882 return
1883 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1883 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1884 # all known bundle2 servers now support listkeys, but let's be nice with
1884 # all known bundle2 servers now support listkeys, but let's be nice with
1885 # new implementations.
1885 # new implementations.
1886 return
1886 return
1887 books = listkeys(pullop.remote, b'bookmarks')
1887 books = listkeys(pullop.remote, b'bookmarks')
1888 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1888 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1889
1889
1890
1890
1891 @pulldiscovery(b'changegroup')
1891 @pulldiscovery(b'changegroup')
1892 def _pulldiscoverychangegroup(pullop):
1892 def _pulldiscoverychangegroup(pullop):
1893 """discovery phase for the pull
1893 """discovery phase for the pull
1894
1894
1895 Currently handles changeset discovery only; will change to handle all
1895 Currently handles changeset discovery only; will change to handle all
1896 discovery at some point."""
1896 discovery at some point."""
1897 tmp = discovery.findcommonincoming(
1897 tmp = discovery.findcommonincoming(
1898 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1898 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1899 )
1899 )
1900 common, fetch, rheads = tmp
1900 common, fetch, rheads = tmp
1901 has_node = pullop.repo.unfiltered().changelog.index.has_node
1901 has_node = pullop.repo.unfiltered().changelog.index.has_node
1902 if fetch and rheads:
1902 if fetch and rheads:
1903 # If a remote head is filtered locally, put it back in common.
1903 # If a remote head is filtered locally, put it back in common.
1904 #
1904 #
1905 # This is a hackish solution to catch most of the "common but locally
1905 # This is a hackish solution to catch most of the "common but locally
1906 # hidden" situations. We do not perform discovery on the unfiltered
1906 # hidden" situations. We do not perform discovery on the unfiltered
1907 # repository because it ends up doing a pathological number of round
1907 # repository because it ends up doing a pathological number of round
1908 # trips for a huge number of changesets we do not care about.
1908 # trips for a huge number of changesets we do not care about.
1909 #
1909 #
1910 # If a set of such "common but filtered" changesets exists on the server
1910 # If a set of such "common but filtered" changesets exists on the server
1911 # but does not include a remote head, we will not be able to detect it.
1911 # but does not include a remote head, we will not be able to detect it.
1912 scommon = set(common)
1912 scommon = set(common)
1913 for n in rheads:
1913 for n in rheads:
1914 if has_node(n):
1914 if has_node(n):
1915 if n not in scommon:
1915 if n not in scommon:
1916 common.append(n)
1916 common.append(n)
1917 if set(rheads).issubset(set(common)):
1917 if set(rheads).issubset(set(common)):
1918 fetch = []
1918 fetch = []
1919 pullop.common = common
1919 pullop.common = common
1920 pullop.fetch = fetch
1920 pullop.fetch = fetch
1921 pullop.rheads = rheads
1921 pullop.rheads = rheads
1922
1922
1923
1923
1924 def _pullbundle2(pullop):
1924 def _pullbundle2(pullop):
1925 """pull data using bundle2
1925 """pull data using bundle2
1926
1926
1927 For now, the only supported data is the changegroup."""
1927 For now, the only supported data is the changegroup."""
1928 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1928 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1929
1929
1930 # make ui easier to access
1930 # make ui easier to access
1931 ui = pullop.repo.ui
1931 ui = pullop.repo.ui
1932
1932
1933 # At the moment we don't do stream clones over bundle2. If that is
1933 # At the moment we don't do stream clones over bundle2. If that is
1934 # implemented then here's where the check for that will go.
1934 # implemented then here's where the check for that will go.
1935 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1935 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1936
1936
1937 # declare pull perimeters
1937 # declare pull perimeters
1938 kwargs[b'common'] = pullop.common
1938 kwargs[b'common'] = pullop.common
1939 kwargs[b'heads'] = pullop.heads or pullop.rheads
1939 kwargs[b'heads'] = pullop.heads or pullop.rheads
1940
1940
1941 # check server supports narrow and then adding includepats and excludepats
1941 # check server supports narrow and then adding includepats and excludepats
1942 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1942 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1943 if servernarrow and pullop.includepats:
1943 if servernarrow and pullop.includepats:
1944 kwargs[b'includepats'] = pullop.includepats
1944 kwargs[b'includepats'] = pullop.includepats
1945 if servernarrow and pullop.excludepats:
1945 if servernarrow and pullop.excludepats:
1946 kwargs[b'excludepats'] = pullop.excludepats
1946 kwargs[b'excludepats'] = pullop.excludepats
1947
1947
1948 if streaming:
1948 if streaming:
1949 kwargs[b'cg'] = False
1949 kwargs[b'cg'] = False
1950 kwargs[b'stream'] = True
1950 kwargs[b'stream'] = True
1951 pullop.stepsdone.add(b'changegroup')
1951 pullop.stepsdone.add(b'changegroup')
1952 pullop.stepsdone.add(b'phases')
1952 pullop.stepsdone.add(b'phases')
1953
1953
1954 else:
1954 else:
1955 # pulling changegroup
1955 # pulling changegroup
1956 pullop.stepsdone.add(b'changegroup')
1956 pullop.stepsdone.add(b'changegroup')
1957
1957
1958 kwargs[b'cg'] = pullop.fetch
1958 kwargs[b'cg'] = pullop.fetch
1959
1959
1960 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1960 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1961 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1961 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1962 if not legacyphase and hasbinaryphase:
1962 if not legacyphase and hasbinaryphase:
1963 kwargs[b'phases'] = True
1963 kwargs[b'phases'] = True
1964 pullop.stepsdone.add(b'phases')
1964 pullop.stepsdone.add(b'phases')
1965
1965
1966 if b'listkeys' in pullop.remotebundle2caps:
1966 if b'listkeys' in pullop.remotebundle2caps:
1967 if b'phases' not in pullop.stepsdone:
1967 if b'phases' not in pullop.stepsdone:
1968 kwargs[b'listkeys'] = [b'phases']
1968 kwargs[b'listkeys'] = [b'phases']
1969
1969
1970 bookmarksrequested = False
1970 bookmarksrequested = False
1971 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1971 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1972 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1972 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1973
1973
1974 if pullop.remotebookmarks is not None:
1974 if pullop.remotebookmarks is not None:
1975 pullop.stepsdone.add(b'request-bookmarks')
1975 pullop.stepsdone.add(b'request-bookmarks')
1976
1976
1977 if (
1977 if (
1978 b'request-bookmarks' not in pullop.stepsdone
1978 b'request-bookmarks' not in pullop.stepsdone
1979 and pullop.remotebookmarks is None
1979 and pullop.remotebookmarks is None
1980 and not legacybookmark
1980 and not legacybookmark
1981 and hasbinarybook
1981 and hasbinarybook
1982 ):
1982 ):
1983 kwargs[b'bookmarks'] = True
1983 kwargs[b'bookmarks'] = True
1984 bookmarksrequested = True
1984 bookmarksrequested = True
1985
1985
1986 if b'listkeys' in pullop.remotebundle2caps:
1986 if b'listkeys' in pullop.remotebundle2caps:
1987 if b'request-bookmarks' not in pullop.stepsdone:
1987 if b'request-bookmarks' not in pullop.stepsdone:
1988 # make sure to always include bookmark data when migrating
1988 # make sure to always include bookmark data when migrating
1989 # `hg incoming --bundle` to using this function.
1989 # `hg incoming --bundle` to using this function.
1990 pullop.stepsdone.add(b'request-bookmarks')
1990 pullop.stepsdone.add(b'request-bookmarks')
1991 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1991 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1992
1992
1993 # If this is a full pull / clone and the server supports the clone bundles
1993 # If this is a full pull / clone and the server supports the clone bundles
1994 # feature, tell the server whether we attempted a clone bundle. The
1994 # feature, tell the server whether we attempted a clone bundle. The
1995 # presence of this flag indicates the client supports clone bundles. This
1995 # presence of this flag indicates the client supports clone bundles. This
1996 # will enable the server to treat clients that support clone bundles
1996 # will enable the server to treat clients that support clone bundles
1997 # differently from those that don't.
1997 # differently from those that don't.
1998 if (
1998 if (
1999 pullop.remote.capable(b'clonebundles')
1999 pullop.remote.capable(b'clonebundles')
2000 and pullop.heads is None
2000 and pullop.heads is None
2001 and list(pullop.common) == [nullid]
2001 and list(pullop.common) == [nullid]
2002 ):
2002 ):
2003 kwargs[b'cbattempted'] = pullop.clonebundleattempted
2003 kwargs[b'cbattempted'] = pullop.clonebundleattempted
2004
2004
2005 if streaming:
2005 if streaming:
2006 pullop.repo.ui.status(_(b'streaming all changes\n'))
2006 pullop.repo.ui.status(_(b'streaming all changes\n'))
2007 elif not pullop.fetch:
2007 elif not pullop.fetch:
2008 pullop.repo.ui.status(_(b"no changes found\n"))
2008 pullop.repo.ui.status(_(b"no changes found\n"))
2009 pullop.cgresult = 0
2009 pullop.cgresult = 0
2010 else:
2010 else:
2011 if pullop.heads is None and list(pullop.common) == [nullid]:
2011 if pullop.heads is None and list(pullop.common) == [nullid]:
2012 pullop.repo.ui.status(_(b"requesting all changes\n"))
2012 pullop.repo.ui.status(_(b"requesting all changes\n"))
2013 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2013 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2014 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
2014 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
2015 if obsolete.commonversion(remoteversions) is not None:
2015 if obsolete.commonversion(remoteversions) is not None:
2016 kwargs[b'obsmarkers'] = True
2016 kwargs[b'obsmarkers'] = True
2017 pullop.stepsdone.add(b'obsmarkers')
2017 pullop.stepsdone.add(b'obsmarkers')
2018 _pullbundle2extraprepare(pullop, kwargs)
2018 _pullbundle2extraprepare(pullop, kwargs)
2019
2019
2020 with pullop.remote.commandexecutor() as e:
2020 with pullop.remote.commandexecutor() as e:
2021 args = dict(kwargs)
2021 args = dict(kwargs)
2022 args[b'source'] = b'pull'
2022 args[b'source'] = b'pull'
2023 bundle = e.callcommand(b'getbundle', args).result()
2023 bundle = e.callcommand(b'getbundle', args).result()
2024
2024
2025 try:
2025 try:
2026 op = bundle2.bundleoperation(
2026 op = bundle2.bundleoperation(
2027 pullop.repo, pullop.gettransaction, source=b'pull'
2027 pullop.repo, pullop.gettransaction, source=b'pull'
2028 )
2028 )
2029 op.modes[b'bookmarks'] = b'records'
2029 op.modes[b'bookmarks'] = b'records'
2030 bundle2.processbundle(pullop.repo, bundle, op=op)
2030 bundle2.processbundle(pullop.repo, bundle, op=op)
2031 except bundle2.AbortFromPart as exc:
2031 except bundle2.AbortFromPart as exc:
2032 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
2032 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
2033 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2033 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2034 except error.BundleValueError as exc:
2034 except error.BundleValueError as exc:
2035 raise error.Abort(_(b'missing support for %s') % exc)
2035 raise error.Abort(_(b'missing support for %s') % exc)
2036
2036
2037 if pullop.fetch:
2037 if pullop.fetch:
2038 pullop.cgresult = bundle2.combinechangegroupresults(op)
2038 pullop.cgresult = bundle2.combinechangegroupresults(op)
2039
2039
2040 # processing phases change
2040 # processing phases change
2041 for namespace, value in op.records[b'listkeys']:
2041 for namespace, value in op.records[b'listkeys']:
2042 if namespace == b'phases':
2042 if namespace == b'phases':
2043 _pullapplyphases(pullop, value)
2043 _pullapplyphases(pullop, value)
2044
2044
2045 # processing bookmark update
2045 # processing bookmark update
2046 if bookmarksrequested:
2046 if bookmarksrequested:
2047 books = {}
2047 books = {}
2048 for record in op.records[b'bookmarks']:
2048 for record in op.records[b'bookmarks']:
2049 books[record[b'bookmark']] = record[b"node"]
2049 books[record[b'bookmark']] = record[b"node"]
2050 pullop.remotebookmarks = books
2050 pullop.remotebookmarks = books
2051 else:
2051 else:
2052 for namespace, value in op.records[b'listkeys']:
2052 for namespace, value in op.records[b'listkeys']:
2053 if namespace == b'bookmarks':
2053 if namespace == b'bookmarks':
2054 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2054 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2055
2055
2056 # bookmark data were either already there or pulled in the bundle
2056 # bookmark data were either already there or pulled in the bundle
2057 if pullop.remotebookmarks is not None:
2057 if pullop.remotebookmarks is not None:
2058 _pullbookmarks(pullop)
2058 _pullbookmarks(pullop)
2059
2059
2060
2060
2061 def _pullbundle2extraprepare(pullop, kwargs):
2061 def _pullbundle2extraprepare(pullop, kwargs):
2062 """hook function so that extensions can extend the getbundle call"""
2062 """hook function so that extensions can extend the getbundle call"""
2063
2063
2064
2064
2065 def _pullchangeset(pullop):
2065 def _pullchangeset(pullop):
2066 """pull changeset from unbundle into the local repo"""
2066 """pull changeset from unbundle into the local repo"""
2067 # We delay the open of the transaction as late as possible so we
2067 # We delay the open of the transaction as late as possible so we
2068 # don't open transaction for nothing or you break future useful
2068 # don't open transaction for nothing or you break future useful
2069 # rollback call
2069 # rollback call
2070 if b'changegroup' in pullop.stepsdone:
2070 if b'changegroup' in pullop.stepsdone:
2071 return
2071 return
2072 pullop.stepsdone.add(b'changegroup')
2072 pullop.stepsdone.add(b'changegroup')
2073 if not pullop.fetch:
2073 if not pullop.fetch:
2074 pullop.repo.ui.status(_(b"no changes found\n"))
2074 pullop.repo.ui.status(_(b"no changes found\n"))
2075 pullop.cgresult = 0
2075 pullop.cgresult = 0
2076 return
2076 return
2077 tr = pullop.gettransaction()
2077 tr = pullop.gettransaction()
2078 if pullop.heads is None and list(pullop.common) == [nullid]:
2078 if pullop.heads is None and list(pullop.common) == [nullid]:
2079 pullop.repo.ui.status(_(b"requesting all changes\n"))
2079 pullop.repo.ui.status(_(b"requesting all changes\n"))
2080 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2080 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2081 # issue1320, avoid a race if remote changed after discovery
2081 # issue1320, avoid a race if remote changed after discovery
2082 pullop.heads = pullop.rheads
2082 pullop.heads = pullop.rheads
2083
2083
2084 if pullop.remote.capable(b'getbundle'):
2084 if pullop.remote.capable(b'getbundle'):
2085 # TODO: get bundlecaps from remote
2085 # TODO: get bundlecaps from remote
2086 cg = pullop.remote.getbundle(
2086 cg = pullop.remote.getbundle(
2087 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2087 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2088 )
2088 )
2089 elif pullop.heads is None:
2089 elif pullop.heads is None:
2090 with pullop.remote.commandexecutor() as e:
2090 with pullop.remote.commandexecutor() as e:
2091 cg = e.callcommand(
2091 cg = e.callcommand(
2092 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2092 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2093 ).result()
2093 ).result()
2094
2094
2095 elif not pullop.remote.capable(b'changegroupsubset'):
2095 elif not pullop.remote.capable(b'changegroupsubset'):
2096 raise error.Abort(
2096 raise error.Abort(
2097 _(
2097 _(
2098 b"partial pull cannot be done because "
2098 b"partial pull cannot be done because "
2099 b"other repository doesn't support "
2099 b"other repository doesn't support "
2100 b"changegroupsubset."
2100 b"changegroupsubset."
2101 )
2101 )
2102 )
2102 )
2103 else:
2103 else:
2104 with pullop.remote.commandexecutor() as e:
2104 with pullop.remote.commandexecutor() as e:
2105 cg = e.callcommand(
2105 cg = e.callcommand(
2106 b'changegroupsubset',
2106 b'changegroupsubset',
2107 {
2107 {
2108 b'bases': pullop.fetch,
2108 b'bases': pullop.fetch,
2109 b'heads': pullop.heads,
2109 b'heads': pullop.heads,
2110 b'source': b'pull',
2110 b'source': b'pull',
2111 },
2111 },
2112 ).result()
2112 ).result()
2113
2113
2114 bundleop = bundle2.applybundle(
2114 bundleop = bundle2.applybundle(
2115 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2115 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2116 )
2116 )
2117 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2117 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2118
2118
2119
2119
2120 def _pullphase(pullop):
2120 def _pullphase(pullop):
2121 # Get remote phases data from remote
2121 # Get remote phases data from remote
2122 if b'phases' in pullop.stepsdone:
2122 if b'phases' in pullop.stepsdone:
2123 return
2123 return
2124 remotephases = listkeys(pullop.remote, b'phases')
2124 remotephases = listkeys(pullop.remote, b'phases')
2125 _pullapplyphases(pullop, remotephases)
2125 _pullapplyphases(pullop, remotephases)
2126
2126
2127
2127
2128 def _pullapplyphases(pullop, remotephases):
2128 def _pullapplyphases(pullop, remotephases):
2129 """apply phase movement from observed remote state"""
2129 """apply phase movement from observed remote state"""
2130 if b'phases' in pullop.stepsdone:
2130 if b'phases' in pullop.stepsdone:
2131 return
2131 return
2132 pullop.stepsdone.add(b'phases')
2132 pullop.stepsdone.add(b'phases')
2133 publishing = bool(remotephases.get(b'publishing', False))
2133 publishing = bool(remotephases.get(b'publishing', False))
2134 if remotephases and not publishing:
2134 if remotephases and not publishing:
2135 # remote is new and non-publishing
2135 # remote is new and non-publishing
2136 pheads, _dr = phases.analyzeremotephases(
2136 pheads, _dr = phases.analyzeremotephases(
2137 pullop.repo, pullop.pulledsubset, remotephases
2137 pullop.repo, pullop.pulledsubset, remotephases
2138 )
2138 )
2139 dheads = pullop.pulledsubset
2139 dheads = pullop.pulledsubset
2140 else:
2140 else:
2141 # Remote is old or publishing all common changesets
2141 # Remote is old or publishing all common changesets
2142 # should be seen as public
2142 # should be seen as public
2143 pheads = pullop.pulledsubset
2143 pheads = pullop.pulledsubset
2144 dheads = []
2144 dheads = []
2145 unfi = pullop.repo.unfiltered()
2145 unfi = pullop.repo.unfiltered()
2146 phase = unfi._phasecache.phase
2146 phase = unfi._phasecache.phase
2147 rev = unfi.changelog.index.get_rev
2147 rev = unfi.changelog.index.get_rev
2148 public = phases.public
2148 public = phases.public
2149 draft = phases.draft
2149 draft = phases.draft
2150
2150
2151 # exclude changesets already public locally and update the others
2151 # exclude changesets already public locally and update the others
2152 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2152 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2153 if pheads:
2153 if pheads:
2154 tr = pullop.gettransaction()
2154 tr = pullop.gettransaction()
2155 phases.advanceboundary(pullop.repo, tr, public, pheads)
2155 phases.advanceboundary(pullop.repo, tr, public, pheads)
2156
2156
2157 # exclude changesets already draft locally and update the others
2157 # exclude changesets already draft locally and update the others
2158 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2158 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2159 if dheads:
2159 if dheads:
2160 tr = pullop.gettransaction()
2160 tr = pullop.gettransaction()
2161 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2161 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2162
2162
2163
2163
2164 def _pullbookmarks(pullop):
2164 def _pullbookmarks(pullop):
2165 """process the remote bookmark information to update the local one"""
2165 """process the remote bookmark information to update the local one"""
2166 if b'bookmarks' in pullop.stepsdone:
2166 if b'bookmarks' in pullop.stepsdone:
2167 return
2167 return
2168 pullop.stepsdone.add(b'bookmarks')
2168 pullop.stepsdone.add(b'bookmarks')
2169 repo = pullop.repo
2169 repo = pullop.repo
2170 remotebookmarks = pullop.remotebookmarks
2170 remotebookmarks = pullop.remotebookmarks
2171 bookmod.updatefromremote(
2171 bookmod.updatefromremote(
2172 repo.ui,
2172 repo.ui,
2173 repo,
2173 repo,
2174 remotebookmarks,
2174 remotebookmarks,
2175 pullop.remote.url(),
2175 pullop.remote.url(),
2176 pullop.gettransaction,
2176 pullop.gettransaction,
2177 explicit=pullop.explicitbookmarks,
2177 explicit=pullop.explicitbookmarks,
2178 )
2178 )
2179
2179
2180
2180
2181 def _pullobsolete(pullop):
2181 def _pullobsolete(pullop):
2182 """utility function to pull obsolete markers from a remote
2182 """utility function to pull obsolete markers from a remote
2183
2183
2184 The `gettransaction` argument is a function that returns the pull
2184 The `gettransaction` argument is a function that returns the pull
2185 transaction, creating one if necessary. We return the transaction to inform
2185 transaction, creating one if necessary. We return the transaction to inform
2186 the calling code that a new transaction has been created (when applicable).
2186 the calling code that a new transaction has been created (when applicable).
2187
2187
2188 Exists mostly to allow overriding for experimentation purposes."""
2188 Exists mostly to allow overriding for experimentation purposes."""
2189 if b'obsmarkers' in pullop.stepsdone:
2189 if b'obsmarkers' in pullop.stepsdone:
2190 return
2190 return
2191 pullop.stepsdone.add(b'obsmarkers')
2191 pullop.stepsdone.add(b'obsmarkers')
2192 tr = None
2192 tr = None
2193 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2193 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2194 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2194 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2195 remoteobs = listkeys(pullop.remote, b'obsolete')
2195 remoteobs = listkeys(pullop.remote, b'obsolete')
2196 if b'dump0' in remoteobs:
2196 if b'dump0' in remoteobs:
2197 tr = pullop.gettransaction()
2197 tr = pullop.gettransaction()
2198 markers = []
2198 markers = []
2199 for key in sorted(remoteobs, reverse=True):
2199 for key in sorted(remoteobs, reverse=True):
2200 if key.startswith(b'dump'):
2200 if key.startswith(b'dump'):
2201 data = util.b85decode(remoteobs[key])
2201 data = util.b85decode(remoteobs[key])
2202 version, newmarks = obsolete._readmarkers(data)
2202 version, newmarks = obsolete._readmarkers(data)
2203 markers += newmarks
2203 markers += newmarks
2204 if markers:
2204 if markers:
2205 pullop.repo.obsstore.add(tr, markers)
2205 pullop.repo.obsstore.add(tr, markers)
2206 pullop.repo.invalidatevolatilesets()
2206 pullop.repo.invalidatevolatilesets()
2207 return tr
2207 return tr
2208
2208
2209
2209
2210 def applynarrowacl(repo, kwargs):
2210 def applynarrowacl(repo, kwargs):
2211 """Apply narrow fetch access control.
2211 """Apply narrow fetch access control.
2212
2212
2213 This massages the named arguments for getbundle wire protocol commands
2213 This massages the named arguments for getbundle wire protocol commands
2214 so requested data is filtered through access control rules.
2214 so requested data is filtered through access control rules.
2215 """
2215 """
2216 ui = repo.ui
2216 ui = repo.ui
2217 # TODO this assumes existence of HTTP and is a layering violation.
2217 # TODO this assumes existence of HTTP and is a layering violation.
2218 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2218 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2219 user_includes = ui.configlist(
2219 user_includes = ui.configlist(
2220 _NARROWACL_SECTION,
2220 _NARROWACL_SECTION,
2221 username + b'.includes',
2221 username + b'.includes',
2222 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2222 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2223 )
2223 )
2224 user_excludes = ui.configlist(
2224 user_excludes = ui.configlist(
2225 _NARROWACL_SECTION,
2225 _NARROWACL_SECTION,
2226 username + b'.excludes',
2226 username + b'.excludes',
2227 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2227 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2228 )
2228 )
2229 if not user_includes:
2229 if not user_includes:
2230 raise error.Abort(
2230 raise error.Abort(
2231 _(b"%s configuration for user %s is empty")
2231 _(b"%s configuration for user %s is empty")
2232 % (_NARROWACL_SECTION, username)
2232 % (_NARROWACL_SECTION, username)
2233 )
2233 )
2234
2234
2235 user_includes = [
2235 user_includes = [
2236 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2236 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2237 ]
2237 ]
2238 user_excludes = [
2238 user_excludes = [
2239 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2239 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2240 ]
2240 ]
2241
2241
2242 req_includes = set(kwargs.get('includepats', []))
2242 req_includes = set(kwargs.get('includepats', []))
2243 req_excludes = set(kwargs.get('excludepats', []))
2243 req_excludes = set(kwargs.get('excludepats', []))
2244
2244
2245 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2245 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2246 req_includes, req_excludes, user_includes, user_excludes
2246 req_includes, req_excludes, user_includes, user_excludes
2247 )
2247 )
2248
2248
2249 if invalid_includes:
2249 if invalid_includes:
2250 raise error.Abort(
2250 raise error.Abort(
2251 _(b"The following includes are not accessible for %s: %s")
2251 _(b"The following includes are not accessible for %s: %s")
2252 % (username, stringutil.pprint(invalid_includes))
2252 % (username, stringutil.pprint(invalid_includes))
2253 )
2253 )
2254
2254
2255 new_args = {}
2255 new_args = {}
2256 new_args.update(kwargs)
2256 new_args.update(kwargs)
2257 new_args['narrow'] = True
2257 new_args['narrow'] = True
2258 new_args['narrow_acl'] = True
2258 new_args['narrow_acl'] = True
2259 new_args['includepats'] = req_includes
2259 new_args['includepats'] = req_includes
2260 if req_excludes:
2260 if req_excludes:
2261 new_args['excludepats'] = req_excludes
2261 new_args['excludepats'] = req_excludes
2262
2262
2263 return new_args
2263 return new_args
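

# A hedged illustration of the server-side configuration this function reads,
# assuming _NARROWACL_SECTION resolves to a [narrowacl] section and that the
# user names below correspond to REMOTE_USER values; '*' expands to 'path:.'
# (the whole repository):
#
#     [narrowacl]
#     default.includes = *
#     alice.includes = src docs
#     alice.excludes = src/private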
2264
2264
2265
2265
2266 def _computeellipsis(repo, common, heads, known, match, depth=None):
2266 def _computeellipsis(repo, common, heads, known, match, depth=None):
2267 """Compute the shape of a narrowed DAG.
2267 """Compute the shape of a narrowed DAG.
2268
2268
2269 Args:
2269 Args:
2270 repo: The repository we're transferring.
2270 repo: The repository we're transferring.
2271 common: The roots of the DAG range we're transferring.
2271 common: The roots of the DAG range we're transferring.
2272 May be just [nullid], which means all ancestors of heads.
2272 May be just [nullid], which means all ancestors of heads.
2273 heads: The heads of the DAG range we're transferring.
2273 heads: The heads of the DAG range we're transferring.
2274 match: The narrowmatcher that allows us to identify relevant changes.
2274 match: The narrowmatcher that allows us to identify relevant changes.
2275 depth: If not None, only consider nodes to be full nodes if they are at
2275 depth: If not None, only consider nodes to be full nodes if they are at
2276 most depth changesets away from one of heads.
2276 most depth changesets away from one of heads.
2277
2277
2278 Returns:
2278 Returns:
2279 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2279 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2280
2280
2281 visitnodes: The list of nodes (either full or ellipsis) which
2281 visitnodes: The list of nodes (either full or ellipsis) which
2282 need to be sent to the client.
2282 need to be sent to the client.
2283 relevant_nodes: The set of changelog nodes which change a file inside
2283 relevant_nodes: The set of changelog nodes which change a file inside
2284 the narrowspec. The client needs these as non-ellipsis nodes.
2284 the narrowspec. The client needs these as non-ellipsis nodes.
2285 ellipsisroots: A dict of {rev: parents} that is used in
2285 ellipsisroots: A dict of {rev: parents} that is used in
2286 narrowchangegroup to produce ellipsis nodes with the
2286 narrowchangegroup to produce ellipsis nodes with the
2287 correct parents.
2287 correct parents.
2288 """
2288 """
2289 cl = repo.changelog
2289 cl = repo.changelog
2290 mfl = repo.manifestlog
2290 mfl = repo.manifestlog
2291
2291
2292 clrev = cl.rev
2292 clrev = cl.rev
2293
2293
2294 commonrevs = {clrev(n) for n in common} | {nullrev}
2294 commonrevs = {clrev(n) for n in common} | {nullrev}
2295 headsrevs = {clrev(n) for n in heads}
2295 headsrevs = {clrev(n) for n in heads}
2296
2296
2297 if depth:
2297 if depth:
2298 revdepth = {h: 0 for h in headsrevs}
2298 revdepth = {h: 0 for h in headsrevs}
2299
2299
2300 ellipsisheads = collections.defaultdict(set)
2300 ellipsisheads = collections.defaultdict(set)
2301 ellipsisroots = collections.defaultdict(set)
2301 ellipsisroots = collections.defaultdict(set)
2302
2302
2303 def addroot(head, curchange):
2303 def addroot(head, curchange):
2304 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2304 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2305 ellipsisroots[head].add(curchange)
2305 ellipsisroots[head].add(curchange)
2306 # Recursively split ellipsis heads with 3 roots by finding the
2306 # Recursively split ellipsis heads with 3 roots by finding the
2307 # roots' youngest common descendant which is an elided merge commit.
2307 # roots' youngest common descendant which is an elided merge commit.
2308 # That descendant takes 2 of the 3 roots as its own, and becomes a
2308 # That descendant takes 2 of the 3 roots as its own, and becomes a
2309 # root of the head.
2309 # root of the head.
2310 while len(ellipsisroots[head]) > 2:
2310 while len(ellipsisroots[head]) > 2:
2311 child, roots = splithead(head)
2311 child, roots = splithead(head)
2312 splitroots(head, child, roots)
2312 splitroots(head, child, roots)
2313 head = child # Recurse in case we just added a 3rd root
2313 head = child # Recurse in case we just added a 3rd root
2314
2314
2315 def splitroots(head, child, roots):
2315 def splitroots(head, child, roots):
2316 ellipsisroots[head].difference_update(roots)
2316 ellipsisroots[head].difference_update(roots)
2317 ellipsisroots[head].add(child)
2317 ellipsisroots[head].add(child)
2318 ellipsisroots[child].update(roots)
2318 ellipsisroots[child].update(roots)
2319 ellipsisroots[child].discard(child)
2319 ellipsisroots[child].discard(child)
2320
2320
2321 def splithead(head):
2321 def splithead(head):
2322 r1, r2, r3 = sorted(ellipsisroots[head])
2322 r1, r2, r3 = sorted(ellipsisroots[head])
2323 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2323 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2324 mid = repo.revs(
2324 mid = repo.revs(
2325 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2325 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2326 )
2326 )
2327 for j in mid:
2327 for j in mid:
2328 if j == nr2:
2328 if j == nr2:
2329 return nr2, (nr1, nr2)
2329 return nr2, (nr1, nr2)
2330 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2330 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2331 return j, (nr1, nr2)
2331 return j, (nr1, nr2)
2332 raise error.Abort(
2332 raise error.Abort(
2333 _(
2333 _(
2334 b'Failed to split up ellipsis node! head: %d, '
2334 b'Failed to split up ellipsis node! head: %d, '
2335 b'roots: %d %d %d'
2335 b'roots: %d %d %d'
2336 )
2336 )
2337 % (head, r1, r2, r3)
2337 % (head, r1, r2, r3)
2338 )
2338 )
2339
2339
2340 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2340 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2341 visit = reversed(missing)
2341 visit = reversed(missing)
2342 relevant_nodes = set()
2342 relevant_nodes = set()
2343 visitnodes = [cl.node(m) for m in missing]
2343 visitnodes = [cl.node(m) for m in missing]
2344 required = set(headsrevs) | known
2344 required = set(headsrevs) | known
2345 for rev in visit:
2345 for rev in visit:
2346 clrev = cl.changelogrevision(rev)
2346 clrev = cl.changelogrevision(rev)
2347 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2347 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2348 if depth is not None:
2348 if depth is not None:
2349 curdepth = revdepth[rev]
2349 curdepth = revdepth[rev]
2350 for p in ps:
2350 for p in ps:
2351 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2351 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2352 needed = False
2352 needed = False
2353 shallow_enough = depth is None or revdepth[rev] <= depth
2353 shallow_enough = depth is None or revdepth[rev] <= depth
2354 if shallow_enough:
2354 if shallow_enough:
2355 curmf = mfl[clrev.manifest].read()
2355 curmf = mfl[clrev.manifest].read()
2356 if ps:
2356 if ps:
2357 # We choose to not trust the changed files list in
2357 # We choose to not trust the changed files list in
2358 # changesets because it's not always correct. TODO: could
2358 # changesets because it's not always correct. TODO: could
2359 # we trust it for the non-merge case?
2359 # we trust it for the non-merge case?
2360 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2360 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2361 needed = bool(curmf.diff(p1mf, match))
2361 needed = bool(curmf.diff(p1mf, match))
2362 if not needed and len(ps) > 1:
2362 if not needed and len(ps) > 1:
2363 # For merge changes, the list of changed files is not
2363 # For merge changes, the list of changed files is not
2364 # helpful, since we need to emit the merge if a file
2364 # helpful, since we need to emit the merge if a file
2365 # in the narrow spec has changed on either side of the
2365 # in the narrow spec has changed on either side of the
2366 # merge. As a result, we do a manifest diff to check.
2366 # merge. As a result, we do a manifest diff to check.
2367 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2367 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2368 needed = bool(curmf.diff(p2mf, match))
2368 needed = bool(curmf.diff(p2mf, match))
2369 else:
2369 else:
2370 # For a root node, we need to include the node if any
2370 # For a root node, we need to include the node if any
2371 # files in the node match the narrowspec.
2371 # files in the node match the narrowspec.
2372 needed = any(curmf.walk(match))
2372 needed = any(curmf.walk(match))
2373
2373
2374 if needed:
2374 if needed:
2375 for head in ellipsisheads[rev]:
2375 for head in ellipsisheads[rev]:
2376 addroot(head, rev)
2376 addroot(head, rev)
2377 for p in ps:
2377 for p in ps:
2378 required.add(p)
2378 required.add(p)
2379 relevant_nodes.add(cl.node(rev))
2379 relevant_nodes.add(cl.node(rev))
2380 else:
2380 else:
2381 if not ps:
2381 if not ps:
2382 ps = [nullrev]
2382 ps = [nullrev]
2383 if rev in required:
2383 if rev in required:
2384 for head in ellipsisheads[rev]:
2384 for head in ellipsisheads[rev]:
2385 addroot(head, rev)
2385 addroot(head, rev)
2386 for p in ps:
2386 for p in ps:
2387 ellipsisheads[p].add(rev)
2387 ellipsisheads[p].add(rev)
2388 else:
2388 else:
2389 for p in ps:
2389 for p in ps:
2390 ellipsisheads[p] |= ellipsisheads[rev]
2390 ellipsisheads[p] |= ellipsisheads[rev]
2391
2391
2392 # add common changesets as roots of their reachable ellipsis heads
2392 # add common changesets as roots of their reachable ellipsis heads
2393 for c in commonrevs:
2393 for c in commonrevs:
2394 for head in ellipsisheads[c]:
2394 for head in ellipsisheads[c]:
2395 addroot(head, c)
2395 addroot(head, c)
2396 return visitnodes, relevant_nodes, ellipsisroots
2396 return visitnodes, relevant_nodes, ellipsisroots
2397
2397
2398
2398
2399 def caps20to10(repo, role):
2399 def caps20to10(repo, role):
2400 """return a set with appropriate options to use bundle20 during getbundle"""
2400 """return a set with appropriate options to use bundle20 during getbundle"""
2401 caps = {b'HG20'}
2401 caps = {b'HG20'}
2402 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2402 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2403 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2403 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2404 return caps
2404 return caps
2405
2405
2406
2406
2407 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2407 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2408 getbundle2partsorder = []
2408 getbundle2partsorder = []
2409
2409
2410 # Mapping between step name and function
2410 # Mapping between step name and function
2411 #
2411 #
2412 # This exists to help extensions wrap steps if necessary
2412 # This exists to help extensions wrap steps if necessary
2413 getbundle2partsmapping = {}
2413 getbundle2partsmapping = {}
2414
2414
2415
2415
2416 def getbundle2partsgenerator(stepname, idx=None):
2416 def getbundle2partsgenerator(stepname, idx=None):
2417 """decorator for function generating bundle2 part for getbundle
2417 """decorator for function generating bundle2 part for getbundle
2418
2418
2419 The function is added to the step -> function mapping and appended to the
2419 The function is added to the step -> function mapping and appended to the
2420 list of steps. Beware that decorated functions will be added in order
2420 list of steps. Beware that decorated functions will be added in order
2421 (this may matter).
2421 (this may matter).
2422
2422
2423 You can only use this decorator for new steps; if you want to wrap a step
2423 You can only use this decorator for new steps; if you want to wrap a step
2424 from an extension, change the getbundle2partsmapping dictionary directly."""
2424 from an extension, change the getbundle2partsmapping dictionary directly."""
2425
2425
2426 def dec(func):
2426 def dec(func):
2427 assert stepname not in getbundle2partsmapping
2427 assert stepname not in getbundle2partsmapping
2428 getbundle2partsmapping[stepname] = func
2428 getbundle2partsmapping[stepname] = func
2429 if idx is None:
2429 if idx is None:
2430 getbundle2partsorder.append(stepname)
2430 getbundle2partsorder.append(stepname)
2431 else:
2431 else:
2432 getbundle2partsorder.insert(idx, stepname)
2432 getbundle2partsorder.insert(idx, stepname)
2433 return func
2433 return func
2434
2434
2435 return dec
2435 return dec
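
# A hypothetical sketch of an extension adding a new, advisory bundle2 part
# through the decorator above; the part name and its payload (the web.name
# config value) are chosen purely for illustration.

from mercurial import exchange

@exchange.getbundle2partsgenerator(b'example-repo-label')
def _getbundlelabelpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """advertise a short repository label to pulling clients (sketch)"""
    label = repo.ui.config(b'web', b'name') or b''
    if label:
        # advisory parts are silently ignored by clients that do not know them
        bundler.newpart(b'example-repo-label', data=label, mandatory=False)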
2436
2436
2437
2437
2438 def bundle2requested(bundlecaps):
2438 def bundle2requested(bundlecaps):
2439 if bundlecaps is not None:
2439 if bundlecaps is not None:
2440 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2440 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2441 return False
2441 return False
2442
2442
2443
2443
2444 def getbundlechunks(
2444 def getbundlechunks(
2445 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2445 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2446 ):
2446 ):
2447 """Return chunks constituting a bundle's raw data.
2447 """Return chunks constituting a bundle's raw data.
2448
2448
2449 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2449 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2450 passed.
2450 passed.
2451
2451
2452 Returns a 2-tuple of a dict with metadata about the generated bundle
2452 Returns a 2-tuple of a dict with metadata about the generated bundle
2453 and an iterator over raw chunks (of varying sizes).
2453 and an iterator over raw chunks (of varying sizes).
2454 """
2454 """
2455 kwargs = pycompat.byteskwargs(kwargs)
2455 kwargs = pycompat.byteskwargs(kwargs)
2456 info = {}
2456 info = {}
2457 usebundle2 = bundle2requested(bundlecaps)
2457 usebundle2 = bundle2requested(bundlecaps)
2458 # bundle10 case
2458 # bundle10 case
2459 if not usebundle2:
2459 if not usebundle2:
2460 if bundlecaps and not kwargs.get(b'cg', True):
2460 if bundlecaps and not kwargs.get(b'cg', True):
2461 raise ValueError(
2461 raise ValueError(
2462 _(b'request for bundle10 must include changegroup')
2462 _(b'request for bundle10 must include changegroup')
2463 )
2463 )
2464
2464
2465 if kwargs:
2465 if kwargs:
2466 raise ValueError(
2466 raise ValueError(
2467 _(b'unsupported getbundle arguments: %s')
2467 _(b'unsupported getbundle arguments: %s')
2468 % b', '.join(sorted(kwargs.keys()))
2468 % b', '.join(sorted(kwargs.keys()))
2469 )
2469 )
2470 outgoing = _computeoutgoing(repo, heads, common)
2470 outgoing = _computeoutgoing(repo, heads, common)
2471 info[b'bundleversion'] = 1
2471 info[b'bundleversion'] = 1
2472 return (
2472 return (
2473 info,
2473 info,
2474 changegroup.makestream(
2474 changegroup.makestream(
2475 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2475 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2476 ),
2476 ),
2477 )
2477 )
2478
2478
2479 # bundle20 case
2479 # bundle20 case
2480 info[b'bundleversion'] = 2
2480 info[b'bundleversion'] = 2
2481 b2caps = {}
2481 b2caps = {}
2482 for bcaps in bundlecaps:
2482 for bcaps in bundlecaps:
2483 if bcaps.startswith(b'bundle2='):
2483 if bcaps.startswith(b'bundle2='):
2484 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2484 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2485 b2caps.update(bundle2.decodecaps(blob))
2485 b2caps.update(bundle2.decodecaps(blob))
2486 bundler = bundle2.bundle20(repo.ui, b2caps)
2486 bundler = bundle2.bundle20(repo.ui, b2caps)
2487
2487
2488 kwargs[b'heads'] = heads
2488 kwargs[b'heads'] = heads
2489 kwargs[b'common'] = common
2489 kwargs[b'common'] = common
2490
2490
2491 for name in getbundle2partsorder:
2491 for name in getbundle2partsorder:
2492 func = getbundle2partsmapping[name]
2492 func = getbundle2partsmapping[name]
2493 func(
2493 func(
2494 bundler,
2494 bundler,
2495 repo,
2495 repo,
2496 source,
2496 source,
2497 bundlecaps=bundlecaps,
2497 bundlecaps=bundlecaps,
2498 b2caps=b2caps,
2498 b2caps=b2caps,
2499 **pycompat.strkwargs(kwargs)
2499 **pycompat.strkwargs(kwargs)
2500 )
2500 )
2501
2501
2502 info[b'prefercompressed'] = bundler.prefercompressed
2502 info[b'prefercompressed'] = bundler.prefercompressed
2503
2503
2504 return info, bundler.getchunks()
2504 return info, bundler.getchunks()
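
# A hedged usage sketch: measuring the size of the raw stream produced by
# getbundlechunks() for a full pull without writing it anywhere. The source
# label b'size-check' is arbitrary; with no bundlecaps this exercises the
# bundle10 (changegroup '01') path.

from mercurial import exchange

def rawbundlesize(repo):
    info, chunks = exchange.getbundlechunks(repo, b'size-check')
    return info[b'bundleversion'], sum(len(chunk) for chunk in chunks)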
2505
2505
2506
2506
2507 @getbundle2partsgenerator(b'stream2')
2507 @getbundle2partsgenerator(b'stream2')
2508 def _getbundlestream2(bundler, repo, *args, **kwargs):
2508 def _getbundlestream2(bundler, repo, *args, **kwargs):
2509 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2509 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2510
2510
2511
2511
2512 @getbundle2partsgenerator(b'changegroup')
2512 @getbundle2partsgenerator(b'changegroup')
2513 def _getbundlechangegrouppart(
2513 def _getbundlechangegrouppart(
2514 bundler,
2514 bundler,
2515 repo,
2515 repo,
2516 source,
2516 source,
2517 bundlecaps=None,
2517 bundlecaps=None,
2518 b2caps=None,
2518 b2caps=None,
2519 heads=None,
2519 heads=None,
2520 common=None,
2520 common=None,
2521 **kwargs
2521 **kwargs
2522 ):
2522 ):
2523 """add a changegroup part to the requested bundle"""
2523 """add a changegroup part to the requested bundle"""
2524 if not kwargs.get('cg', True) or not b2caps:
2524 if not kwargs.get('cg', True) or not b2caps:
2525 return
2525 return
2526
2526
2527 version = b'01'
2527 version = b'01'
2528 cgversions = b2caps.get(b'changegroup')
2528 cgversions = b2caps.get(b'changegroup')
2529 if cgversions: # 3.1 and 3.2 ship with an empty value
2529 if cgversions: # 3.1 and 3.2 ship with an empty value
2530 cgversions = [
2530 cgversions = [
2531 v
2531 v
2532 for v in cgversions
2532 for v in cgversions
2533 if v in changegroup.supportedoutgoingversions(repo)
2533 if v in changegroup.supportedoutgoingversions(repo)
2534 ]
2534 ]
2535 if not cgversions:
2535 if not cgversions:
2536 raise error.Abort(_(b'no common changegroup version'))
2536 raise error.Abort(_(b'no common changegroup version'))
2537 version = max(cgversions)
2537 version = max(cgversions)
2538
2538
2539 outgoing = _computeoutgoing(repo, heads, common)
2539 outgoing = _computeoutgoing(repo, heads, common)
2540 if not outgoing.missing:
2540 if not outgoing.missing:
2541 return
2541 return
2542
2542
2543 if kwargs.get('narrow', False):
2543 if kwargs.get('narrow', False):
2544 include = sorted(filter(bool, kwargs.get('includepats', [])))
2544 include = sorted(filter(bool, kwargs.get('includepats', [])))
2545 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2545 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2546 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2546 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2547 else:
2547 else:
2548 matcher = None
2548 matcher = None
2549
2549
2550 cgstream = changegroup.makestream(
2550 cgstream = changegroup.makestream(
2551 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2551 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2552 )
2552 )
2553
2553
2554 part = bundler.newpart(b'changegroup', data=cgstream)
2554 part = bundler.newpart(b'changegroup', data=cgstream)
2555 if cgversions:
2555 if cgversions:
2556 part.addparam(b'version', version)
2556 part.addparam(b'version', version)
2557
2557
2558 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2558 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2559
2559
2560 if repository.TREEMANIFEST_REQUIREMENT in repo.requirements:
2560 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
2561 part.addparam(b'treemanifest', b'1')
2561 part.addparam(b'treemanifest', b'1')
2562
2562
2563 if b'exp-sidedata-flag' in repo.requirements:
2563 if b'exp-sidedata-flag' in repo.requirements:
2564 part.addparam(b'exp-sidedata', b'1')
2564 part.addparam(b'exp-sidedata', b'1')
2565
2565
2566 if (
2566 if (
2567 kwargs.get('narrow', False)
2567 kwargs.get('narrow', False)
2568 and kwargs.get('narrow_acl', False)
2568 and kwargs.get('narrow_acl', False)
2569 and (include or exclude)
2569 and (include or exclude)
2570 ):
2570 ):
2571 # this is mandatory because otherwise ACL clients won't work
2571 # this is mandatory because otherwise ACL clients won't work
2572 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2572 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2573 narrowspecpart.data = b'%s\0%s' % (
2573 narrowspecpart.data = b'%s\0%s' % (
2574 b'\n'.join(include),
2574 b'\n'.join(include),
2575 b'\n'.join(exclude),
2575 b'\n'.join(exclude),
2576 )
2576 )
2577
2577
2578
2578
2579 @getbundle2partsgenerator(b'bookmarks')
2579 @getbundle2partsgenerator(b'bookmarks')
2580 def _getbundlebookmarkpart(
2580 def _getbundlebookmarkpart(
2581 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2581 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2582 ):
2582 ):
2583 """add a bookmark part to the requested bundle"""
2583 """add a bookmark part to the requested bundle"""
2584 if not kwargs.get('bookmarks', False):
2584 if not kwargs.get('bookmarks', False):
2585 return
2585 return
2586 if not b2caps or b'bookmarks' not in b2caps:
2586 if not b2caps or b'bookmarks' not in b2caps:
2587 raise error.Abort(_(b'no common bookmarks exchange method'))
2587 raise error.Abort(_(b'no common bookmarks exchange method'))
2588 books = bookmod.listbinbookmarks(repo)
2588 books = bookmod.listbinbookmarks(repo)
2589 data = bookmod.binaryencode(books)
2589 data = bookmod.binaryencode(books)
2590 if data:
2590 if data:
2591 bundler.newpart(b'bookmarks', data=data)
2591 bundler.newpart(b'bookmarks', data=data)
2592
2592
2593
2593
2594 @getbundle2partsgenerator(b'listkeys')
2594 @getbundle2partsgenerator(b'listkeys')
2595 def _getbundlelistkeysparts(
2595 def _getbundlelistkeysparts(
2596 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2596 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2597 ):
2597 ):
2598 """add parts containing listkeys namespaces to the requested bundle"""
2598 """add parts containing listkeys namespaces to the requested bundle"""
2599 listkeys = kwargs.get('listkeys', ())
2599 listkeys = kwargs.get('listkeys', ())
2600 for namespace in listkeys:
2600 for namespace in listkeys:
2601 part = bundler.newpart(b'listkeys')
2601 part = bundler.newpart(b'listkeys')
2602 part.addparam(b'namespace', namespace)
2602 part.addparam(b'namespace', namespace)
2603 keys = repo.listkeys(namespace).items()
2603 keys = repo.listkeys(namespace).items()
2604 part.data = pushkey.encodekeys(keys)
2604 part.data = pushkey.encodekeys(keys)
2605
2605
2606
2606
2607 @getbundle2partsgenerator(b'obsmarkers')
2607 @getbundle2partsgenerator(b'obsmarkers')
2608 def _getbundleobsmarkerpart(
2608 def _getbundleobsmarkerpart(
2609 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2609 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2610 ):
2610 ):
2611 """add an obsolescence markers part to the requested bundle"""
2611 """add an obsolescence markers part to the requested bundle"""
2612 if kwargs.get('obsmarkers', False):
2612 if kwargs.get('obsmarkers', False):
2613 if heads is None:
2613 if heads is None:
2614 heads = repo.heads()
2614 heads = repo.heads()
2615 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2615 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2616 markers = repo.obsstore.relevantmarkers(subset)
2616 markers = repo.obsstore.relevantmarkers(subset)
2617 markers = obsutil.sortedmarkers(markers)
2617 markers = obsutil.sortedmarkers(markers)
2618 bundle2.buildobsmarkerspart(bundler, markers)
2618 bundle2.buildobsmarkerspart(bundler, markers)
2619
2619
2620
2620
2621 @getbundle2partsgenerator(b'phases')
2621 @getbundle2partsgenerator(b'phases')
2622 def _getbundlephasespart(
2622 def _getbundlephasespart(
2623 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2623 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2624 ):
2624 ):
2625 """add phase heads part to the requested bundle"""
2625 """add phase heads part to the requested bundle"""
2626 if kwargs.get('phases', False):
2626 if kwargs.get('phases', False):
2627 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2627 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2628 raise error.Abort(_(b'no common phases exchange method'))
2628 raise error.Abort(_(b'no common phases exchange method'))
2629 if heads is None:
2629 if heads is None:
2630 heads = repo.heads()
2630 heads = repo.heads()
2631
2631
2632 headsbyphase = collections.defaultdict(set)
2632 headsbyphase = collections.defaultdict(set)
2633 if repo.publishing():
2633 if repo.publishing():
2634 headsbyphase[phases.public] = heads
2634 headsbyphase[phases.public] = heads
2635 else:
2635 else:
2636 # find the appropriate heads to move
2636 # find the appropriate heads to move
2637
2637
2638 phase = repo._phasecache.phase
2638 phase = repo._phasecache.phase
2639 node = repo.changelog.node
2639 node = repo.changelog.node
2640 rev = repo.changelog.rev
2640 rev = repo.changelog.rev
2641 for h in heads:
2641 for h in heads:
2642 headsbyphase[phase(repo, rev(h))].add(h)
2642 headsbyphase[phase(repo, rev(h))].add(h)
2643 seenphases = list(headsbyphase.keys())
2643 seenphases = list(headsbyphase.keys())
2644
2644
2645 # We do not handle anything but public and draft phases for now.
2645 # We do not handle anything but public and draft phases for now.
2646 if seenphases:
2646 if seenphases:
2647 assert max(seenphases) <= phases.draft
2647 assert max(seenphases) <= phases.draft
2648
2648
2649 # if client is pulling non-public changesets, we need to find
2649 # if client is pulling non-public changesets, we need to find
2650 # intermediate public heads.
2650 # intermediate public heads.
2651 draftheads = headsbyphase.get(phases.draft, set())
2651 draftheads = headsbyphase.get(phases.draft, set())
2652 if draftheads:
2652 if draftheads:
2653 publicheads = headsbyphase.get(phases.public, set())
2653 publicheads = headsbyphase.get(phases.public, set())
2654
2654
2655 revset = b'heads(only(%ln, %ln) and public())'
2655 revset = b'heads(only(%ln, %ln) and public())'
2656 extraheads = repo.revs(revset, draftheads, publicheads)
2656 extraheads = repo.revs(revset, draftheads, publicheads)
2657 for r in extraheads:
2657 for r in extraheads:
2658 headsbyphase[phases.public].add(node(r))
2658 headsbyphase[phases.public].add(node(r))
2659
2659
2660 # transform data in a format used by the encoding function
2660 # transform data in a format used by the encoding function
2661 phasemapping = {
2661 phasemapping = {
2662 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2662 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2663 }
2663 }
2664
2664
2665 # generate the actual part
2665 # generate the actual part
2666 phasedata = phases.binaryencode(phasemapping)
2666 phasedata = phases.binaryencode(phasemapping)
2667 bundler.newpart(b'phase-heads', data=phasedata)
2667 bundler.newpart(b'phase-heads', data=phasedata)
2668
2668
2669
2669
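For orientation, here is a minimal sketch of the head bucketing performed above for a non-publishing repository; the node values and phase numbers are stand-ins rather than real data, and the real code additionally pulls in intermediate public heads for any draft heads being exchanged.

# Sketch of grouping heads by phase, as done above; values are invented.
import collections

public, draft = 0, 1            # numeric phases as defined by mercurial.phases
heads = [b'\x11' * 20, b'\x22' * 20]
phase_of = {heads[0]: public, heads[1]: draft}   # hypothetical phase lookup

headsbyphase = collections.defaultdict(set)
for h in heads:
    headsbyphase[phase_of[h]].add(h)
# headsbyphase == {0: {heads[0]}, 1: {heads[1]}}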
2670 @getbundle2partsgenerator(b'hgtagsfnodes')
2670 @getbundle2partsgenerator(b'hgtagsfnodes')
2671 def _getbundletagsfnodes(
2671 def _getbundletagsfnodes(
2672 bundler,
2672 bundler,
2673 repo,
2673 repo,
2674 source,
2674 source,
2675 bundlecaps=None,
2675 bundlecaps=None,
2676 b2caps=None,
2676 b2caps=None,
2677 heads=None,
2677 heads=None,
2678 common=None,
2678 common=None,
2679 **kwargs
2679 **kwargs
2680 ):
2680 ):
2681 """Transfer the .hgtags filenodes mapping.
2681 """Transfer the .hgtags filenodes mapping.
2682
2682
2683 Only values for heads in this bundle will be transferred.
2683 Only values for heads in this bundle will be transferred.
2684
2684
2685 The part data consists of pairs of a 20-byte changeset node and the raw
2685 The part data consists of pairs of a 20-byte changeset node and the raw
2686 .hgtags filenode value.
2686 .hgtags filenode value.
2687 """
2687 """
2688 # Don't send unless:
2688 # Don't send unless:
2689 # - changesets are being exchanged,
2689 # - changesets are being exchanged,
2690 # - the client supports it.
2690 # - the client supports it.
2691 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2691 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2692 return
2692 return
2693
2693
2694 outgoing = _computeoutgoing(repo, heads, common)
2694 outgoing = _computeoutgoing(repo, heads, common)
2695 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2695 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2696
2696
2697
2697
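To visualise the pair layout mentioned in the docstring above, here is a toy sketch; the nodes are fabricated and the real part data is produced by bundle2.addparttagsfnodescache.

# Illustrative layout only: concatenated (changeset node, .hgtags filenode)
# pairs, each value 20 bytes long; both values below are made up.
cset_node = b'\xaa' * 20
hgtags_fnode = b'\xbb' * 20
pair = cset_node + hgtags_fnode
assert len(pair) == 40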
2698 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2698 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2699 def _getbundlerevbranchcache(
2699 def _getbundlerevbranchcache(
2700 bundler,
2700 bundler,
2701 repo,
2701 repo,
2702 source,
2702 source,
2703 bundlecaps=None,
2703 bundlecaps=None,
2704 b2caps=None,
2704 b2caps=None,
2705 heads=None,
2705 heads=None,
2706 common=None,
2706 common=None,
2707 **kwargs
2707 **kwargs
2708 ):
2708 ):
2709 """Transfer the rev-branch-cache mapping
2709 """Transfer the rev-branch-cache mapping
2710
2710
2711 The payload is a series of data related to each branch
2711 The payload is a series of data related to each branch
2712
2712
2713 1) branch name length
2713 1) branch name length
2714 2) number of open heads
2714 2) number of open heads
2715 3) number of closed heads
2715 3) number of closed heads
2716 4) open heads nodes
2716 4) open heads nodes
2717 5) closed heads nodes
2717 5) closed heads nodes
2718 """
2718 """
2719 # Don't send unless:
2719 # Don't send unless:
2720 # - changesets are being exchanged,
2720 # - changesets are being exchanged,
2721 # - the client supports it.
2721 # - the client supports it.
2722 # - narrow bundle isn't in play (not currently compatible).
2722 # - narrow bundle isn't in play (not currently compatible).
2723 if (
2723 if (
2724 not kwargs.get('cg', True)
2724 not kwargs.get('cg', True)
2725 or not b2caps
2725 or not b2caps
2726 or b'rev-branch-cache' not in b2caps
2726 or b'rev-branch-cache' not in b2caps
2727 or kwargs.get('narrow', False)
2727 or kwargs.get('narrow', False)
2728 or repo.ui.has_section(_NARROWACL_SECTION)
2728 or repo.ui.has_section(_NARROWACL_SECTION)
2729 ):
2729 ):
2730 return
2730 return
2731
2731
2732 outgoing = _computeoutgoing(repo, heads, common)
2732 outgoing = _computeoutgoing(repo, heads, common)
2733 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2733 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2734
2734
2735
2735
2736 def check_heads(repo, their_heads, context):
2736 def check_heads(repo, their_heads, context):
2737 """check if the heads of a repo have been modified
2737 """check if the heads of a repo have been modified
2738
2738
2739 Used by peer for unbundling.
2739 Used by peer for unbundling.
2740 """
2740 """
2741 heads = repo.heads()
2741 heads = repo.heads()
2742 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2742 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2743 if not (
2743 if not (
2744 their_heads == [b'force']
2744 their_heads == [b'force']
2745 or their_heads == heads
2745 or their_heads == heads
2746 or their_heads == [b'hashed', heads_hash]
2746 or their_heads == [b'hashed', heads_hash]
2747 ):
2747 ):
2748 # someone else committed/pushed/unbundled while we
2748 # someone else committed/pushed/unbundled while we
2749 # were transferring data
2749 # were transferring data
2750 raise error.PushRaced(
2750 raise error.PushRaced(
2751 b'repository changed while %s - please try again' % context
2751 b'repository changed while %s - please try again' % context
2752 )
2752 )
2753
2753
2754
2754
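A client that wants to use the b'hashed' form accepted by check_heads can compute the digest the same way; a minimal sketch using hashlib in place of Mercurial's hashutil wrapper, with made-up head nodes:

# Sketch: build the [b'hashed', digest] argument checked above.
import hashlib

heads = [b'\x02' * 20, b'\x01' * 20]                 # hypothetical head nodes
heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
their_heads = [b'hashed', heads_hash]                # what a peer would send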
2755 def unbundle(repo, cg, heads, source, url):
2755 def unbundle(repo, cg, heads, source, url):
2756 """Apply a bundle to a repo.
2756 """Apply a bundle to a repo.
2757
2757
2758 this function makes sure the repo is locked during the application and has a
2758 this function makes sure the repo is locked during the application and has a
2759 mechanism to check that no push race occurred between the creation of the
2759 mechanism to check that no push race occurred between the creation of the
2760 bundle and its application.
2760 bundle and its application.
2761
2761
2762 If the push was raced, a PushRaced exception is raised.
2762 If the push was raced, a PushRaced exception is raised.
2763 r = 0
2763 r = 0
2764 # need a transaction when processing a bundle2 stream
2764 # need a transaction when processing a bundle2 stream
2765 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2765 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2766 lockandtr = [None, None, None]
2766 lockandtr = [None, None, None]
2767 recordout = None
2767 recordout = None
2768 # quick fix for output mismatch with bundle2 in 3.4
2768 # quick fix for output mismatch with bundle2 in 3.4
2769 captureoutput = repo.ui.configbool(
2769 captureoutput = repo.ui.configbool(
2770 b'experimental', b'bundle2-output-capture'
2770 b'experimental', b'bundle2-output-capture'
2771 )
2771 )
2772 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2772 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2773 captureoutput = True
2773 captureoutput = True
2774 try:
2774 try:
2775 # note: outside bundle1, 'heads' is expected to be empty and this
2775 # note: outside bundle1, 'heads' is expected to be empty and this
2776 # 'check_heads' call will be a no-op
2776 # 'check_heads' call will be a no-op
2777 check_heads(repo, heads, b'uploading changes')
2777 check_heads(repo, heads, b'uploading changes')
2778 # push can proceed
2778 # push can proceed
2779 if not isinstance(cg, bundle2.unbundle20):
2779 if not isinstance(cg, bundle2.unbundle20):
2780 # legacy case: bundle1 (changegroup 01)
2780 # legacy case: bundle1 (changegroup 01)
2781 txnname = b"\n".join([source, util.hidepassword(url)])
2781 txnname = b"\n".join([source, util.hidepassword(url)])
2782 with repo.lock(), repo.transaction(txnname) as tr:
2782 with repo.lock(), repo.transaction(txnname) as tr:
2783 op = bundle2.applybundle(repo, cg, tr, source, url)
2783 op = bundle2.applybundle(repo, cg, tr, source, url)
2784 r = bundle2.combinechangegroupresults(op)
2784 r = bundle2.combinechangegroupresults(op)
2785 else:
2785 else:
2786 r = None
2786 r = None
2787 try:
2787 try:
2788
2788
2789 def gettransaction():
2789 def gettransaction():
2790 if not lockandtr[2]:
2790 if not lockandtr[2]:
2791 if not bookmod.bookmarksinstore(repo):
2791 if not bookmod.bookmarksinstore(repo):
2792 lockandtr[0] = repo.wlock()
2792 lockandtr[0] = repo.wlock()
2793 lockandtr[1] = repo.lock()
2793 lockandtr[1] = repo.lock()
2794 lockandtr[2] = repo.transaction(source)
2794 lockandtr[2] = repo.transaction(source)
2795 lockandtr[2].hookargs[b'source'] = source
2795 lockandtr[2].hookargs[b'source'] = source
2796 lockandtr[2].hookargs[b'url'] = url
2796 lockandtr[2].hookargs[b'url'] = url
2797 lockandtr[2].hookargs[b'bundle2'] = b'1'
2797 lockandtr[2].hookargs[b'bundle2'] = b'1'
2798 return lockandtr[2]
2798 return lockandtr[2]
2799
2799
2800 # Do greedy locking by default until we're satisfied with lazy
2800 # Do greedy locking by default until we're satisfied with lazy
2801 # locking.
2801 # locking.
2802 if not repo.ui.configbool(
2802 if not repo.ui.configbool(
2803 b'experimental', b'bundle2lazylocking'
2803 b'experimental', b'bundle2lazylocking'
2804 ):
2804 ):
2805 gettransaction()
2805 gettransaction()
2806
2806
2807 op = bundle2.bundleoperation(
2807 op = bundle2.bundleoperation(
2808 repo,
2808 repo,
2809 gettransaction,
2809 gettransaction,
2810 captureoutput=captureoutput,
2810 captureoutput=captureoutput,
2811 source=b'push',
2811 source=b'push',
2812 )
2812 )
2813 try:
2813 try:
2814 op = bundle2.processbundle(repo, cg, op=op)
2814 op = bundle2.processbundle(repo, cg, op=op)
2815 finally:
2815 finally:
2816 r = op.reply
2816 r = op.reply
2817 if captureoutput and r is not None:
2817 if captureoutput and r is not None:
2818 repo.ui.pushbuffer(error=True, subproc=True)
2818 repo.ui.pushbuffer(error=True, subproc=True)
2819
2819
2820 def recordout(output):
2820 def recordout(output):
2821 r.newpart(b'output', data=output, mandatory=False)
2821 r.newpart(b'output', data=output, mandatory=False)
2822
2822
2823 if lockandtr[2] is not None:
2823 if lockandtr[2] is not None:
2824 lockandtr[2].close()
2824 lockandtr[2].close()
2825 except BaseException as exc:
2825 except BaseException as exc:
2826 exc.duringunbundle2 = True
2826 exc.duringunbundle2 = True
2827 if captureoutput and r is not None:
2827 if captureoutput and r is not None:
2828 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2828 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2829
2829
2830 def recordout(output):
2830 def recordout(output):
2831 part = bundle2.bundlepart(
2831 part = bundle2.bundlepart(
2832 b'output', data=output, mandatory=False
2832 b'output', data=output, mandatory=False
2833 )
2833 )
2834 parts.append(part)
2834 parts.append(part)
2835
2835
2836 raise
2836 raise
2837 finally:
2837 finally:
2838 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2838 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2839 if recordout is not None:
2839 if recordout is not None:
2840 recordout(repo.ui.popbuffer())
2840 recordout(repo.ui.popbuffer())
2841 return r
2841 return r
2842
2842
2843
2843
2844 def _maybeapplyclonebundle(pullop):
2844 def _maybeapplyclonebundle(pullop):
2845 """Apply a clone bundle from a remote, if possible."""
2845 """Apply a clone bundle from a remote, if possible."""
2846
2846
2847 repo = pullop.repo
2847 repo = pullop.repo
2848 remote = pullop.remote
2848 remote = pullop.remote
2849
2849
2850 if not repo.ui.configbool(b'ui', b'clonebundles'):
2850 if not repo.ui.configbool(b'ui', b'clonebundles'):
2851 return
2851 return
2852
2852
2853 # Only run if local repo is empty.
2853 # Only run if local repo is empty.
2854 if len(repo):
2854 if len(repo):
2855 return
2855 return
2856
2856
2857 if pullop.heads:
2857 if pullop.heads:
2858 return
2858 return
2859
2859
2860 if not remote.capable(b'clonebundles'):
2860 if not remote.capable(b'clonebundles'):
2861 return
2861 return
2862
2862
2863 with remote.commandexecutor() as e:
2863 with remote.commandexecutor() as e:
2864 res = e.callcommand(b'clonebundles', {}).result()
2864 res = e.callcommand(b'clonebundles', {}).result()
2865
2865
2866 # If we call the wire protocol command, that's good enough to record the
2866 # If we call the wire protocol command, that's good enough to record the
2867 # attempt.
2867 # attempt.
2868 pullop.clonebundleattempted = True
2868 pullop.clonebundleattempted = True
2869
2869
2870 entries = parseclonebundlesmanifest(repo, res)
2870 entries = parseclonebundlesmanifest(repo, res)
2871 if not entries:
2871 if not entries:
2872 repo.ui.note(
2872 repo.ui.note(
2873 _(
2873 _(
2874 b'no clone bundles available on remote; '
2874 b'no clone bundles available on remote; '
2875 b'falling back to regular clone\n'
2875 b'falling back to regular clone\n'
2876 )
2876 )
2877 )
2877 )
2878 return
2878 return
2879
2879
2880 entries = filterclonebundleentries(
2880 entries = filterclonebundleentries(
2881 repo, entries, streamclonerequested=pullop.streamclonerequested
2881 repo, entries, streamclonerequested=pullop.streamclonerequested
2882 )
2882 )
2883
2883
2884 if not entries:
2884 if not entries:
2885 # There is a thundering herd concern here. However, if a server
2885 # There is a thundering herd concern here. However, if a server
2886 # operator doesn't advertise bundles appropriate for its clients,
2886 # operator doesn't advertise bundles appropriate for its clients,
2887 # they deserve what's coming. Furthermore, from a client's
2887 # they deserve what's coming. Furthermore, from a client's
2888 # perspective, no automatic fallback would mean not being able to
2888 # perspective, no automatic fallback would mean not being able to
2889 # clone!
2889 # clone!
2890 repo.ui.warn(
2890 repo.ui.warn(
2891 _(
2891 _(
2892 b'no compatible clone bundles available on server; '
2892 b'no compatible clone bundles available on server; '
2893 b'falling back to regular clone\n'
2893 b'falling back to regular clone\n'
2894 )
2894 )
2895 )
2895 )
2896 repo.ui.warn(
2896 repo.ui.warn(
2897 _(b'(you may want to report this to the server operator)\n')
2897 _(b'(you may want to report this to the server operator)\n')
2898 )
2898 )
2899 return
2899 return
2900
2900
2901 entries = sortclonebundleentries(repo.ui, entries)
2901 entries = sortclonebundleentries(repo.ui, entries)
2902
2902
2903 url = entries[0][b'URL']
2903 url = entries[0][b'URL']
2904 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2904 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2905 if trypullbundlefromurl(repo.ui, repo, url):
2905 if trypullbundlefromurl(repo.ui, repo, url):
2906 repo.ui.status(_(b'finished applying clone bundle\n'))
2906 repo.ui.status(_(b'finished applying clone bundle\n'))
2907 # Bundle failed.
2907 # Bundle failed.
2908 #
2908 #
2909 # We abort by default to avoid the thundering herd of
2909 # We abort by default to avoid the thundering herd of
2910 # clients flooding a server that was expecting expensive
2910 # clients flooding a server that was expecting expensive
2911 # clone load to be offloaded.
2911 # clone load to be offloaded.
2912 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2912 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2913 repo.ui.warn(_(b'falling back to normal clone\n'))
2913 repo.ui.warn(_(b'falling back to normal clone\n'))
2914 else:
2914 else:
2915 raise error.Abort(
2915 raise error.Abort(
2916 _(b'error applying bundle'),
2916 _(b'error applying bundle'),
2917 hint=_(
2917 hint=_(
2918 b'if this error persists, consider contacting '
2918 b'if this error persists, consider contacting '
2919 b'the server operator or disable clone '
2919 b'the server operator or disable clone '
2920 b'bundles via '
2920 b'bundles via '
2921 b'"--config ui.clonebundles=false"'
2921 b'"--config ui.clonebundles=false"'
2922 ),
2922 ),
2923 )
2923 )
2924
2924
2925
2925
2926 def parseclonebundlesmanifest(repo, s):
2926 def parseclonebundlesmanifest(repo, s):
2927 """Parses the raw text of a clone bundles manifest.
2927 """Parses the raw text of a clone bundles manifest.
2928
2928
2929 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2929 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2930 to the URL; the other keys are the attributes for the entry.
2930 to the URL; the other keys are the attributes for the entry.
2931 """
2931 """
2932 m = []
2932 m = []
2933 for line in s.splitlines():
2933 for line in s.splitlines():
2934 fields = line.split()
2934 fields = line.split()
2935 if not fields:
2935 if not fields:
2936 continue
2936 continue
2937 attrs = {b'URL': fields[0]}
2937 attrs = {b'URL': fields[0]}
2938 for rawattr in fields[1:]:
2938 for rawattr in fields[1:]:
2939 key, value = rawattr.split(b'=', 1)
2939 key, value = rawattr.split(b'=', 1)
2940 key = urlreq.unquote(key)
2940 key = urlreq.unquote(key)
2941 value = urlreq.unquote(value)
2941 value = urlreq.unquote(value)
2942 attrs[key] = value
2942 attrs[key] = value
2943
2943
2944 # Parse BUNDLESPEC into components. This makes client-side
2944 # Parse BUNDLESPEC into components. This makes client-side
2945 # preferences easier to specify since you can prefer a single
2945 # preferences easier to specify since you can prefer a single
2946 # component of the BUNDLESPEC.
2946 # component of the BUNDLESPEC.
2947 if key == b'BUNDLESPEC':
2947 if key == b'BUNDLESPEC':
2948 try:
2948 try:
2949 bundlespec = parsebundlespec(repo, value)
2949 bundlespec = parsebundlespec(repo, value)
2950 attrs[b'COMPRESSION'] = bundlespec.compression
2950 attrs[b'COMPRESSION'] = bundlespec.compression
2951 attrs[b'VERSION'] = bundlespec.version
2951 attrs[b'VERSION'] = bundlespec.version
2952 except error.InvalidBundleSpecification:
2952 except error.InvalidBundleSpecification:
2953 pass
2953 pass
2954 except error.UnsupportedBundleSpecification:
2954 except error.UnsupportedBundleSpecification:
2955 pass
2955 pass
2956
2956
2957 m.append(attrs)
2957 m.append(attrs)
2958
2958
2959 return m
2959 return m
2960
2960
2961
2961
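To make the manifest format concrete, a hypothetical two-entry manifest is sketched below; the URLs and attributes are invented, and the derived COMPRESSION/VERSION keys only appear when parsebundlespec accepts the advertised BUNDLESPEC.

# Hypothetical clone bundles manifest text, as served by the clonebundles
# wire protocol command; attributes are URL-quoted key=value pairs.
manifest = (
    b'https://cdn.example.com/full.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=2GB\n'
    b'https://cdn.example.com/stream.hg BUNDLESPEC=none-packed1\n'
)
# parseclonebundlesmanifest(repo, manifest) would yield one dict per line,
# e.g. {b'URL': b'https://cdn.example.com/full.hg',
#       b'BUNDLESPEC': b'gzip-v2', b'REQUIREDRAM': b'2GB', ...}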
2962 def isstreamclonespec(bundlespec):
2962 def isstreamclonespec(bundlespec):
2963 # Stream clone v1
2963 # Stream clone v1
2964 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2964 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2965 return True
2965 return True
2966
2966
2967 # Stream clone v2
2967 # Stream clone v2
2968 if (
2968 if (
2969 bundlespec.wirecompression == b'UN'
2969 bundlespec.wirecompression == b'UN'
2970 and bundlespec.wireversion == b'02'
2970 and bundlespec.wireversion == b'02'
2971 and bundlespec.contentopts.get(b'streamv2')
2971 and bundlespec.contentopts.get(b'streamv2')
2972 ):
2972 ):
2973 return True
2973 return True
2974
2974
2975 return False
2975 return False
2976
2976
2977
2977
2978 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2978 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2979 """Remove incompatible clone bundle manifest entries.
2979 """Remove incompatible clone bundle manifest entries.
2980
2980
2981 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2981 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2982 and returns a new list consisting of only the entries that this client
2982 and returns a new list consisting of only the entries that this client
2983 should be able to apply.
2983 should be able to apply.
2984
2984
2985 There is no guarantee we'll be able to apply all returned entries because
2985 There is no guarantee we'll be able to apply all returned entries because
2986 the metadata we use to filter on may be missing or wrong.
2986 the metadata we use to filter on may be missing or wrong.
2987 """
2987 """
2988 newentries = []
2988 newentries = []
2989 for entry in entries:
2989 for entry in entries:
2990 spec = entry.get(b'BUNDLESPEC')
2990 spec = entry.get(b'BUNDLESPEC')
2991 if spec:
2991 if spec:
2992 try:
2992 try:
2993 bundlespec = parsebundlespec(repo, spec, strict=True)
2993 bundlespec = parsebundlespec(repo, spec, strict=True)
2994
2994
2995 # If a stream clone was requested, filter out non-streamclone
2995 # If a stream clone was requested, filter out non-streamclone
2996 # entries.
2996 # entries.
2997 if streamclonerequested and not isstreamclonespec(bundlespec):
2997 if streamclonerequested and not isstreamclonespec(bundlespec):
2998 repo.ui.debug(
2998 repo.ui.debug(
2999 b'filtering %s because not a stream clone\n'
2999 b'filtering %s because not a stream clone\n'
3000 % entry[b'URL']
3000 % entry[b'URL']
3001 )
3001 )
3002 continue
3002 continue
3003
3003
3004 except error.InvalidBundleSpecification as e:
3004 except error.InvalidBundleSpecification as e:
3005 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
3005 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
3006 continue
3006 continue
3007 except error.UnsupportedBundleSpecification as e:
3007 except error.UnsupportedBundleSpecification as e:
3008 repo.ui.debug(
3008 repo.ui.debug(
3009 b'filtering %s because unsupported bundle '
3009 b'filtering %s because unsupported bundle '
3010 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
3010 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
3011 )
3011 )
3012 continue
3012 continue
3013 # If we don't have a spec and requested a stream clone, we don't know
3013 # If we don't have a spec and requested a stream clone, we don't know
3014 # what the entry is so don't attempt to apply it.
3014 # what the entry is so don't attempt to apply it.
3015 elif streamclonerequested:
3015 elif streamclonerequested:
3016 repo.ui.debug(
3016 repo.ui.debug(
3017 b'filtering %s because cannot determine if a stream '
3017 b'filtering %s because cannot determine if a stream '
3018 b'clone bundle\n' % entry[b'URL']
3018 b'clone bundle\n' % entry[b'URL']
3019 )
3019 )
3020 continue
3020 continue
3021
3021
3022 if b'REQUIRESNI' in entry and not sslutil.hassni:
3022 if b'REQUIRESNI' in entry and not sslutil.hassni:
3023 repo.ui.debug(
3023 repo.ui.debug(
3024 b'filtering %s because SNI not supported\n' % entry[b'URL']
3024 b'filtering %s because SNI not supported\n' % entry[b'URL']
3025 )
3025 )
3026 continue
3026 continue
3027
3027
3028 if b'REQUIREDRAM' in entry:
3028 if b'REQUIREDRAM' in entry:
3029 try:
3029 try:
3030 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
3030 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
3031 except error.ParseError:
3031 except error.ParseError:
3032 repo.ui.debug(
3032 repo.ui.debug(
3033 b'filtering %s due to a bad REQUIREDRAM attribute\n'
3033 b'filtering %s due to a bad REQUIREDRAM attribute\n'
3034 % entry[b'URL']
3034 % entry[b'URL']
3035 )
3035 )
3036 continue
3036 continue
3037 actualram = repo.ui.estimatememory()
3037 actualram = repo.ui.estimatememory()
3038 if actualram is not None and actualram * 0.66 < requiredram:
3038 if actualram is not None and actualram * 0.66 < requiredram:
3039 repo.ui.debug(
3039 repo.ui.debug(
3040 b'filtering %s as it needs more than 2/3 of system memory\n'
3040 b'filtering %s as it needs more than 2/3 of system memory\n'
3041 % entry[b'URL']
3041 % entry[b'URL']
3042 )
3042 )
3043 continue
3043 continue
3044
3044
3045 newentries.append(entry)
3045 newentries.append(entry)
3046
3046
3047 return newentries
3047 return newentries
3048
3048
3049
3049
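The REQUIREDRAM check above boils down to a comparison against roughly two thirds of the estimated system memory; a small arithmetic sketch with invented sizes:

# Sketch of the memory filter above; the sizes are hypothetical.
requiredram = 12 * 1024 ** 3      # entry advertises REQUIREDRAM=12GB
actualram = 16 * 1024 ** 3        # ui.estimatememory() reports 16GB
assert actualram * 0.66 < requiredram   # 10.56GB < 12GB, so it is filtered out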
3050 class clonebundleentry(object):
3050 class clonebundleentry(object):
3051 """Represents an item in a clone bundles manifest.
3051 """Represents an item in a clone bundles manifest.
3052
3052
3053 This rich class is needed to support sorting since sorted() in Python 3
3053 This rich class is needed to support sorting since sorted() in Python 3
3054 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3054 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3055 won't work.
3055 won't work.
3056 """
3056 """
3057
3057
3058 def __init__(self, value, prefers):
3058 def __init__(self, value, prefers):
3059 self.value = value
3059 self.value = value
3060 self.prefers = prefers
3060 self.prefers = prefers
3061
3061
3062 def _cmp(self, other):
3062 def _cmp(self, other):
3063 for prefkey, prefvalue in self.prefers:
3063 for prefkey, prefvalue in self.prefers:
3064 avalue = self.value.get(prefkey)
3064 avalue = self.value.get(prefkey)
3065 bvalue = other.value.get(prefkey)
3065 bvalue = other.value.get(prefkey)
3066
3066
3067 # Special case where b is missing the attribute and a matches exactly.
3067 # Special case where b is missing the attribute and a matches exactly.
3068 if avalue is not None and bvalue is None and avalue == prefvalue:
3068 if avalue is not None and bvalue is None and avalue == prefvalue:
3069 return -1
3069 return -1
3070
3070
3071 # Special case where a is missing the attribute and b matches exactly.
3071 # Special case where a is missing the attribute and b matches exactly.
3072 if bvalue is not None and avalue is None and bvalue == prefvalue:
3072 if bvalue is not None and avalue is None and bvalue == prefvalue:
3073 return 1
3073 return 1
3074
3074
3075 # We can't compare unless attribute present on both.
3075 # We can't compare unless attribute present on both.
3076 if avalue is None or bvalue is None:
3076 if avalue is None or bvalue is None:
3077 continue
3077 continue
3078
3078
3079 # Same values should fall back to next attribute.
3079 # Same values should fall back to next attribute.
3080 if avalue == bvalue:
3080 if avalue == bvalue:
3081 continue
3081 continue
3082
3082
3083 # Exact matches come first.
3083 # Exact matches come first.
3084 if avalue == prefvalue:
3084 if avalue == prefvalue:
3085 return -1
3085 return -1
3086 if bvalue == prefvalue:
3086 if bvalue == prefvalue:
3087 return 1
3087 return 1
3088
3088
3089 # Fall back to next attribute.
3089 # Fall back to next attribute.
3090 continue
3090 continue
3091
3091
3092 # If we got here we couldn't sort by attributes and prefers. Fall
3092 # If we got here we couldn't sort by attributes and prefers. Fall
3093 # back to index order.
3093 # back to index order.
3094 return 0
3094 return 0
3095
3095
3096 def __lt__(self, other):
3096 def __lt__(self, other):
3097 return self._cmp(other) < 0
3097 return self._cmp(other) < 0
3098
3098
3099 def __gt__(self, other):
3099 def __gt__(self, other):
3100 return self._cmp(other) > 0
3100 return self._cmp(other) > 0
3101
3101
3102 def __eq__(self, other):
3102 def __eq__(self, other):
3103 return self._cmp(other) == 0
3103 return self._cmp(other) == 0
3104
3104
3105 def __le__(self, other):
3105 def __le__(self, other):
3106 return self._cmp(other) <= 0
3106 return self._cmp(other) <= 0
3107
3107
3108 def __ge__(self, other):
3108 def __ge__(self, other):
3109 return self._cmp(other) >= 0
3109 return self._cmp(other) >= 0
3110
3110
3111 def __ne__(self, other):
3111 def __ne__(self, other):
3112 return self._cmp(other) != 0
3112 return self._cmp(other) != 0
3113
3113
3114
3114
3115 def sortclonebundleentries(ui, entries):
3115 def sortclonebundleentries(ui, entries):
3116 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3116 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3117 if not prefers:
3117 if not prefers:
3118 return list(entries)
3118 return list(entries)
3119
3119
3120 def _split(p):
3120 def _split(p):
3121 if b'=' not in p:
3121 if b'=' not in p:
3122 hint = _(b"each comma separated item should be a key=value pair")
3122 hint = _(b"each comma separated item should be a key=value pair")
3123 raise error.Abort(
3123 raise error.Abort(
3124 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
3124 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
3125 )
3125 )
3126 return p.split(b'=', 1)
3126 return p.split(b'=', 1)
3127
3127
3128 prefers = [_split(p) for p in prefers]
3128 prefers = [_split(p) for p in prefers]
3129
3129
3130 items = sorted(clonebundleentry(v, prefers) for v in entries)
3130 items = sorted(clonebundleentry(v, prefers) for v in entries)
3131 return [i.value for i in items]
3131 return [i.value for i in items]
3132
3132
3133
3133
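A short sketch of how ui.clonebundleprefers drives the ordering above; the preference list and entries are hypothetical, and the actual comparison is the clonebundleentry._cmp logic defined earlier.

# Hypothetical preferences, e.g. from:
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, VERSION=v2
prefers = [(b'COMPRESSION', b'zstd'), (b'VERSION', b'v2')]
entries = [
    {b'URL': b'https://cdn.example.com/a.hg', b'COMPRESSION': b'gzip'},
    {b'URL': b'https://cdn.example.com/b.hg', b'COMPRESSION': b'zstd'},
]
# sorted(clonebundleentry(v, prefers) for v in entries) places b.hg first,
# because its COMPRESSION matches the first preference exactly.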
3134 def trypullbundlefromurl(ui, repo, url):
3134 def trypullbundlefromurl(ui, repo, url):
3135 """Attempt to apply a bundle from a URL."""
3135 """Attempt to apply a bundle from a URL."""
3136 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3136 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3137 try:
3137 try:
3138 fh = urlmod.open(ui, url)
3138 fh = urlmod.open(ui, url)
3139 cg = readbundle(ui, fh, b'stream')
3139 cg = readbundle(ui, fh, b'stream')
3140
3140
3141 if isinstance(cg, streamclone.streamcloneapplier):
3141 if isinstance(cg, streamclone.streamcloneapplier):
3142 cg.apply(repo)
3142 cg.apply(repo)
3143 else:
3143 else:
3144 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3144 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3145 return True
3145 return True
3146 except urlerr.httperror as e:
3146 except urlerr.httperror as e:
3147 ui.warn(
3147 ui.warn(
3148 _(b'HTTP error fetching bundle: %s\n')
3148 _(b'HTTP error fetching bundle: %s\n')
3149 % stringutil.forcebytestr(e)
3149 % stringutil.forcebytestr(e)
3150 )
3150 )
3151 except urlerr.urlerror as e:
3151 except urlerr.urlerror as e:
3152 ui.warn(
3152 ui.warn(
3153 _(b'error fetching bundle: %s\n')
3153 _(b'error fetching bundle: %s\n')
3154 % stringutil.forcebytestr(e.reason)
3154 % stringutil.forcebytestr(e.reason)
3155 )
3155 )
3156
3156
3157 return False
3157 return False
NO CONTENT: 9 further modified files; the requested content was too big and was truncated.
General Comments 0