##// END OF EJS Templates
narrow: send specs as bundle2 data instead of param (issue5952) (issue6019)...
Pulkit Goyal -
r42307:8a37c9b6 default draft
parent child Browse files
Show More
@@ -1,280 +1,289 b''
1 1 # narrowbundle2.py - bundle2 extensions for narrow repository support
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import struct
12 12
13 13 from mercurial.i18n import _
14 14 from mercurial.node import (
15 15 bin,
16 16 nullid,
17 17 )
18 18 from mercurial import (
19 19 bundle2,
20 20 changegroup,
21 21 error,
22 22 exchange,
23 23 localrepo,
24 24 narrowspec,
25 25 repair,
26 26 repository,
27 27 util,
28 28 wireprototypes,
29 29 )
30 30 from mercurial.utils import (
31 31 stringutil,
32 32 )
33 33
34 34 _NARROWACL_SECTION = 'narrowhgacl'
35 35 _CHANGESPECPART = 'narrow:changespec'
36 36 _SPECPART = 'narrow:spec'
37 37 _SPECPART_INCLUDE = 'include'
38 38 _SPECPART_EXCLUDE = 'exclude'
39 39 _KILLNODESIGNAL = 'KILL'
40 40 _DONESIGNAL = 'DONE'
41 41 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
42 42 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
43 43 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
44 44 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
45 45
# Serve a changegroup for a client with a narrow clone.
def getbundlechangegrouppart_narrow(bundler, repo, source,
                                    bundlecaps=None, b2caps=None, heads=None,
                                    common=None, **kwargs):
    """Add ellipsis-aware changegroup part(s) to ``bundler`` for a narrow
    client.

    Only valid when the server has opted into the broken-ellipses
    protocol (asserted below). When the client's narrowspec changed and
    it advertised ``known`` nodes, an extra kill/changegroup pair is
    emitted first so the client can rewrite its existing ellipsis nodes.
    """
    assert repo.ui.configbool('experimental', 'narrowservebrokenellipses')

    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    else:
        raise ValueError(_("server does not advertise changegroup version,"
                           " can't negotiate support for ellipsis nodes"))

    # Drop empty patterns and canonicalize ordering before matching.
    include = sorted(p for p in kwargs.get(r'includepats', []) if p)
    exclude = sorted(p for p in kwargs.get(r'excludepats', []) if p)
    newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)

    depth = kwargs.get(r'depth', None)
    if depth is not None:
        depth = int(depth)
        if depth < 1:
            raise error.Abort(_('depth must be positive, got %d') % depth)

    heads = set(heads or repo.heads())
    common = set(common or [nullid])
    oldinclude = sorted(p for p in kwargs.get(r'oldincludepats', []) if p)
    oldexclude = sorted(p for p in kwargs.get(r'oldexcludepats', []) if p)
    known = {bin(n) for n in kwargs.get(r'known', [])}
    if known and (oldinclude != include or oldexclude != exclude):
        # Steps:
        # 1. Send kill for "$known & ::common"
        #
        # 2. Send changegroup for ::common
        #
        # 3. Proceed.
        #
        # In the future, we can send kills for only the specific
        # nodes we know should go away or change shape, and then
        # send a data stream that tells the client something like this:
        #
        # a) apply this changegroup
        # b) apply nodes XXX, YYY, ZZZ that you already have
        # c) goto a
        #
        # until they've built up the full new state.
        # Convert to revnums and intersect with "common". The client should
        # have made it a subset of "common" already, but let's be safe.
        known = set(repo.revs("%ln & ::%ln", known, common))
        # TODO: we could send only roots() of this set, and the
        # list of nodes in common, and the client could work out
        # what to strip, instead of us explicitly sending every
        # single node.
        deadrevs = known

        def genkills():
            for r in deadrevs:
                yield _KILLNODESIGNAL
                yield repo.changelog.node(r)
            yield _DONESIGNAL

        bundler.newpart(_CHANGESPECPART, data=genkills())
        newvisit, newfull, newellipsis = exchange._computeellipsis(
            repo, set(), common, known, newmatch)
        if newvisit:
            packer = changegroup.getbundler(version, repo,
                                            matcher=newmatch,
                                            ellipses=True,
                                            shallow=depth is not None,
                                            ellipsisroots=newellipsis,
                                            fullnodes=newfull)
            cgdata = packer.generate(common, newvisit, False, 'narrow_widen')

            part = bundler.newpart('changegroup', data=cgdata)
            part.addparam('version', version)
            if 'treemanifest' in repo.requirements:
                part.addparam('treemanifest', '1')

    visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
        repo, common, heads, set(), newmatch, depth=depth)

    repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
    if visitnodes:
        packer = changegroup.getbundler(version, repo,
                                        matcher=newmatch,
                                        ellipses=True,
                                        shallow=depth is not None,
                                        ellipsisroots=ellipsisroots,
                                        fullnodes=relevant_nodes)
        cgdata = packer.generate(common, visitnodes, False, 'narrow_widen')

        part = bundler.newpart('changegroup', data=cgdata)
        part.addparam('version', version)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
@bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
def _handlechangespec_2(op, inpart):
    """Handle a ``narrow:spec`` part: install the new narrowspec.

    Patterns may arrive either as bundle2 part parameters (old servers)
    or as NUL-separated include/exclude blobs in the part data (new
    servers, issue5952/issue6019); both sources are merged.
    """
    includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
    excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
    data = inpart.read()
    # old servers don't send includes and excludes using bundle2 data, they use
    # bundle2 parameters instead.
    if data:
        inc, exc = data.split('\0')
        if inc:
            includepats |= set(inc.splitlines())
        if exc:
            excludepats |= set(exc.splitlines())
    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    if repository.NARROW_REQUIREMENT not in op.repo.requirements:
        op.repo.requirements.add(repository.NARROW_REQUIREMENT)
        op.repo._writerequirements()
    op.repo.setnarrowpats(includepats, excludepats)
    narrowspec.copytoworkingcopy(op.repo)
@bundle2.parthandler(_CHANGESPECPART)
def _handlechangespec(op, inpart):
    """Handle a ``narrow:changespec`` part: strip obsoleted ellipsis nodes.

    The part is a stream of 4-byte signal chunks; each KILL signal is
    followed by a 20-byte changeset node to strip, terminated by DONE.
    """
    repo = op.repo
    cl = repo.changelog

    # changesets which need to be stripped entirely. either they're no longer
    # needed in the new narrow spec, or the server is sending a replacement
    # in the changegroup part.
    clkills = set()

    # A changespec part contains all the updates to ellipsis nodes
    # that will happen as a result of widening or narrowing a
    # repo. All the changes that this block encounters are ellipsis
    # nodes or flags to kill an existing ellipsis.
    chunksignal = changegroup.readexactly(inpart, 4)
    while chunksignal != _DONESIGNAL:
        if chunksignal == _KILLNODESIGNAL:
            # a node used to be an ellipsis but isn't anymore
            ck = changegroup.readexactly(inpart, 20)
            if cl.hasnode(ck):
                clkills.add(ck)
        else:
            raise error.Abort(
                _('unexpected changespec node chunk type: %s') % chunksignal)
        chunksignal = changegroup.readexactly(inpart, 4)

    if clkills:
        # preserve bookmarks that repair.strip() would otherwise strip
        op._bookmarksbackup = repo._bookmarks

        class dummybmstore(dict):
            def applychanges(self, repo, tr, changes):
                pass

        localrepo.localrepository._bookmarks.set(repo, dummybmstore())
        chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
                                 topic='widen')
        if chgrpfile:
            op._widen_uninterr = repo.ui.uninterruptible()
            op._widen_uninterr.__enter__()
            # presence of _widen_bundle attribute activates widen handler later
            op._widen_bundle = chgrpfile
    # Set the new narrowspec if we're widening. The setnewnarrowpats() method
    # will currently always be there when using the core+narrowhg server, but
    # other servers may include a changespec part even when not widening (e.g.
    # because we're deepening a shallow repo).
    if util.safehasattr(repo, 'setnewnarrowpats'):
        repo.setnewnarrowpats()
def handlechangegroup_widen(op, inpart):
    """Changegroup exchange handler which restores temporarily-stripped nodes"""
    # We saved a bundle with stripped node data we must now restore.
    # This approach is based on mercurial/repair.py@6ee26a53c111.
    repo = op.repo
    ui = op.ui

    chgrpfile = op._widen_bundle
    del op._widen_bundle
    vfs = repo.vfs

    ui.note(_("adding branch\n"))
    f = vfs.open(chgrpfile, "rb")
    try:
        gen = exchange.readbundle(ui, f, chgrpfile, vfs)
        if not ui.verbose:
            # silence internal shuffling chatter
            ui.pushbuffer()
        if isinstance(gen, bundle2.unbundle20):
            with repo.transaction('strip') as tr:
                bundle2.processbundle(repo, gen, lambda: tr)
        else:
            gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
        if not ui.verbose:
            ui.popbuffer()
    finally:
        f.close()

    # remove undo files
    for undovfs, undofile in repo.undofiles():
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            # a missing undo file is fine; report anything else
            if e.errno != errno.ENOENT:
                ui.warn(_('error removing %s: %s\n') %
                        (undovfs.join(undofile), stringutil.forcebytestr(e)))

    # Remove partial backup only if there were no exceptions
    op._widen_uninterr.__exit__(None, None, None)
    vfs.unlink(chgrpfile)
def setup():
    """Enable narrow repo support in bundle2-related extension points."""
    getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS

    # Advertise the narrow-specific getbundle arguments.
    getbundleargs['narrow'] = 'boolean'
    getbundleargs['depth'] = 'plain'
    getbundleargs['oldincludepats'] = 'csv'
    getbundleargs['oldexcludepats'] = 'csv'
    getbundleargs['known'] = 'csv'

    # Extend changegroup serving to handle requests from narrow clients.
    origcgfn = exchange.getbundle2partsmapping['changegroup']

    def wrappedcgfn(*args, **kwargs):
        repo = args[1]
        if repo.ui.has_section(_NARROWACL_SECTION):
            kwargs = exchange.applynarrowacl(repo, kwargs)

        if (kwargs.get(r'narrow', False) and
            repo.ui.configbool('experimental', 'narrowservebrokenellipses')):
            getbundlechangegrouppart_narrow(*args, **kwargs)
        else:
            origcgfn(*args, **kwargs)

    exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn

    # Extend changegroup receiver so client can fixup after widen requests.
    origcghandler = bundle2.parthandlermapping['changegroup']

    def wrappedcghandler(op, inpart):
        origcghandler(op, inpart)
        if util.safehasattr(op, '_widen_bundle'):
            handlechangegroup_widen(op, inpart)
        if util.safehasattr(op, '_bookmarksbackup'):
            localrepo.localrepository._bookmarks.set(op.repo,
                                                     op._bookmarksbackup)
            del op._bookmarksbackup

    wrappedcghandler.params = origcghandler.params
    bundle2.parthandlermapping['changegroup'] = wrappedcghandler
@@ -1,2698 +1,2700 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 nullid,
18 18 nullrev,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 27 discovery,
28 28 error,
29 29 exchangev2,
30 30 lock as lockmod,
31 31 logexchange,
32 32 narrowspec,
33 33 obsolete,
34 34 phases,
35 35 pushkey,
36 36 pycompat,
37 37 repository,
38 38 scmutil,
39 39 sslutil,
40 40 streamclone,
41 41 url as urlmod,
42 42 util,
43 43 wireprototypes,
44 44 )
45 45 from .utils import (
46 46 stringutil,
47 47 )
48 48
urlerr = util.urlerr
urlreq = util.urlreq

# Config section consulted for narrow ACL filtering on the server.
_NARROWACL_SECTION = 'narrowhgacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Maps bundle version with content opts to choose which part to bundle
_bundlespeccontentopts = {
    'v1': {
        'changegroup': True,
        'cg.version': '01',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': False,
        'revbranchcache': False
    },
    'v2': {
        'changegroup': True,
        'cg.version': '02',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': True,
        'revbranchcache': True
    },
    'packed1' : {
        'cg.version': 's1'
    }
}
# 'bundle2' is a legacy alias for the v2 content options.
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']

_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                    "tagsfnodescache": False,
                                    "revbranchcache": False}}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
91 91
@attr.s
class bundlespec(object):
    """Parsed form of a bundle specification string.

    Holds the human-facing and wire-protocol names for the compression
    engine and bundle version, plus any URI-style parameters and the
    derived content options.
    """
    compression = attr.ib()
    wirecompression = attr.ib()
    version = attr.ib()
    wireversion = attr.ib()
    params = attr.ib()
    contentopts = attr.ib()
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "version;k0=v0;k1=v1" into (version, {k: v}) with
        # URI-decoded keys and values.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            params[urlreq.unquote(key)] = urlreq.unquote(value)

        return version, params

    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if params.get("stream") == "v2":
        contentopts.update(_bundlespecvariants["streamv2"])

    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(compression, wirecompression, version, wireversion,
                      params, contentopts)
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte header of ``fh`` and return the matching unbundler.

    Supports HG10 changegroups, HG2x bundle2 streams, and HGS1 stream
    clones; raw (headerless) streams are wrapped and treated as
    uncompressed HG10.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
    if not header.startswith('HG') and header.startswith('\0'):
        # Headerless data: push the sniffed bytes back and assume an
        # uncompressed cg1 stream.
        fh = changegroup.headerlessfixup(fh, header)
        header = "HG10"
        alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
257 257
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map a wire compression type back to its bundlespec name, or
        # None when unrecognized.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
316 316
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # Discard common nodes unknown locally.
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)
335 335
def _checkpublish(pushop):
    """Warn, confirm, or abort per experimental.auto-publish when a push
    to a publishing server would publish draft changesets."""
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config('experimental', 'auto-publish')
    if pushop.publish or behavior not in ('warn', 'confirm', 'abort'):
        return
    remotephases = listkeys(pushop.remote, 'phases')
    if not remotephases.get('publishing', False):
        # Non-publishing remote: nothing will be published.
        return

    if pushop.revs is None:
        published = repo.filtered('served').revs('not public()')
    else:
        published = repo.revs('::%ln - public()', pushop.revs)
    if published:
        if behavior == 'warn':
            ui.warn(_('%i changesets about to be published\n')
                    % len(published))
        elif behavior == 'confirm':
            if ui.promptchoice(_('push and publish %i changesets (yn)?'
                                 '$$ &Yes $$ &No') % len(published)):
                raise error.Abort(_('user quit'))
        elif behavior == 'abort':
            msg = _('push would publish %i changesets') % len(published)
            hint = _("use --publish or adjust 'experimental.auto-publish'"
                     " config")
            raise error.Abort(msg, hint=hint)
363 363
364 364 def _forcebundle1(op):
365 365 """return true if a pull/push must use bundle1
366 366
367 367 This function is used to allow testing of the older bundle version"""
368 368 ui = op.repo.ui
369 369 # The goal is this config is to allow developer to choose the bundle
370 370 # version used during exchanged. This is especially handy during test.
371 371 # Value is a list of bundle version to be picked from, highest version
372 372 # should be used.
373 373 #
374 374 # developer config: devel.legacy.exchange
375 375 exchange = ui.configlist('devel', 'legacy.exchange')
376 376 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
377 377 return forcebundle1 or not op.remote.capable('bundle2')
378 378
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), publish=False, pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
507 507
508 508
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         publish=False, opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           publish, **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        # Local peer: refuse to push when the destination lacks features
        # this repo requires.
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = ('cannot lock source repository: %s\n'
               % stringutil.forcebytestr(err))
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
576 576
# List of step names, in the order discovery must run before a push.
# Populated by the `pushdiscovery` decorator below.
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
584 584
def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is recorded in the step -> function mapping and
    its name is appended to the ordered step list, so decoration order
    matters.

    Only use this decorator for brand new steps; to wrap a step provided by
    an extension, mutate the pushdiscovery dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoveryorder.append(stepname)
        pushdiscoverymapping[stepname] = func
        return func
    return register
600 600
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
606 606
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    repo, remote, force = pushop.repo, pushop.remote, pushop.force
    # first figure out what the remote is missing / has in common
    if pushop.revs:
        commoninc = discovery.findcommonincoming(repo, remote, force=force,
                                                 ancestorsof=pushop.revs)
    else:
        commoninc = discovery.findcommonincoming(repo, remote, force=force)
    common, inc, remoteheads = commoninc
    # then compute the outgoing set, reusing the incoming result
    outgoing = discovery.findcommonoutgoing(repo, remote,
                                            onlyheads=pushop.revs,
                                            commoninc=commoninc,
                                            force=force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
623 623
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    # on a non-publishing remote, only changesets public locally can turn
    # remote drafts public; hence the extra condition
    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    # `future`: heads to be turned public if the changeset push succeeds;
    # `fallback`: heads to be turned public even if it fails
    if not pushop.remotephases.publishing and pushop.publish:
        future = list(unfi.set('%ln and (not public() or %ln::)',
                               pushop.futureheads, droots))
    elif not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
679 679
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed set"""
    repo = pushop.repo
    # local checks first; the listkeys call goes over the wire
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
696 696
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and decide which moves to push"""
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    pushop.ui.debug("checking for updated bookmarks\n")

    # when pushing a subset of revisions, only bookmarks on their ancestors
    # are candidates for an implicit update
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    explicit = {repo._bookmarks.expandname(mark)
                for mark in pushop.bookmarks}

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def _hexornone(node):
        # comparison entries carry None when one side lacks the bookmark
        return node if node is None else hex(node)

    comp = [[(b, _hexornone(scid), _hexornone(dcid))
             for (b, scid, dcid) in marks]
            for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
727 727
728 728 def _processcompared(pushop, pushed, explicit, remotebms, comp):
729 729 """take decision on bookmark to pull from the remote bookmark
730 730
731 731 Exist to help extensions who want to alter this behavior.
732 732 """
733 733 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
734 734
735 735 repo = pushop.repo
736 736
737 737 for b, scid, dcid in advsrc:
738 738 if b in explicit:
739 739 explicit.remove(b)
740 740 if not pushed or repo[scid].rev() in pushed:
741 741 pushop.outbookmarks.append((b, dcid, scid))
742 742 # search added bookmark
743 743 for b, scid, dcid in addsrc:
744 744 if b in explicit:
745 745 explicit.remove(b)
746 746 pushop.outbookmarks.append((b, '', scid))
747 747 # search for overwritten bookmark
748 748 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
749 749 if b in explicit:
750 750 explicit.remove(b)
751 751 pushop.outbookmarks.append((b, dcid, scid))
752 752 # search for bookmark to delete
753 753 for b, scid, dcid in adddst:
754 754 if b in explicit:
755 755 explicit.remove(b)
756 756 # treat as "deleted locally"
757 757 pushop.outbookmarks.append((b, dcid, ''))
758 758 # identical bookmarks shouldn't get reported
759 759 for b, scid, dcid in same:
760 760 if b in explicit:
761 761 explicit.remove(b)
762 762
763 763 if explicit:
764 764 explicit = sorted(explicit)
765 765 # we should probably list all of them
766 766 pushop.ui.warn(_('bookmark %s does not exist on the local '
767 767 'or remote repository!\n') % explicit[0])
768 768 pushop.bkresult = 2
769 769
770 770 pushop.outbookmarks.sort()
771 771
def _pushcheckoutgoing(pushop):
    """validate the outgoing set; return True when there is work to push

    Aborts when the outgoing set contains obsolete or unstable changesets
    (unless the push is forced), and runs head checking otherwise.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force and unfi.obsstore:
        # (an empty obsstore means no obsolete changesets at all, so the
        # whole scan can be skipped)
        mso = _("push includes obsolete changeset: %s!")
        mst = {"orphan": _("push includes orphan changeset: %s!"),
               "phase-divergent":
                   _("push includes phase-divergent changeset: %s!"),
               "content-divergent":
                   _("push includes content-divergent changeset: %s!")}
        # If at least one changeset in missing is obsolete or unstable,
        # then at least one of the missing heads is too -- so checking
        # heads only is ok.
        for node in outgoing.missingheads:
            ctx = unfi[node]
            if ctx.obsolete():
                raise error.Abort(mso % ctx)
            elif ctx.isunstable():
                # TODO print more than one instability in the abort message
                raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
806 806
# List of names of steps to perform for an outgoing bundle2, order matters.
# Populated by the `b2partsgenerator` decorator below.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
814 814
def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generating function

    The function is recorded under `stepname` in the step -> function
    mapping and inserted into the ordered step list (at position `idx`
    when given, appended otherwise).  Decoration order therefore matters.

    Only use this decorator for new steps; to wrap a step provided by an
    extension, mutate the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
833 833
834 834 def _pushb2ctxcheckheads(pushop, bundler):
835 835 """Generate race condition checking parts
836 836
837 837 Exists as an independent function to aid extensions
838 838 """
839 839 # * 'force' do not check for push race,
840 840 # * if we don't push anything, there are nothing to check.
841 841 if not pushop.force and pushop.outgoing.missingheads:
842 842 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
843 843 emptyremote = pushop.pushbranchmap is None
844 844 if not allowunrelated or emptyremote:
845 845 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
846 846 else:
847 847 affected = set()
848 848 for branch, heads in pushop.pushbranchmap.iteritems():
849 849 remoteheads, newheads, unsyncedheads, discardedheads = heads
850 850 if remoteheads is not None:
851 851 remote = set(remoteheads)
852 852 affected |= set(discardedheads) & remote
853 853 affected |= remote - set(newheads)
854 854 if affected:
855 855 data = iter(sorted(affected))
856 856 bundler.newpart('check:updated-heads', data=data)
857 857
858 858 def _pushing(pushop):
859 859 """return True if we are pushing anything"""
860 860 return bool(pushop.outgoing.missing
861 861 or pushop.outdatedphases
862 862 or pushop.outobsmarkers
863 863 or pushop.outbookmarks)
864 864
@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert a part checking bookmarks did not move on the remote"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'bookmarks' not in b2caps or not pushop.outbookmarks:
        return
    # send each bookmark along with the node we expect it to point at on
    # the remote side
    expected = [(book, bin(old)) for book, old, new in pushop.outbookmarks]
    bundler.newpart('check:bookmarks', data=bookmod.binaryencode(expected))
880 880
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert a part checking phases did not move on the remote"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is None or not hasphaseheads:
        return
    # check that the remote phase has not changed
    checks = [[] for p in phases.allphases]
    checks[phases.public].extend(pushop.remotephases.publicheads)
    checks[phases.draft].extend(pushop.remotephases.draftroots)
    if any(checks):
        for nodes in checks:
            nodes.sort()
        bundler.newpart('check:phases', data=phases.binaryencode(checks))
898 898
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # pick the highest changegroup version both sides understand
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise error.Abort(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        # advertise that the changegroup carries tree manifests
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
938 938
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Prefers the binary phase-heads part when the server supports it and
    legacy exchange is not forced, falling back to pushkey parts."""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    if 'pushkey' in b2caps:
        return _pushb2phasespushkey(pushop, bundler)
955 955
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if not pushop.outdatedphases:
        return
    # every outdated head is to be turned public (phase index 0)
    updates = [[] for p in phases.allphases]
    updates[0].extend(h.node() for h in pushop.outdatedphases)
    bundler.newpart('phase-heads', data=phases.binaryencode(updates))
964 964
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, so reply/failure handlers can map a part back
    # to the head it was publishing
    part2node = []

    def handlefailure(pushop, exc):
        """abort with a message naming the head whose pushkey part failed"""
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    # one pushkey part per head to turn public
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """warn about any head the server refused or ignored"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
999 999
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """insert an obsolescence markers part when both sides support one"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
1011 1011
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Prefers the dedicated binary 'bookmarks' part unless legacy exchange
    is requested, falling back to one pushkey part per bookmark."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    uselegacy = 'bookmarks' in pushop.repo.ui.configlist('devel',
                                                         'legacy.exchange')

    if 'bookmarks' in b2caps and not uselegacy:
        return _pushb2bookmarkspart(pushop, bundler)
    if 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)
1026 1026
1027 1027 def _bmaction(old, new):
1028 1028 """small utility for bookmark pushing"""
1029 1029 if not old:
1030 1030 return 'export'
1031 1031 elif not new:
1032 1032 return 'delete'
1033 1033 return 'update'
1034 1034
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmark moves through a dedicated binary bundle2 part"""
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        binnew = bin(new)
        data.append((book, binnew))
        allactions.append((book, _bmaction(old, binnew)))
    bundler.newpart('bookmarks', data=bookmod.binaryencode(data))

    def handlereply(op):
        # the part is processed as a whole; reaching the reply means every
        # recorded move succeeded
        ui = pushop.ui
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
1056 1056
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmark moves as one pushkey bundle2 part per bookmark"""
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark, action) triples, so reply/failure handlers can
    # map a part back to the bookmark it was moving
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        """abort with the message matching the failed bookmark part"""
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """report per-bookmark success or failure from the server reply"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
1101 1101
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    if not pushop.pushvars:
        return
    # parse KEY=VALUE pairs, aborting on malformed entries
    shellvars = {}
    for raw in pushop.pushvars:
        if '=' not in raw:
            msg = ("unable to parse variable '%s', should follow "
                   "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % raw)
        key, value = raw.split('=', 1)
        shellvars[key] = value

    part = bundler.newpart('pushvars')
    for key, value in shellvars.iteritems():
        part.addparam(key, value, mandatory=False)
1120 1120
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    # run every registered part generator; each may return a callable to
    # process the matching part of the server reply
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            # the reply bundle is processed under the push transaction when
            # pushback is enabled
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # delegate the failure to the callback the part generator registered
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1174 1174
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local; when pushing everything there is no
    # possible race on push, so the fast path can be used
    fastpath = (pushop.revs is None
                and not outgoing.excluded
                and not pushop.repo.changelog.filteredrevs)
    cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                     fastpath=fastpath,
                                     bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    remoteheads = ['force'] if pushop.force else pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())
1214 1214
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # pick the heads to turn public depending on whether the changeset
        # push succeeded
        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1274 1274
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked: do not change any phase, but inform the user
    # when moves would have been applicable
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if actualmoves:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
1291 1291
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
1310 1310
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = _bmaction(old, new)

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1341 1341
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None,
                 includepats=None, excludepats=None, depth=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless bundle1 is explicitly forced for this operation
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote, computed once
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1419 1419
class transactionmanager(util.transactional):
    """Manage the life cycle of a pull transaction.

    The transaction is created lazily, on first request, and the
    appropriate hooks run when the transaction is closed."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr:
            return self._tr
        # embed the (password-stripped) URL in the transaction name
        trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
        tr = self.repo.transaction(trname)
        tr.hookargs['source'] = self.source
        tr.hookargs['url'] = self.url
        self._tr = tr
        return tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1449 1449
def listkeys(remote, namespace):
    """Fetch the pushkey namespace ``namespace`` from peer ``remote``."""
    executor = remote.commandexecutor()
    with executor as e:
        future = e.callcommand('listkeys', {'namespace': namespace})
        return future.result()
1453 1453
def _fullpullbundle2(repo, pullop):
    """Repeatedly invoke _pullbundle2 until the pull is complete.

    The server may send a partial reply, i.e. when inlining
    pre-computed bundles. In that case, update the common
    set based on the results and pull another bundle.

    There are two indicators that the process is finished:
    - no changeset has been added, or
    - all remote heads are known locally.
    The head check must use the unfiltered view as obsoletion
    markers can hide heads.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        # snapshot state before the round trip so we can detect progress
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # no changesets were added: nothing more to fetch
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally: done
            break
        # fold the newly received heads into the common set and retry
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1491 1491
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None, includepats=None, excludepats=None,
         depth=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           includepats=includepats, excludepats=excludepats,
                           depth=depth,
                           **pycompat.strkwargs(opargs))

    # refuse pulling from a local peer whose requirements we do not support
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable('command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to run
            # before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            # each of the steps below is guarded by pullop.stepsdone, so
            # anything already handled by bundle2 above is skipped here
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool('experimental', 'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop
1573 1573
# List of names of discovery steps to perform before pull, in order.
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1581 1581
def pulldiscovery(stepname):
    """Decorator registering a function that performs discovery before pull.

    The decorated function is recorded in the step -> function mapping and
    its step name is appended to the ordered step list; decoration order
    therefore matters.

    Only use this decorator for a brand new step.  To wrap a step defined
    by another extension, modify the pulldiscovery dictionary directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1597 1597
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1603 1603
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    Without bundle2, bookmarks have to be fetched before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # already supplied by the caller
        return
    usebundle2 = pullop.canusebundle2
    if usebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    raw = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1618 1618
1619 1619
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point.

    Populates ``pullop.common``, ``pullop.fetch`` and ``pullop.rheads``.
    """
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it,
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            # all remote heads are already known; nothing to fetch
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1652 1652
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the getbundle argument dict from pull state and remote
    capabilities, fetches one bundle, applies it, then records which
    steps (changegroup/phases/bookmarks/obsmarkers) were covered via
    ``pullop.stepsdone`` so later legacy steps are skipped.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    # check server supports narrow and then adding includepats and excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs['includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs['excludepats'] = pullop.excludepats

    if streaming:
        # stream clone replaces both changegroup and phases transfer
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        # prefer the binary phases part over legacy pushkey listkeys
        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions adjust the request before it is sent
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1781 1781
1782 1782 def _pullbundle2extraprepare(pullop, kwargs):
1783 1783 """hook function so that extensions can extend the getbundle call"""
1784 1784
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2) path; picks the best available wire command:
    ``getbundle``, then ``changegroup`` (full pull), then
    ``changegroupsubset`` (partial pull).
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # full pull via the legacy 'changegroup' command
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        # partial pull via the legacy 'changegroupsubset' command
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1830 1830
def _pullphase(pullop):
    """Fetch phase data from the remote via listkeys and apply it."""
    if 'phases' not in pullop.stepsdone:
        remotephases = listkeys(pullop.remote, 'phases')
        _pullapplyphases(pullop, remotephases)
1837 1837
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the mapping returned by the remote's 'phases'
    pushkey namespace. Local phases only ever advance (toward public).
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind hot lookups once; phase comparisons below run per node
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1872 1872
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1884 1884
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    ``gettransaction`` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        # 'dump0' presence signals the remote has marker data to offer
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
1912 1912
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    # per-user include/exclude lists, falling back to 'default.*' entries
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        # NOTE(review): str.format on these message values may misbehave if
        # they are bytes under py3; the file elsewhere uses %-formatting —
        # confirm against the project's bytes/str conventions.
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    # '*' grants access to the whole repo; otherwise anchor as path: patterns
    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    # intersect what the client asked for with what the ACL allows
    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'narrow_acl'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args
1957 1957
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
              May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: Set of revs the client already has as full (non-ellipsis) nodes.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
             most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
                    need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
                        the narrowspec. The client needs these as non-ellipsis
                        nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
                       narrowchangegroup to produce ellipsis nodes with the
                       correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # move ``roots`` from ``head`` to ``child``; ``child`` becomes a root
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        # NOTE: deliberately rebinds ``clrev`` (was cl.rev above) to the
        # changelogrevision object; the function form is not used past here
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2083 2083
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise the repo's bundle2 capabilities as a URL-quoted blob
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
2090 2090
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
2098 2098
def getbundle2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator for getbundle.

    The decorated function is recorded in the step -> function mapping and
    the step name is added to the ordered step list (appended, or inserted
    at ``idx`` when given); decoration order therefore matters.

    Only use this decorator for a brand new step.  To wrap a step defined
    by another extension, modify the getbundle2partsmapping dictionary
    directly."""
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
2117 2117
def bundle2requested(bundlecaps):
    """Return True if ``bundlecaps`` requests a bundle2 (HG2x) bundle."""
    if bundlecaps is None:
        return False
    for cap in bundlecaps:
        if cap.startswith('HG2'):
            return True
    return False
2122 2122
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 only carries a changegroup; any other getbundle argument
        # would be silently dropped, so refuse them outright
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    # decode the bundle2 capabilities advertised by the client
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2169 2169
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    # Delegate entirely to bundle2, which decides whether a stream2 part
    # applies to this request.
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2173 2173
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle

    Also emits a 'narrow:spec' part when a narrow changegroup with narrow
    ACLs is requested, carrying the include/exclude patterns as part data.
    """
    if not kwargs.get(r'cg', True):
        return

    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise error.Abort(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        # nothing to transfer, no part to add
        return

    if kwargs.get(r'narrow', False):
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps, matcher=matcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    if (kwargs.get(r'narrow', False) and kwargs.get(r'narrow_acl', False)
        and (include or exclude)):
        # Send the narrowspec as part *data* instead of bundle2 part
        # parameters (issue5952, issue6019): parameters have a length
        # limit which long include/exclude lists can overflow. Wire
        # format is '<includes>\0<excludes>' with the patterns inside
        # each half separated by newlines.
        narrowspecpart = bundler.newpart('narrow:spec')
        data = ''
        if include:
            data += '\n'.join(include)
        data += '\0'
        if exclude:
            data += '\n'.join(exclude)

        narrowspecpart.data = data
2223 2225
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise error.Abort(_('no common bookmarks exchange method'))
    # binary-encode all local bookmarks; only emit a part when non-empty
    data = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if data:
        bundler.newpart('bookmarks', data=data)
2236 2238
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one part per requested pushkey namespace
    for namespace in kwargs.get(r'listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
2247 2249
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get(r'obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # only markers relevant to the ancestors of the requested heads
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    bundle2.buildobsmarkerspart(bundler, markers)
2259 2261
@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        # b2caps.get('phases') is None when the client does not advertise
        # the capability at all; treat that like an empty capability list
        # so we raise the intended Abort instead of a TypeError on the
        # membership test.
        if 'heads' not in (b2caps.get('phases') or ()):
            raise error.Abort(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # publishing server: everything it serves is public
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now)
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)
2306 2308
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless changesets are being exchanged...
    if not kwargs.get(r'cg', True):
        return
    # ...and the client supports it.
    if 'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2326 2328
@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless changesets are being exchanged...
    if not kwargs.get(r'cg', True):
        return
    # ...and the client supports it...
    if 'rev-branch-cache' not in b2caps:
        return
    # ...and a narrow bundle isn't in play (not currently compatible).
    if kwargs.get(r'narrow', False):
        return
    if repo.ui.has_section(_NARROWACL_SECTION):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2353 2355
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.

    ``their_heads`` may be ['force'] (skip the check), the literal head
    list, or ['hashed', <sha1 digest>] of the sorted head list.
    """
    heads = repo.heads()
    # digest of the concatenated, sorted local heads; compared against the
    # 'hashed' form clients may send instead of the full head list
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
2367 2369
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    # lazily take the locks and open the transaction the
                    # first time a part actually needs one
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # capture the reply even when processing raised, so the
                    # client still receives whatever output was produced
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # flag the exception so upper layers know it happened during
                # bundle2 processing, and salvage any captured output
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        # release in reverse acquisition order: tr, lock, wlock
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2441 2443
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # feature must be enabled client-side
    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # a partial pull (explicit heads) can't be served by a pre-built bundle
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # apply the most-preferred entry only
    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
2508 2510
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        # remaining fields are url-quoted key=value attributes
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # keep the raw attribute; the entry may still be usable
                    pass

        entries.append(attrs)

    return entries
2543 2545
def isstreamclonespec(bundlespec):
    """Report whether *bundlespec* describes a stream clone bundle (v1 or v2)."""
    if bundlespec.wirecompression != 'UN':
        return False

    # Stream clone v1
    if bundlespec.wireversion == 's1':
        return True

    # Stream clone v2
    return (bundlespec.wireversion == '02'
            and bool(bundlespec.contentopts.get('streamv2')))
2556 2558
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    compatible = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if not spec:
            # Without a spec we cannot tell whether this is a stream clone
            # bundle, so drop it when one was explicitly requested.
            if streamclonerequested:
                repo.ui.debug('filtering %s because cannot determine if a stream '
                              'clone bundle\n' % entry['URL'])
                continue
        else:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        compatible.append(entry)

    return compatible
2604 2606
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Walk the user's preference list in order; the first attribute that
        # discriminates the two entries decides the ordering.
        for prefkey, prefvalue in self.prefers:
            ours = self.value.get(prefkey)
            theirs = other.value.get(prefkey)

            # An entry matching the preference exactly beats one that lacks
            # the attribute entirely.
            if ours is not None and theirs is None and ours == prefvalue:
                return -1
            if theirs is not None and ours is None and theirs == prefvalue:
                return 1

            # We can't compare unless the attribute is present on both sides.
            if ours is None or theirs is None:
                continue

            # Identical values don't discriminate; try the next attribute.
            if ours == theirs:
                continue

            # An exact preference match sorts first.
            if ours == prefvalue:
                return -1
            if theirs == prefvalue:
                return 1

            continue

        # No preference discriminated the entries; keep manifest order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
2668 2670
def sortclonebundleentries(ui, entries):
    """Sort manifest entries by the user's ``ui.clonebundleprefers`` config."""
    raw = ui.configlist('ui', 'clonebundleprefers')
    # without preferences, keep the manifest order (but return a new list)
    if not raw:
        return list(entries)

    prefers = [p.split('=', 1) for p in raw]

    decorated = sorted(clonebundleentry(v, prefers) for v in entries)
    return [d.value for d in decorated]
2678 2680
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True on success; on HTTP/URL errors warns and returns False so
    the caller can decide whether to fall back to a regular clone.
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                # stream clone bundles bypass the normal bundle2 machinery
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False
General Comments 0
You need to be logged in to leave comments. Login now