exchange: extract a function to sort obsolete markers...
Denis Laxalde
r43571:48b9fbfb default
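The change extracts a _sortedmarkers helper so callers can sort obsolescence
markers even when the last field of a marker tuple ('parents') is None: on
Python 3, comparing None against a tuple raises TypeError, so the sort key
normalizes a missing parents field to an empty tuple. Below is a minimal
standalone sketch of that key function, using hypothetical marker-shaped
6-tuples of the form (prec, succs, flags, metadata, date, parents); the
values are illustrative, not real markers:

    # Sketch only: hypothetical tuples shaped like obsstore markers;
    # the trailing 'parents' field may be None or a tuple.
    markers = [
        (b'n2', (b'n3',), 0, (), (0.0, 0), None),      # no parent data
        (b'n1', (b'n2',), 0, (), (0.0, 0), (b'p1',)),  # parents recorded
        (b'n1', (b'n2',), 0, (), (0.0, 0), None),
    ]

    # Same key as _sortedmarkers: keep the first five fields as-is and
    # substitute () for a None 'parents' so all tuples stay comparable.
    for m in sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),)):
        print(m)

Without the normalization, sorted() would fail on Python 3 whenever a None
parents field had to be compared against a tuple.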
@@ -1,3085 +1,3089 @@
 # exchange.py - utility to exchange data between repos.
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import collections
 import hashlib

 from .i18n import _
 from .node import (
     hex,
     nullid,
     nullrev,
 )
 from .thirdparty import attr
 from . import (
     bookmarks as bookmod,
     bundle2,
     changegroup,
     discovery,
     error,
     exchangev2,
     lock as lockmod,
     logexchange,
     narrowspec,
     obsolete,
     phases,
     pushkey,
     pycompat,
     scmutil,
     sslutil,
     streamclone,
     url as urlmod,
     util,
     wireprototypes,
 )
 from .interfaces import repository
 from .utils import stringutil

 urlerr = util.urlerr
 urlreq = util.urlreq

 _NARROWACL_SECTION = b'narrowacl'

 # Maps bundle version human names to changegroup versions.
 _bundlespeccgversions = {
     b'v1': b'01',
     b'v2': b'02',
     b'packed1': b's1',
     b'bundle2': b'02',  # legacy
 }

 # Maps bundle version with content opts to choose which part to bundle
 _bundlespeccontentopts = {
     b'v1': {
         b'changegroup': True,
         b'cg.version': b'01',
         b'obsolescence': False,
         b'phases': False,
         b'tagsfnodescache': False,
         b'revbranchcache': False,
     },
     b'v2': {
         b'changegroup': True,
         b'cg.version': b'02',
         b'obsolescence': False,
         b'phases': False,
         b'tagsfnodescache': True,
         b'revbranchcache': True,
     },
     b'packed1': {b'cg.version': b's1'},
 }
 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']

 _bundlespecvariants = {
     b"streamv2": {
         b"changegroup": False,
         b"streamv2": True,
         b"tagsfnodescache": False,
         b"revbranchcache": False,
     }
 }

 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}


 @attr.s
 class bundlespec(object):
     compression = attr.ib()
     wirecompression = attr.ib()
     version = attr.ib()
     wireversion = attr.ib()
     params = attr.ib()
     contentopts = attr.ib()

+def _sortedmarkers(markers):
+    # last item of marker tuple ('parents') may be None or a tuple
+    return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
+
+
102 def parsebundlespec(repo, spec, strict=True):
107 def parsebundlespec(repo, spec, strict=True):
103 """Parse a bundle string specification into parts.
108 """Parse a bundle string specification into parts.
104
109
105 Bundle specifications denote a well-defined bundle/exchange format.
110 Bundle specifications denote a well-defined bundle/exchange format.
106 The content of a given specification should not change over time in
111 The content of a given specification should not change over time in
107 order to ensure that bundles produced by a newer version of Mercurial are
112 order to ensure that bundles produced by a newer version of Mercurial are
108 readable from an older version.
113 readable from an older version.
109
114
110 The string currently has the form:
115 The string currently has the form:
111
116
112 <compression>-<type>[;<parameter0>[;<parameter1>]]
117 <compression>-<type>[;<parameter0>[;<parameter1>]]
113
118
114 Where <compression> is one of the supported compression formats
119 Where <compression> is one of the supported compression formats
115 and <type> is (currently) a version string. A ";" can follow the type and
120 and <type> is (currently) a version string. A ";" can follow the type and
116 all text afterwards is interpreted as URI encoded, ";" delimited key=value
121 all text afterwards is interpreted as URI encoded, ";" delimited key=value
117 pairs.
122 pairs.
118
123
119 If ``strict`` is True (the default) <compression> is required. Otherwise,
124 If ``strict`` is True (the default) <compression> is required. Otherwise,
120 it is optional.
125 it is optional.
121
126
122 Returns a bundlespec object of (compression, version, parameters).
127 Returns a bundlespec object of (compression, version, parameters).
123 Compression will be ``None`` if not in strict mode and a compression isn't
128 Compression will be ``None`` if not in strict mode and a compression isn't
124 defined.
129 defined.
125
130
126 An ``InvalidBundleSpecification`` is raised when the specification is
131 An ``InvalidBundleSpecification`` is raised when the specification is
127 not syntactically well formed.
132 not syntactically well formed.
128
133
129 An ``UnsupportedBundleSpecification`` is raised when the compression or
134 An ``UnsupportedBundleSpecification`` is raised when the compression or
130 bundle type/version is not recognized.
135 bundle type/version is not recognized.
131
136
132 Note: this function will likely eventually return a more complex data
137 Note: this function will likely eventually return a more complex data
133 structure, including bundle2 part information.
138 structure, including bundle2 part information.
134 """
139 """
135
140
136 def parseparams(s):
141 def parseparams(s):
137 if b';' not in s:
142 if b';' not in s:
138 return s, {}
143 return s, {}
139
144
140 params = {}
145 params = {}
141 version, paramstr = s.split(b';', 1)
146 version, paramstr = s.split(b';', 1)
142
147
143 for p in paramstr.split(b';'):
148 for p in paramstr.split(b';'):
144 if b'=' not in p:
149 if b'=' not in p:
145 raise error.InvalidBundleSpecification(
150 raise error.InvalidBundleSpecification(
146 _(
151 _(
147 b'invalid bundle specification: '
152 b'invalid bundle specification: '
148 b'missing "=" in parameter: %s'
153 b'missing "=" in parameter: %s'
149 )
154 )
150 % p
155 % p
151 )
156 )
152
157
153 key, value = p.split(b'=', 1)
158 key, value = p.split(b'=', 1)
154 key = urlreq.unquote(key)
159 key = urlreq.unquote(key)
155 value = urlreq.unquote(value)
160 value = urlreq.unquote(value)
156 params[key] = value
161 params[key] = value
157
162
158 return version, params
163 return version, params
159
164
160 if strict and b'-' not in spec:
165 if strict and b'-' not in spec:
161 raise error.InvalidBundleSpecification(
166 raise error.InvalidBundleSpecification(
162 _(
167 _(
163 b'invalid bundle specification; '
168 b'invalid bundle specification; '
164 b'must be prefixed with compression: %s'
169 b'must be prefixed with compression: %s'
165 )
170 )
166 % spec
171 % spec
167 )
172 )
168
173
169 if b'-' in spec:
174 if b'-' in spec:
170 compression, version = spec.split(b'-', 1)
175 compression, version = spec.split(b'-', 1)
171
176
172 if compression not in util.compengines.supportedbundlenames:
177 if compression not in util.compengines.supportedbundlenames:
173 raise error.UnsupportedBundleSpecification(
178 raise error.UnsupportedBundleSpecification(
174 _(b'%s compression is not supported') % compression
179 _(b'%s compression is not supported') % compression
175 )
180 )
176
181
177 version, params = parseparams(version)
182 version, params = parseparams(version)
178
183
179 if version not in _bundlespeccgversions:
184 if version not in _bundlespeccgversions:
180 raise error.UnsupportedBundleSpecification(
185 raise error.UnsupportedBundleSpecification(
181 _(b'%s is not a recognized bundle version') % version
186 _(b'%s is not a recognized bundle version') % version
182 )
187 )
183 else:
188 else:
184 # Value could be just the compression or just the version, in which
189 # Value could be just the compression or just the version, in which
185 # case some defaults are assumed (but only when not in strict mode).
190 # case some defaults are assumed (but only when not in strict mode).
186 assert not strict
191 assert not strict
187
192
188 spec, params = parseparams(spec)
193 spec, params = parseparams(spec)
189
194
190 if spec in util.compengines.supportedbundlenames:
195 if spec in util.compengines.supportedbundlenames:
191 compression = spec
196 compression = spec
192 version = b'v1'
197 version = b'v1'
193 # Generaldelta repos require v2.
198 # Generaldelta repos require v2.
194 if b'generaldelta' in repo.requirements:
199 if b'generaldelta' in repo.requirements:
195 version = b'v2'
200 version = b'v2'
196 # Modern compression engines require v2.
201 # Modern compression engines require v2.
197 if compression not in _bundlespecv1compengines:
202 if compression not in _bundlespecv1compengines:
198 version = b'v2'
203 version = b'v2'
199 elif spec in _bundlespeccgversions:
204 elif spec in _bundlespeccgversions:
200 if spec == b'packed1':
205 if spec == b'packed1':
201 compression = b'none'
206 compression = b'none'
202 else:
207 else:
203 compression = b'bzip2'
208 compression = b'bzip2'
204 version = spec
209 version = spec
205 else:
210 else:
206 raise error.UnsupportedBundleSpecification(
211 raise error.UnsupportedBundleSpecification(
207 _(b'%s is not a recognized bundle specification') % spec
212 _(b'%s is not a recognized bundle specification') % spec
208 )
213 )
209
214
210 # Bundle version 1 only supports a known set of compression engines.
215 # Bundle version 1 only supports a known set of compression engines.
211 if version == b'v1' and compression not in _bundlespecv1compengines:
216 if version == b'v1' and compression not in _bundlespecv1compengines:
212 raise error.UnsupportedBundleSpecification(
217 raise error.UnsupportedBundleSpecification(
213 _(b'compression engine %s is not supported on v1 bundles')
218 _(b'compression engine %s is not supported on v1 bundles')
214 % compression
219 % compression
215 )
220 )
216
221
217 # The specification for packed1 can optionally declare the data formats
222 # The specification for packed1 can optionally declare the data formats
218 # required to apply it. If we see this metadata, compare against what the
223 # required to apply it. If we see this metadata, compare against what the
219 # repo supports and error if the bundle isn't compatible.
224 # repo supports and error if the bundle isn't compatible.
220 if version == b'packed1' and b'requirements' in params:
225 if version == b'packed1' and b'requirements' in params:
221 requirements = set(params[b'requirements'].split(b','))
226 requirements = set(params[b'requirements'].split(b','))
222 missingreqs = requirements - repo.supportedformats
227 missingreqs = requirements - repo.supportedformats
223 if missingreqs:
228 if missingreqs:
224 raise error.UnsupportedBundleSpecification(
229 raise error.UnsupportedBundleSpecification(
225 _(b'missing support for repository features: %s')
230 _(b'missing support for repository features: %s')
226 % b', '.join(sorted(missingreqs))
231 % b', '.join(sorted(missingreqs))
227 )
232 )
228
233
229 # Compute contentopts based on the version
234 # Compute contentopts based on the version
230 contentopts = _bundlespeccontentopts.get(version, {}).copy()
235 contentopts = _bundlespeccontentopts.get(version, {}).copy()
231
236
232 # Process the variants
237 # Process the variants
233 if b"stream" in params and params[b"stream"] == b"v2":
238 if b"stream" in params and params[b"stream"] == b"v2":
234 variant = _bundlespecvariants[b"streamv2"]
239 variant = _bundlespecvariants[b"streamv2"]
235 contentopts.update(variant)
240 contentopts.update(variant)
236
241
237 engine = util.compengines.forbundlename(compression)
242 engine = util.compengines.forbundlename(compression)
238 compression, wirecompression = engine.bundletype()
243 compression, wirecompression = engine.bundletype()
239 wireversion = _bundlespeccgversions[version]
244 wireversion = _bundlespeccgversions[version]
240
245
241 return bundlespec(
246 return bundlespec(
242 compression, wirecompression, version, wireversion, params, contentopts
247 compression, wirecompression, version, wireversion, params, contentopts
243 )
248 )
244
249
245
250
246 def readbundle(ui, fh, fname, vfs=None):
251 def readbundle(ui, fh, fname, vfs=None):
247 header = changegroup.readexactly(fh, 4)
252 header = changegroup.readexactly(fh, 4)
248
253
249 alg = None
254 alg = None
250 if not fname:
255 if not fname:
251 fname = b"stream"
256 fname = b"stream"
252 if not header.startswith(b'HG') and header.startswith(b'\0'):
257 if not header.startswith(b'HG') and header.startswith(b'\0'):
253 fh = changegroup.headerlessfixup(fh, header)
258 fh = changegroup.headerlessfixup(fh, header)
254 header = b"HG10"
259 header = b"HG10"
255 alg = b'UN'
260 alg = b'UN'
256 elif vfs:
261 elif vfs:
257 fname = vfs.join(fname)
262 fname = vfs.join(fname)
258
263
259 magic, version = header[0:2], header[2:4]
264 magic, version = header[0:2], header[2:4]
260
265
261 if magic != b'HG':
266 if magic != b'HG':
262 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
267 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
263 if version == b'10':
268 if version == b'10':
264 if alg is None:
269 if alg is None:
265 alg = changegroup.readexactly(fh, 2)
270 alg = changegroup.readexactly(fh, 2)
266 return changegroup.cg1unpacker(fh, alg)
271 return changegroup.cg1unpacker(fh, alg)
267 elif version.startswith(b'2'):
272 elif version.startswith(b'2'):
268 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
273 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
269 elif version == b'S1':
274 elif version == b'S1':
270 return streamclone.streamcloneapplier(fh)
275 return streamclone.streamcloneapplier(fh)
271 else:
276 else:
272 raise error.Abort(
277 raise error.Abort(
273 _(b'%s: unknown bundle version %s') % (fname, version)
278 _(b'%s: unknown bundle version %s') % (fname, version)
274 )
279 )
275
280
276
281
277 def getbundlespec(ui, fh):
282 def getbundlespec(ui, fh):
278 """Infer the bundlespec from a bundle file handle.
283 """Infer the bundlespec from a bundle file handle.
279
284
280 The input file handle is seeked and the original seek position is not
285 The input file handle is seeked and the original seek position is not
281 restored.
286 restored.
282 """
287 """
283
288
284 def speccompression(alg):
289 def speccompression(alg):
285 try:
290 try:
286 return util.compengines.forbundletype(alg).bundletype()[0]
291 return util.compengines.forbundletype(alg).bundletype()[0]
287 except KeyError:
292 except KeyError:
288 return None
293 return None
289
294
290 b = readbundle(ui, fh, None)
295 b = readbundle(ui, fh, None)
291 if isinstance(b, changegroup.cg1unpacker):
296 if isinstance(b, changegroup.cg1unpacker):
292 alg = b._type
297 alg = b._type
293 if alg == b'_truncatedBZ':
298 if alg == b'_truncatedBZ':
294 alg = b'BZ'
299 alg = b'BZ'
295 comp = speccompression(alg)
300 comp = speccompression(alg)
296 if not comp:
301 if not comp:
297 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
302 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
298 return b'%s-v1' % comp
303 return b'%s-v1' % comp
299 elif isinstance(b, bundle2.unbundle20):
304 elif isinstance(b, bundle2.unbundle20):
300 if b'Compression' in b.params:
305 if b'Compression' in b.params:
301 comp = speccompression(b.params[b'Compression'])
306 comp = speccompression(b.params[b'Compression'])
302 if not comp:
307 if not comp:
303 raise error.Abort(
308 raise error.Abort(
304 _(b'unknown compression algorithm: %s') % comp
309 _(b'unknown compression algorithm: %s') % comp
305 )
310 )
306 else:
311 else:
307 comp = b'none'
312 comp = b'none'
308
313
309 version = None
314 version = None
310 for part in b.iterparts():
315 for part in b.iterparts():
311 if part.type == b'changegroup':
316 if part.type == b'changegroup':
312 version = part.params[b'version']
317 version = part.params[b'version']
313 if version in (b'01', b'02'):
318 if version in (b'01', b'02'):
314 version = b'v2'
319 version = b'v2'
315 else:
320 else:
316 raise error.Abort(
321 raise error.Abort(
317 _(
322 _(
318 b'changegroup version %s does not have '
323 b'changegroup version %s does not have '
319 b'a known bundlespec'
324 b'a known bundlespec'
320 )
325 )
321 % version,
326 % version,
322 hint=_(b'try upgrading your Mercurial client'),
327 hint=_(b'try upgrading your Mercurial client'),
323 )
328 )
324 elif part.type == b'stream2' and version is None:
329 elif part.type == b'stream2' and version is None:
325 # A stream2 part requires to be part of a v2 bundle
330 # A stream2 part requires to be part of a v2 bundle
326 requirements = urlreq.unquote(part.params[b'requirements'])
331 requirements = urlreq.unquote(part.params[b'requirements'])
327 splitted = requirements.split()
332 splitted = requirements.split()
328 params = bundle2._formatrequirementsparams(splitted)
333 params = bundle2._formatrequirementsparams(splitted)
329 return b'none-v2;stream=v2;%s' % params
334 return b'none-v2;stream=v2;%s' % params
330
335
331 if not version:
336 if not version:
332 raise error.Abort(
337 raise error.Abort(
333 _(b'could not identify changegroup version in bundle')
338 _(b'could not identify changegroup version in bundle')
334 )
339 )
335
340
336 return b'%s-%s' % (comp, version)
341 return b'%s-%s' % (comp, version)
337 elif isinstance(b, streamclone.streamcloneapplier):
342 elif isinstance(b, streamclone.streamcloneapplier):
338 requirements = streamclone.readbundle1header(fh)[2]
343 requirements = streamclone.readbundle1header(fh)[2]
339 formatted = bundle2._formatrequirementsparams(requirements)
344 formatted = bundle2._formatrequirementsparams(requirements)
340 return b'none-packed1;%s' % formatted
345 return b'none-packed1;%s' % formatted
341 else:
346 else:
342 raise error.Abort(_(b'unknown bundle type: %s') % b)
347 raise error.Abort(_(b'unknown bundle type: %s') % b)
343
348
344
349
345 def _computeoutgoing(repo, heads, common):
350 def _computeoutgoing(repo, heads, common):
346 """Computes which revs are outgoing given a set of common
351 """Computes which revs are outgoing given a set of common
347 and a set of heads.
352 and a set of heads.
348
353
349 This is a separate function so extensions can have access to
354 This is a separate function so extensions can have access to
350 the logic.
355 the logic.
351
356
352 Returns a discovery.outgoing object.
357 Returns a discovery.outgoing object.
353 """
358 """
354 cl = repo.changelog
359 cl = repo.changelog
355 if common:
360 if common:
356 hasnode = cl.hasnode
361 hasnode = cl.hasnode
357 common = [n for n in common if hasnode(n)]
362 common = [n for n in common if hasnode(n)]
358 else:
363 else:
359 common = [nullid]
364 common = [nullid]
360 if not heads:
365 if not heads:
361 heads = cl.heads()
366 heads = cl.heads()
362 return discovery.outgoing(repo, common, heads)
367 return discovery.outgoing(repo, common, heads)
363
368
364
369
365 def _checkpublish(pushop):
370 def _checkpublish(pushop):
366 repo = pushop.repo
371 repo = pushop.repo
367 ui = repo.ui
372 ui = repo.ui
368 behavior = ui.config(b'experimental', b'auto-publish')
373 behavior = ui.config(b'experimental', b'auto-publish')
369 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
374 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
370 return
375 return
371 remotephases = listkeys(pushop.remote, b'phases')
376 remotephases = listkeys(pushop.remote, b'phases')
372 if not remotephases.get(b'publishing', False):
377 if not remotephases.get(b'publishing', False):
373 return
378 return
374
379
375 if pushop.revs is None:
380 if pushop.revs is None:
376 published = repo.filtered(b'served').revs(b'not public()')
381 published = repo.filtered(b'served').revs(b'not public()')
377 else:
382 else:
378 published = repo.revs(b'::%ln - public()', pushop.revs)
383 published = repo.revs(b'::%ln - public()', pushop.revs)
379 if published:
384 if published:
380 if behavior == b'warn':
385 if behavior == b'warn':
381 ui.warn(
386 ui.warn(
382 _(b'%i changesets about to be published\n') % len(published)
387 _(b'%i changesets about to be published\n') % len(published)
383 )
388 )
384 elif behavior == b'confirm':
389 elif behavior == b'confirm':
385 if ui.promptchoice(
390 if ui.promptchoice(
386 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
391 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
387 % len(published)
392 % len(published)
388 ):
393 ):
389 raise error.Abort(_(b'user quit'))
394 raise error.Abort(_(b'user quit'))
390 elif behavior == b'abort':
395 elif behavior == b'abort':
391 msg = _(b'push would publish %i changesets') % len(published)
396 msg = _(b'push would publish %i changesets') % len(published)
392 hint = _(
397 hint = _(
393 b"use --publish or adjust 'experimental.auto-publish'"
398 b"use --publish or adjust 'experimental.auto-publish'"
394 b" config"
399 b" config"
395 )
400 )
396 raise error.Abort(msg, hint=hint)
401 raise error.Abort(msg, hint=hint)
397
402
398
403
399 def _forcebundle1(op):
404 def _forcebundle1(op):
400 """return true if a pull/push must use bundle1
405 """return true if a pull/push must use bundle1
401
406
402 This function is used to allow testing of the older bundle version"""
407 This function is used to allow testing of the older bundle version"""
403 ui = op.repo.ui
408 ui = op.repo.ui
404 # The goal is this config is to allow developer to choose the bundle
409 # The goal is this config is to allow developer to choose the bundle
405 # version used during exchanged. This is especially handy during test.
410 # version used during exchanged. This is especially handy during test.
406 # Value is a list of bundle version to be picked from, highest version
411 # Value is a list of bundle version to be picked from, highest version
407 # should be used.
412 # should be used.
408 #
413 #
409 # developer config: devel.legacy.exchange
414 # developer config: devel.legacy.exchange
410 exchange = ui.configlist(b'devel', b'legacy.exchange')
415 exchange = ui.configlist(b'devel', b'legacy.exchange')
411 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
416 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
412 return forcebundle1 or not op.remote.capable(b'bundle2')
417 return forcebundle1 or not op.remote.capable(b'bundle2')
413
418
414
419
415 class pushoperation(object):
420 class pushoperation(object):
416 """A object that represent a single push operation
421 """A object that represent a single push operation
417
422
418 Its purpose is to carry push related state and very common operations.
423 Its purpose is to carry push related state and very common operations.
419
424
420 A new pushoperation should be created at the beginning of each push and
425 A new pushoperation should be created at the beginning of each push and
421 discarded afterward.
426 discarded afterward.
422 """
427 """
423
428
424 def __init__(
429 def __init__(
425 self,
430 self,
426 repo,
431 repo,
427 remote,
432 remote,
428 force=False,
433 force=False,
429 revs=None,
434 revs=None,
430 newbranch=False,
435 newbranch=False,
431 bookmarks=(),
436 bookmarks=(),
432 publish=False,
437 publish=False,
433 pushvars=None,
438 pushvars=None,
434 ):
439 ):
435 # repo we push from
440 # repo we push from
436 self.repo = repo
441 self.repo = repo
437 self.ui = repo.ui
442 self.ui = repo.ui
438 # repo we push to
443 # repo we push to
439 self.remote = remote
444 self.remote = remote
440 # force option provided
445 # force option provided
441 self.force = force
446 self.force = force
442 # revs to be pushed (None is "all")
447 # revs to be pushed (None is "all")
443 self.revs = revs
448 self.revs = revs
444 # bookmark explicitly pushed
449 # bookmark explicitly pushed
445 self.bookmarks = bookmarks
450 self.bookmarks = bookmarks
446 # allow push of new branch
451 # allow push of new branch
447 self.newbranch = newbranch
452 self.newbranch = newbranch
448 # step already performed
453 # step already performed
449 # (used to check what steps have been already performed through bundle2)
454 # (used to check what steps have been already performed through bundle2)
450 self.stepsdone = set()
455 self.stepsdone = set()
451 # Integer version of the changegroup push result
456 # Integer version of the changegroup push result
452 # - None means nothing to push
457 # - None means nothing to push
453 # - 0 means HTTP error
458 # - 0 means HTTP error
454 # - 1 means we pushed and remote head count is unchanged *or*
459 # - 1 means we pushed and remote head count is unchanged *or*
455 # we have outgoing changesets but refused to push
460 # we have outgoing changesets but refused to push
456 # - other values as described by addchangegroup()
461 # - other values as described by addchangegroup()
457 self.cgresult = None
462 self.cgresult = None
458 # Boolean value for the bookmark push
463 # Boolean value for the bookmark push
459 self.bkresult = None
464 self.bkresult = None
460 # discover.outgoing object (contains common and outgoing data)
465 # discover.outgoing object (contains common and outgoing data)
461 self.outgoing = None
466 self.outgoing = None
462 # all remote topological heads before the push
467 # all remote topological heads before the push
463 self.remoteheads = None
468 self.remoteheads = None
464 # Details of the remote branch pre and post push
469 # Details of the remote branch pre and post push
465 #
470 #
466 # mapping: {'branch': ([remoteheads],
471 # mapping: {'branch': ([remoteheads],
467 # [newheads],
472 # [newheads],
468 # [unsyncedheads],
473 # [unsyncedheads],
469 # [discardedheads])}
474 # [discardedheads])}
470 # - branch: the branch name
475 # - branch: the branch name
471 # - remoteheads: the list of remote heads known locally
476 # - remoteheads: the list of remote heads known locally
472 # None if the branch is new
477 # None if the branch is new
473 # - newheads: the new remote heads (known locally) with outgoing pushed
478 # - newheads: the new remote heads (known locally) with outgoing pushed
474 # - unsyncedheads: the list of remote heads unknown locally.
479 # - unsyncedheads: the list of remote heads unknown locally.
475 # - discardedheads: the list of remote heads made obsolete by the push
480 # - discardedheads: the list of remote heads made obsolete by the push
476 self.pushbranchmap = None
481 self.pushbranchmap = None
477 # testable as a boolean indicating if any nodes are missing locally.
482 # testable as a boolean indicating if any nodes are missing locally.
478 self.incoming = None
483 self.incoming = None
479 # summary of the remote phase situation
484 # summary of the remote phase situation
480 self.remotephases = None
485 self.remotephases = None
481 # phases changes that must be pushed along side the changesets
486 # phases changes that must be pushed along side the changesets
482 self.outdatedphases = None
487 self.outdatedphases = None
483 # phases changes that must be pushed if changeset push fails
488 # phases changes that must be pushed if changeset push fails
484 self.fallbackoutdatedphases = None
489 self.fallbackoutdatedphases = None
485 # outgoing obsmarkers
490 # outgoing obsmarkers
486 self.outobsmarkers = set()
491 self.outobsmarkers = set()
487 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
492 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
488 self.outbookmarks = []
493 self.outbookmarks = []
489 # transaction manager
494 # transaction manager
490 self.trmanager = None
495 self.trmanager = None
491 # map { pushkey partid -> callback handling failure}
496 # map { pushkey partid -> callback handling failure}
492 # used to handle exception from mandatory pushkey part failure
497 # used to handle exception from mandatory pushkey part failure
493 self.pkfailcb = {}
498 self.pkfailcb = {}
494 # an iterable of pushvars or None
499 # an iterable of pushvars or None
495 self.pushvars = pushvars
500 self.pushvars = pushvars
496 # publish pushed changesets
501 # publish pushed changesets
497 self.publish = publish
502 self.publish = publish
498
503
499 @util.propertycache
504 @util.propertycache
500 def futureheads(self):
505 def futureheads(self):
501 """future remote heads if the changeset push succeeds"""
506 """future remote heads if the changeset push succeeds"""
502 return self.outgoing.missingheads
507 return self.outgoing.missingheads
503
508
504 @util.propertycache
509 @util.propertycache
505 def fallbackheads(self):
510 def fallbackheads(self):
506 """future remote heads if the changeset push fails"""
511 """future remote heads if the changeset push fails"""
507 if self.revs is None:
512 if self.revs is None:
508 # not target to push, all common are relevant
513 # not target to push, all common are relevant
509 return self.outgoing.commonheads
514 return self.outgoing.commonheads
510 unfi = self.repo.unfiltered()
515 unfi = self.repo.unfiltered()
511 # I want cheads = heads(::missingheads and ::commonheads)
516 # I want cheads = heads(::missingheads and ::commonheads)
512 # (missingheads is revs with secret changeset filtered out)
517 # (missingheads is revs with secret changeset filtered out)
513 #
518 #
514 # This can be expressed as:
519 # This can be expressed as:
515 # cheads = ( (missingheads and ::commonheads)
520 # cheads = ( (missingheads and ::commonheads)
516 # + (commonheads and ::missingheads))"
521 # + (commonheads and ::missingheads))"
517 # )
522 # )
518 #
523 #
519 # while trying to push we already computed the following:
524 # while trying to push we already computed the following:
520 # common = (::commonheads)
525 # common = (::commonheads)
521 # missing = ((commonheads::missingheads) - commonheads)
526 # missing = ((commonheads::missingheads) - commonheads)
522 #
527 #
523 # We can pick:
528 # We can pick:
524 # * missingheads part of common (::commonheads)
529 # * missingheads part of common (::commonheads)
525 common = self.outgoing.common
530 common = self.outgoing.common
526 nm = self.repo.changelog.nodemap
531 nm = self.repo.changelog.nodemap
527 cheads = [node for node in self.revs if nm[node] in common]
532 cheads = [node for node in self.revs if nm[node] in common]
528 # and
533 # and
529 # * commonheads parents on missing
534 # * commonheads parents on missing
530 revset = unfi.set(
535 revset = unfi.set(
531 b'%ln and parents(roots(%ln))',
536 b'%ln and parents(roots(%ln))',
532 self.outgoing.commonheads,
537 self.outgoing.commonheads,
533 self.outgoing.missing,
538 self.outgoing.missing,
534 )
539 )
535 cheads.extend(c.node() for c in revset)
540 cheads.extend(c.node() for c in revset)
536 return cheads
541 return cheads
537
542
538 @property
543 @property
539 def commonheads(self):
544 def commonheads(self):
540 """set of all common heads after changeset bundle push"""
545 """set of all common heads after changeset bundle push"""
541 if self.cgresult:
546 if self.cgresult:
542 return self.futureheads
547 return self.futureheads
543 else:
548 else:
544 return self.fallbackheads
549 return self.fallbackheads
545
550
546
551
547 # mapping of message used when pushing bookmark
552 # mapping of message used when pushing bookmark
548 bookmsgmap = {
553 bookmsgmap = {
549 b'update': (
554 b'update': (
550 _(b"updating bookmark %s\n"),
555 _(b"updating bookmark %s\n"),
551 _(b'updating bookmark %s failed!\n'),
556 _(b'updating bookmark %s failed!\n'),
552 ),
557 ),
553 b'export': (
558 b'export': (
554 _(b"exporting bookmark %s\n"),
559 _(b"exporting bookmark %s\n"),
555 _(b'exporting bookmark %s failed!\n'),
560 _(b'exporting bookmark %s failed!\n'),
556 ),
561 ),
557 b'delete': (
562 b'delete': (
558 _(b"deleting remote bookmark %s\n"),
563 _(b"deleting remote bookmark %s\n"),
559 _(b'deleting remote bookmark %s failed!\n'),
564 _(b'deleting remote bookmark %s failed!\n'),
560 ),
565 ),
561 }
566 }
562
567
563
568
564 def push(
569 def push(
565 repo,
570 repo,
566 remote,
571 remote,
567 force=False,
572 force=False,
568 revs=None,
573 revs=None,
569 newbranch=False,
574 newbranch=False,
570 bookmarks=(),
575 bookmarks=(),
571 publish=False,
576 publish=False,
572 opargs=None,
577 opargs=None,
573 ):
578 ):
574 '''Push outgoing changesets (limited by revs) from a local
579 '''Push outgoing changesets (limited by revs) from a local
575 repository to remote. Return an integer:
580 repository to remote. Return an integer:
576 - None means nothing to push
581 - None means nothing to push
577 - 0 means HTTP error
582 - 0 means HTTP error
578 - 1 means we pushed and remote head count is unchanged *or*
583 - 1 means we pushed and remote head count is unchanged *or*
579 we have outgoing changesets but refused to push
584 we have outgoing changesets but refused to push
580 - other values as described by addchangegroup()
585 - other values as described by addchangegroup()
581 '''
586 '''
582 if opargs is None:
587 if opargs is None:
583 opargs = {}
588 opargs = {}
584 pushop = pushoperation(
589 pushop = pushoperation(
585 repo,
590 repo,
586 remote,
591 remote,
587 force,
592 force,
588 revs,
593 revs,
589 newbranch,
594 newbranch,
590 bookmarks,
595 bookmarks,
591 publish,
596 publish,
592 **pycompat.strkwargs(opargs)
597 **pycompat.strkwargs(opargs)
593 )
598 )
594 if pushop.remote.local():
599 if pushop.remote.local():
595 missing = (
600 missing = (
596 set(pushop.repo.requirements) - pushop.remote.local().supported
601 set(pushop.repo.requirements) - pushop.remote.local().supported
597 )
602 )
598 if missing:
603 if missing:
599 msg = _(
604 msg = _(
600 b"required features are not"
605 b"required features are not"
601 b" supported in the destination:"
606 b" supported in the destination:"
602 b" %s"
607 b" %s"
603 ) % (b', '.join(sorted(missing)))
608 ) % (b', '.join(sorted(missing)))
604 raise error.Abort(msg)
609 raise error.Abort(msg)
605
610
606 if not pushop.remote.canpush():
611 if not pushop.remote.canpush():
607 raise error.Abort(_(b"destination does not support push"))
612 raise error.Abort(_(b"destination does not support push"))
608
613
609 if not pushop.remote.capable(b'unbundle'):
614 if not pushop.remote.capable(b'unbundle'):
610 raise error.Abort(
615 raise error.Abort(
611 _(
616 _(
612 b'cannot push: destination does not support the '
617 b'cannot push: destination does not support the '
613 b'unbundle wire protocol command'
618 b'unbundle wire protocol command'
614 )
619 )
615 )
620 )
616
621
617 # get lock as we might write phase data
622 # get lock as we might write phase data
618 wlock = lock = None
623 wlock = lock = None
619 try:
624 try:
620 # bundle2 push may receive a reply bundle touching bookmarks
625 # bundle2 push may receive a reply bundle touching bookmarks
621 # requiring the wlock. Take it now to ensure proper ordering.
626 # requiring the wlock. Take it now to ensure proper ordering.
622 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
627 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
623 if (
628 if (
624 (not _forcebundle1(pushop))
629 (not _forcebundle1(pushop))
625 and maypushback
630 and maypushback
626 and not bookmod.bookmarksinstore(repo)
631 and not bookmod.bookmarksinstore(repo)
627 ):
632 ):
628 wlock = pushop.repo.wlock()
633 wlock = pushop.repo.wlock()
629 lock = pushop.repo.lock()
634 lock = pushop.repo.lock()
630 pushop.trmanager = transactionmanager(
635 pushop.trmanager = transactionmanager(
631 pushop.repo, b'push-response', pushop.remote.url()
636 pushop.repo, b'push-response', pushop.remote.url()
632 )
637 )
633 except error.LockUnavailable as err:
638 except error.LockUnavailable as err:
634 # source repo cannot be locked.
639 # source repo cannot be locked.
635 # We do not abort the push, but just disable the local phase
640 # We do not abort the push, but just disable the local phase
636 # synchronisation.
641 # synchronisation.
637 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
642 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
638 err
643 err
639 )
644 )
640 pushop.ui.debug(msg)
645 pushop.ui.debug(msg)
641
646
642 with wlock or util.nullcontextmanager():
647 with wlock or util.nullcontextmanager():
643 with lock or util.nullcontextmanager():
648 with lock or util.nullcontextmanager():
644 with pushop.trmanager or util.nullcontextmanager():
649 with pushop.trmanager or util.nullcontextmanager():
645 pushop.repo.checkpush(pushop)
650 pushop.repo.checkpush(pushop)
646 _checkpublish(pushop)
651 _checkpublish(pushop)
647 _pushdiscovery(pushop)
652 _pushdiscovery(pushop)
648 if not _forcebundle1(pushop):
653 if not _forcebundle1(pushop):
649 _pushbundle2(pushop)
654 _pushbundle2(pushop)
650 _pushchangeset(pushop)
655 _pushchangeset(pushop)
651 _pushsyncphase(pushop)
656 _pushsyncphase(pushop)
652 _pushobsolete(pushop)
657 _pushobsolete(pushop)
653 _pushbookmark(pushop)
658 _pushbookmark(pushop)
654
659
655 if repo.ui.configbool(b'experimental', b'remotenames'):
660 if repo.ui.configbool(b'experimental', b'remotenames'):
656 logexchange.pullremotenames(repo, remote)
661 logexchange.pullremotenames(repo, remote)
657
662
658 return pushop
663 return pushop
659
664
660
665
661 # list of steps to perform discovery before push
666 # list of steps to perform discovery before push
662 pushdiscoveryorder = []
667 pushdiscoveryorder = []
663
668
664 # Mapping between step name and function
669 # Mapping between step name and function
665 #
670 #
666 # This exists to help extensions wrap steps if necessary
671 # This exists to help extensions wrap steps if necessary
667 pushdiscoverymapping = {}
672 pushdiscoverymapping = {}
668
673
669
674
670 def pushdiscovery(stepname):
675 def pushdiscovery(stepname):
671 """decorator for function performing discovery before push
676 """decorator for function performing discovery before push
672
677
673 The function is added to the step -> function mapping and appended to the
678 The function is added to the step -> function mapping and appended to the
674 list of steps. Beware that decorated function will be added in order (this
679 list of steps. Beware that decorated function will be added in order (this
675 may matter).
680 may matter).
676
681
677 You can only use this decorator for a new step, if you want to wrap a step
682 You can only use this decorator for a new step, if you want to wrap a step
678 from an extension, change the pushdiscovery dictionary directly."""
683 from an extension, change the pushdiscovery dictionary directly."""
679
684
680 def dec(func):
685 def dec(func):
681 assert stepname not in pushdiscoverymapping
686 assert stepname not in pushdiscoverymapping
682 pushdiscoverymapping[stepname] = func
687 pushdiscoverymapping[stepname] = func
683 pushdiscoveryorder.append(stepname)
688 pushdiscoveryorder.append(stepname)
684 return func
689 return func
685
690
686 return dec
691 return dec
687
692
688
693
689 def _pushdiscovery(pushop):
694 def _pushdiscovery(pushop):
690 """Run all discovery steps"""
695 """Run all discovery steps"""
691 for stepname in pushdiscoveryorder:
696 for stepname in pushdiscoveryorder:
692 step = pushdiscoverymapping[stepname]
697 step = pushdiscoverymapping[stepname]
693 step(pushop)
698 step(pushop)
694
699
695
700
696 @pushdiscovery(b'changeset')
701 @pushdiscovery(b'changeset')
697 def _pushdiscoverychangeset(pushop):
702 def _pushdiscoverychangeset(pushop):
698 """discover the changeset that need to be pushed"""
703 """discover the changeset that need to be pushed"""
699 fci = discovery.findcommonincoming
704 fci = discovery.findcommonincoming
700 if pushop.revs:
705 if pushop.revs:
701 commoninc = fci(
706 commoninc = fci(
702 pushop.repo,
707 pushop.repo,
703 pushop.remote,
708 pushop.remote,
704 force=pushop.force,
709 force=pushop.force,
705 ancestorsof=pushop.revs,
710 ancestorsof=pushop.revs,
706 )
711 )
707 else:
712 else:
708 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
713 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
709 common, inc, remoteheads = commoninc
714 common, inc, remoteheads = commoninc
710 fco = discovery.findcommonoutgoing
715 fco = discovery.findcommonoutgoing
711 outgoing = fco(
716 outgoing = fco(
712 pushop.repo,
717 pushop.repo,
713 pushop.remote,
718 pushop.remote,
714 onlyheads=pushop.revs,
719 onlyheads=pushop.revs,
715 commoninc=commoninc,
720 commoninc=commoninc,
716 force=pushop.force,
721 force=pushop.force,
717 )
722 )
718 pushop.outgoing = outgoing
723 pushop.outgoing = outgoing
719 pushop.remoteheads = remoteheads
724 pushop.remoteheads = remoteheads
720 pushop.incoming = inc
725 pushop.incoming = inc
721
726
722
727
723 @pushdiscovery(b'phase')
728 @pushdiscovery(b'phase')
724 def _pushdiscoveryphase(pushop):
729 def _pushdiscoveryphase(pushop):
725 """discover the phase that needs to be pushed
730 """discover the phase that needs to be pushed
726
731
727 (computed for both success and failure case for changesets push)"""
732 (computed for both success and failure case for changesets push)"""
728 outgoing = pushop.outgoing
733 outgoing = pushop.outgoing
729 unfi = pushop.repo.unfiltered()
734 unfi = pushop.repo.unfiltered()
730 remotephases = listkeys(pushop.remote, b'phases')
735 remotephases = listkeys(pushop.remote, b'phases')
731
736
732 if (
737 if (
733 pushop.ui.configbool(b'ui', b'_usedassubrepo')
738 pushop.ui.configbool(b'ui', b'_usedassubrepo')
734 and remotephases # server supports phases
739 and remotephases # server supports phases
735 and not pushop.outgoing.missing # no changesets to be pushed
740 and not pushop.outgoing.missing # no changesets to be pushed
736 and remotephases.get(b'publishing', False)
741 and remotephases.get(b'publishing', False)
737 ):
742 ):
738 # When:
743 # When:
739 # - this is a subrepo push
744 # - this is a subrepo push
740 # - and remote support phase
745 # - and remote support phase
741 # - and no changeset are to be pushed
746 # - and no changeset are to be pushed
742 # - and remote is publishing
747 # - and remote is publishing
743 # We may be in issue 3781 case!
748 # We may be in issue 3781 case!
744 # We drop the possible phase synchronisation done by
749 # We drop the possible phase synchronisation done by
745 # courtesy to publish changesets possibly locally draft
750 # courtesy to publish changesets possibly locally draft
746 # on the remote.
751 # on the remote.
747 pushop.outdatedphases = []
752 pushop.outdatedphases = []
748 pushop.fallbackoutdatedphases = []
753 pushop.fallbackoutdatedphases = []
749 return
754 return
750
755
751 pushop.remotephases = phases.remotephasessummary(
756 pushop.remotephases = phases.remotephasessummary(
752 pushop.repo, pushop.fallbackheads, remotephases
757 pushop.repo, pushop.fallbackheads, remotephases
753 )
758 )
754 droots = pushop.remotephases.draftroots
759 droots = pushop.remotephases.draftroots
755
760
756 extracond = b''
761 extracond = b''
757 if not pushop.remotephases.publishing:
762 if not pushop.remotephases.publishing:
758 extracond = b' and public()'
763 extracond = b' and public()'
759 revset = b'heads((%%ln::%%ln) %s)' % extracond
764 revset = b'heads((%%ln::%%ln) %s)' % extracond
760 # Get the list of all revs draft on remote by public here.
765 # Get the list of all revs draft on remote by public here.
761 # XXX Beware that revset break if droots is not strictly
766 # XXX Beware that revset break if droots is not strictly
762 # XXX root we may want to ensure it is but it is costly
767 # XXX root we may want to ensure it is but it is costly
763 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
768 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
764 if not pushop.remotephases.publishing and pushop.publish:
769 if not pushop.remotephases.publishing and pushop.publish:
765 future = list(
770 future = list(
766 unfi.set(
771 unfi.set(
767 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
772 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
768 )
773 )
769 )
774 )
770 elif not outgoing.missing:
775 elif not outgoing.missing:
771 future = fallback
776 future = fallback
772 else:
777 else:
773 # adds changeset we are going to push as draft
778 # adds changeset we are going to push as draft
774 #
779 #
775 # should not be necessary for publishing server, but because of an
780 # should not be necessary for publishing server, but because of an
776 # issue fixed in xxxxx we have to do it anyway.
781 # issue fixed in xxxxx we have to do it anyway.
777 fdroots = list(
782 fdroots = list(
778 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
783 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
779 )
784 )
780 fdroots = [f.node() for f in fdroots]
785 fdroots = [f.node() for f in fdroots]
781 future = list(unfi.set(revset, fdroots, pushop.futureheads))
786 future = list(unfi.set(revset, fdroots, pushop.futureheads))
782 pushop.outdatedphases = future
787 pushop.outdatedphases = future
783 pushop.fallbackoutdatedphases = fallback
788 pushop.fallbackoutdatedphases = fallback
784
789
785
790
786 @pushdiscovery(b'obsmarker')
791 @pushdiscovery(b'obsmarker')
787 def _pushdiscoveryobsmarkers(pushop):
792 def _pushdiscoveryobsmarkers(pushop):
788 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
793 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
789 return
794 return
790
795
791 if not pushop.repo.obsstore:
796 if not pushop.repo.obsstore:
792 return
797 return
793
798
794 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
799 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
795 return
800 return
796
801
797 repo = pushop.repo
802 repo = pushop.repo
798 # very naive computation, that can be quite expensive on big repo.
803 # very naive computation, that can be quite expensive on big repo.
799 # However: evolution is currently slow on them anyway.
804 # However: evolution is currently slow on them anyway.
800 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
805 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
801 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
806 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
802
807
803
808
804 @pushdiscovery(b'bookmarks')
809 @pushdiscovery(b'bookmarks')
805 def _pushdiscoverybookmarks(pushop):
810 def _pushdiscoverybookmarks(pushop):
806 ui = pushop.ui
811 ui = pushop.ui
807 repo = pushop.repo.unfiltered()
812 repo = pushop.repo.unfiltered()
808 remote = pushop.remote
813 remote = pushop.remote
809 ui.debug(b"checking for updated bookmarks\n")
814 ui.debug(b"checking for updated bookmarks\n")
810 ancestors = ()
815 ancestors = ()
811 if pushop.revs:
816 if pushop.revs:
812 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
817 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
813 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
818 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
814
819
815 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
820 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
816
821
817 explicit = {
822 explicit = {
818 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
823 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
819 }
824 }
820
825
821 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
826 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
822 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
827 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
823
828
824
829
825 def _processcompared(pushop, pushed, explicit, remotebms, comp):
830 def _processcompared(pushop, pushed, explicit, remotebms, comp):
826 """take decision on bookmarks to push to the remote repo
831 """take decision on bookmarks to push to the remote repo
827
832
828 Exists to help extensions alter this behavior.
833 Exists to help extensions alter this behavior.
829 """
834 """
830 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
835 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
831
836
832 repo = pushop.repo
837 repo = pushop.repo
833
838
834 for b, scid, dcid in advsrc:
839 for b, scid, dcid in advsrc:
835 if b in explicit:
840 if b in explicit:
836 explicit.remove(b)
841 explicit.remove(b)
837 if not pushed or repo[scid].rev() in pushed:
842 if not pushed or repo[scid].rev() in pushed:
838 pushop.outbookmarks.append((b, dcid, scid))
843 pushop.outbookmarks.append((b, dcid, scid))
839 # search added bookmark
844 # search added bookmark
840 for b, scid, dcid in addsrc:
845 for b, scid, dcid in addsrc:
841 if b in explicit:
846 if b in explicit:
842 explicit.remove(b)
847 explicit.remove(b)
843 pushop.outbookmarks.append((b, b'', scid))
848 pushop.outbookmarks.append((b, b'', scid))
844 # search for overwritten bookmark
849 # search for overwritten bookmark
845 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
850 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
846 if b in explicit:
851 if b in explicit:
847 explicit.remove(b)
852 explicit.remove(b)
848 pushop.outbookmarks.append((b, dcid, scid))
853 pushop.outbookmarks.append((b, dcid, scid))
849 # search for bookmark to delete
854 # search for bookmark to delete
850 for b, scid, dcid in adddst:
855 for b, scid, dcid in adddst:
851 if b in explicit:
856 if b in explicit:
852 explicit.remove(b)
857 explicit.remove(b)
853 # treat as "deleted locally"
858 # treat as "deleted locally"
854 pushop.outbookmarks.append((b, dcid, b''))
859 pushop.outbookmarks.append((b, dcid, b''))
855 # identical bookmarks shouldn't get reported
860 # identical bookmarks shouldn't get reported
856 for b, scid, dcid in same:
861 for b, scid, dcid in same:
857 if b in explicit:
862 if b in explicit:
858 explicit.remove(b)
863 explicit.remove(b)
859
864
860 if explicit:
865 if explicit:
861 explicit = sorted(explicit)
866 explicit = sorted(explicit)
862 # we should probably list all of them
867 # we should probably list all of them
863 pushop.ui.warn(
868 pushop.ui.warn(
864 _(
869 _(
865 b'bookmark %s does not exist on the local '
870 b'bookmark %s does not exist on the local '
866 b'or remote repository!\n'
871 b'or remote repository!\n'
867 )
872 )
868 % explicit[0]
873 % explicit[0]
869 )
874 )
870 pushop.bkresult = 2
875 pushop.bkresult = 2
871
876
872 pushop.outbookmarks.sort()
877 pushop.outbookmarks.sort()
873
878
874
879
875 def _pushcheckoutgoing(pushop):
880 def _pushcheckoutgoing(pushop):
876 outgoing = pushop.outgoing
881 outgoing = pushop.outgoing
877 unfi = pushop.repo.unfiltered()
882 unfi = pushop.repo.unfiltered()
878 if not outgoing.missing:
883 if not outgoing.missing:
879 # nothing to push
884 # nothing to push
880 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
885 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
881 return False
886 return False
882 # something to push
887 # something to push
883 if not pushop.force:
888 if not pushop.force:
884 # if repo.obsstore == False --> no obsolete
889 # if repo.obsstore == False --> no obsolete
885 # then, save the iteration
890 # then, save the iteration
886 if unfi.obsstore:
891 if unfi.obsstore:
887 # this message are here for 80 char limit reason
892 # this message are here for 80 char limit reason
888 mso = _(b"push includes obsolete changeset: %s!")
893 mso = _(b"push includes obsolete changeset: %s!")
889 mspd = _(b"push includes phase-divergent changeset: %s!")
894 mspd = _(b"push includes phase-divergent changeset: %s!")
890 mscd = _(b"push includes content-divergent changeset: %s!")
895 mscd = _(b"push includes content-divergent changeset: %s!")
891 mst = {
896 mst = {
892 b"orphan": _(b"push includes orphan changeset: %s!"),
897 b"orphan": _(b"push includes orphan changeset: %s!"),
893 b"phase-divergent": mspd,
898 b"phase-divergent": mspd,
894 b"content-divergent": mscd,
899 b"content-divergent": mscd,
895 }
900 }
896 # If we are to push if there is at least one
901 # If we are to push if there is at least one
897 # obsolete or unstable changeset in missing, at
902 # obsolete or unstable changeset in missing, at
898 # least one of the missinghead will be obsolete or
903 # least one of the missinghead will be obsolete or
899 # unstable. So checking heads only is ok
904 # unstable. So checking heads only is ok
900 for node in outgoing.missingheads:
905 for node in outgoing.missingheads:
901 ctx = unfi[node]
906 ctx = unfi[node]
902 if ctx.obsolete():
907 if ctx.obsolete():
903 raise error.Abort(mso % ctx)
908 raise error.Abort(mso % ctx)
904 elif ctx.isunstable():
909 elif ctx.isunstable():
905 # TODO print more than one instability in the abort
910 # TODO print more than one instability in the abort
906 # message
911 # message
907 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
912 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
908
913
909 discovery.checkheads(pushop)
914 discovery.checkheads(pushop)
910 return True
915 return True
911
916
912
917
913 # List of names of steps to perform for an outgoing bundle2, order matters.
918 # List of names of steps to perform for an outgoing bundle2, order matters.
914 b2partsgenorder = []
919 b2partsgenorder = []
915
920
916 # Mapping between step name and function
921 # Mapping between step name and function
917 #
922 #
918 # This exists to help extensions wrap steps if necessary
923 # This exists to help extensions wrap steps if necessary
919 b2partsgenmapping = {}
924 b2partsgenmapping = {}
920
925
921
926
def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the b2partsgenmapping dictionary directly."""

    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return dec


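# Usage sketch (hypothetical extension code, not part of this module): how an
# extension would register a brand-new step and wrap an existing one; the
# names `myextpart` and `wrappedphase` are made up for the example.
#
#     @b2partsgenerator(b'myext-data')
#     def myextpart(pushop, bundler):
#         bundler.newpart(b'myext-data', data=b'payload')
#
#     origphase = b2partsgenmapping[b'phase']
#
#     def wrappedphase(pushop, bundler):
#         # extension work happens here, then delegate to the vanilla step
#         return origphase(pushop, bundler)
#
#     b2partsgenmapping[b'phase'] = wrappedphase

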
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = b'related' in bundler.capabilities.get(
            b'checkheads', ()
        )
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart(b'check:updated-heads', data=data)


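# Worked example (hypothetical capability data): if the remote advertises
# {b'checkheads': (b'related',)} and pushop.pushbranchmap maps b'default' to
# ([oldhead], [newhead], [], [oldhead]), the loop computes affected ==
# {oldhead} and a compact 'check:updated-heads' part is emitted; a remote
# lacking the capability would receive a full 'check:heads' snapshot instead.

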
def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(
        pushop.outgoing.missing
        or pushop.outdatedphases
        or pushop.outobsmarkers
        or pushop.outbookmarks
    )


@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = b'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'check:bookmarks', data=checkdata)


@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart(b'check:phases', data=checkdata)


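# Worked example (hypothetical nodes): with one public head P and one draft
# root D known for the remote, `checks` becomes [[P], [D], []] (indexed by
# phases.public, phases.draft, phases.secret) before being binary-encoded
# into the 'check:phases' part that the server validates against its own
# phase state.

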
@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(
        pushop.repo, pushop.outgoing, version, b'push'
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    if b'treemanifest' in pushop.repo.requirements:
        cgpart.addparam(b'treemanifest', b'1')
    if b'exp-sidedata-flag' in pushop.repo.requirements:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply


@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if b'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    haspushkey = b'pushkey' in b2caps
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)


def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add(b'phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart(b'phase-heads', data=phasedata)


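# Worked example (hypothetical node N): a single outdated remote head yields
# updates == [[N], [], []], meaning "N and its ancestors are now public";
# only index 0 (phases.public) is ever filled by this function, since a push
# can only advance remote heads to the public phase here.

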
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply


@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if b'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)


@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if b'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
    legacybooks = b'bookmarks' in legacy

    if not legacybooks and b'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif b'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)


def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return b'export'
    elif not new:
        return b'delete'
    return b'update'


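# Worked examples: _bmaction(None, node) -> b'export' (bookmark is new on
# the remote), _bmaction(node, None) -> b'delete', and
# _bmaction(node1, node2) -> b'update'; the action merely selects the right
# status/warning template from bookmsgmap.

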
def _abortonsecretctx(pushop, node, b):
    """abort if a given bookmark points to a secret changeset"""
    if node and pushop.repo[node].phase() == phases.secret:
        raise error.Abort(
            _(b'cannot push bookmark %s as it points to a secret changeset') % b
        )


def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply


def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add(b'bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0][b'return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1

    return handlereply


@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if b'=' not in raw:
                msg = (
                    b"unable to parse variable '%s', should follow "
                    b"'KEY=VALUE' or 'KEY=' format"
                )
                raise error.Abort(msg % raw)
            k, v = raw.split(b'=', 1)
            shellvars[k] = v

        part = bundler.newpart(b'pushvars')

        for key, value in pycompat.iteritems(shellvars):
            part.addparam(key, value, mandatory=False)


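# Worked example: `hg push --pushvars DEBUG=1` reaches this generator as
# pushvars == [b'DEBUG=1'] and yields a 'pushvars' part carrying the advisory
# parameter DEBUG=1; the receiving side typically exposes it to server-side
# hooks as an HG_USERVAR_DEBUG environment variable (behavior of the remote,
# not of this function).

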
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.Abort(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)


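# Resulting bundle layout (illustrative; the exact parts depend on remote
# capabilities and on what is outgoing): a 'replycaps' part first, then the
# parts produced in b2partsgenorder order -- 'pushvars' (inserted at index 0),
# 'check:bookmarks', 'check:phases', 'changegroup', phase parts ('phase-heads'
# or pushkey), obsmarkers, and bookmarks.

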
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = [b'force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())


def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the courtesy phase synchronisation that would otherwise
        # publish changesets which are possibly still draft on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )


def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(
                _(
                    b'cannot lock source repo, skipping '
                    b'local %s phase update\n'
                )
                % phasestr
            )


def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug(b'try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey(b'obsolete', key, b'', data))
        if [r for r in rslts if not r]:
            msg = _(b'failed to push some obsolete markers!\n')
            repo.ui.warn(msg)


def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery may have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1


class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
    ):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()


class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs[b'source'] = self.source
            self._tr.hookargs[b'url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()


def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand(b'listkeys', {b'namespace': namespace}).result()


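# Usage example (hypothetical peer): listkeys(remote, b'bookmarks') returns a
# dict such as {b'@': b'<40-hex-node>'}; listkeys(remote, b'phases') returns
# phase-root entries plus a b'publishing' flag, as consumed by
# _pushsyncphase() above.

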
def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common


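# Loop sketch (hypothetical run): a first _pullbundle2() call applies an
# inlined pull bundle covering only part of the requested history; new
# changesets arrived but some of pullop.rheads are still unknown locally, so
# `common` is widened with the freshly added heads and a second round trip
# fetches the rest, after which every remote head is known and the loop exits.

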
def pull(
    repo,
    remote,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        heads,
        force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    wlock = util.nullcontextmanager()
    if not bookmod.bookmarksinstore(repo):
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable(b'command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to run
            # before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop


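# Usage sketch (hypothetical caller code, names made up): pulling one head
# from a remote peer.
#
#     from mercurial import exchange, hg
#
#     peer = hg.peer(repo.ui, {}, b'https://hg.example.org/repo')
#     pullop = exchange.pull(repo, peer, heads=[somenode])
#     if pullop.cgresult:
#         repo.ui.status(b'new changesets pulled\n')

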
1791 # list of steps to perform discovery before pull
1796 # list of steps to perform discovery before pull
1792 pulldiscoveryorder = []
1797 pulldiscoveryorder = []
1793
1798
1794 # Mapping between step name and function
1799 # Mapping between step name and function
1795 #
1800 #
1796 # This exists to help extensions wrap steps if necessary
1801 # This exists to help extensions wrap steps if necessary
1797 pulldiscoverymapping = {}
1802 pulldiscoverymapping = {}
1798
1803
1799
1804
1800 def pulldiscovery(stepname):
1805 def pulldiscovery(stepname):
1801 """decorator for function performing discovery before pull
1806 """decorator for function performing discovery before pull
1802
1807
1803 The function is added to the step -> function mapping and appended to the
1808 The function is added to the step -> function mapping and appended to the
1804 list of steps. Beware that decorated function will be added in order (this
1809 list of steps. Beware that decorated function will be added in order (this
1805 may matter).
1810 may matter).
1806
1811
1807 You can only use this decorator for a new step, if you want to wrap a step
1812 You can only use this decorator for a new step, if you want to wrap a step
1808 from an extension, change the pulldiscovery dictionary directly."""
1813 from an extension, change the pulldiscovery dictionary directly."""
1809
1814
1810 def dec(func):
1815 def dec(func):
1811 assert stepname not in pulldiscoverymapping
1816 assert stepname not in pulldiscoverymapping
1812 pulldiscoverymapping[stepname] = func
1817 pulldiscoverymapping[stepname] = func
1813 pulldiscoveryorder.append(stepname)
1818 pulldiscoveryorder.append(stepname)
1814 return func
1819 return func
1815
1820
1816 return dec
1821 return dec
1817
1822
1818
1823
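For illustration, a minimal sketch of how a new discovery step could be registered with this decorator; the step name and body below are hypothetical, and real registrations (such as b'b1:bookmarks' further down) follow the same shape:

@pulldiscovery(b'example:noop')
def _pulldiscoveryexample(pullop):
    # Hypothetical step: runs in registration order with the other steps
    # and receives the shared pull operation object.
    pullop.repo.ui.debug(b'example discovery step\n')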
1819 def _pulldiscovery(pullop):
1824 def _pulldiscovery(pullop):
1820 """Run all discovery steps"""
1825 """Run all discovery steps"""
1821 for stepname in pulldiscoveryorder:
1826 for stepname in pulldiscoveryorder:
1822 step = pulldiscoverymapping[stepname]
1827 step = pulldiscoverymapping[stepname]
1823 step(pullop)
1828 step(pullop)
1824
1829
1825
1830
1826 @pulldiscovery(b'b1:bookmarks')
1831 @pulldiscovery(b'b1:bookmarks')
1827 def _pullbookmarkbundle1(pullop):
1832 def _pullbookmarkbundle1(pullop):
1828 """fetch bookmark data in bundle1 case
1833 """fetch bookmark data in bundle1 case
1829
1834
1830 If not using bundle2, we have to fetch bookmarks before changeset
1835 If not using bundle2, we have to fetch bookmarks before changeset
1831 discovery to reduce the chance and impact of race conditions."""
1836 discovery to reduce the chance and impact of race conditions."""
1832 if pullop.remotebookmarks is not None:
1837 if pullop.remotebookmarks is not None:
1833 return
1838 return
1834 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1839 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1835 # all known bundle2 servers now support listkeys, but let's be nice with
1840 # all known bundle2 servers now support listkeys, but let's be nice with
1836 # new implementations.
1841 # new implementations.
1837 return
1842 return
1838 books = listkeys(pullop.remote, b'bookmarks')
1843 books = listkeys(pullop.remote, b'bookmarks')
1839 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1844 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1840
1845
1841
1846
1842 @pulldiscovery(b'changegroup')
1847 @pulldiscovery(b'changegroup')
1843 def _pulldiscoverychangegroup(pullop):
1848 def _pulldiscoverychangegroup(pullop):
1844 """discovery phase for the pull
1849 """discovery phase for the pull
1845
1850
1846 Currently handles changeset discovery only; will be changed to handle all
1851 Currently handles changeset discovery only; will be changed to handle all
1847 discovery at some point."""
1852 discovery at some point."""
1848 tmp = discovery.findcommonincoming(
1853 tmp = discovery.findcommonincoming(
1849 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1854 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1850 )
1855 )
1851 common, fetch, rheads = tmp
1856 common, fetch, rheads = tmp
1852 nm = pullop.repo.unfiltered().changelog.nodemap
1857 nm = pullop.repo.unfiltered().changelog.nodemap
1853 if fetch and rheads:
1858 if fetch and rheads:
1854 # If a remote head is filtered locally, put it back in common.
1859 # If a remote head is filtered locally, put it back in common.
1855 #
1860 #
1856 # This is a hackish solution to catch most of the "common but locally
1861 # This is a hackish solution to catch most of the "common but locally
1857 # hidden" situations. We do not perform discovery on the unfiltered
1862 # hidden" situations. We do not perform discovery on the unfiltered
1858 # repository because it ends up doing a pathological number of round
1863 # repository because it ends up doing a pathological number of round
1859 # trips for a huge number of changesets we do not care about.
1864 # trips for a huge number of changesets we do not care about.
1860 #
1865 #
1861 # If a set of such "common but filtered" changesets exists on the server
1866 # If a set of such "common but filtered" changesets exists on the server
1862 # but does not include a remote head, we will not be able to detect it.
1867 # but does not include a remote head, we will not be able to detect it.
1863 scommon = set(common)
1868 scommon = set(common)
1864 for n in rheads:
1869 for n in rheads:
1865 if n in nm:
1870 if n in nm:
1866 if n not in scommon:
1871 if n not in scommon:
1867 common.append(n)
1872 common.append(n)
1868 if set(rheads).issubset(set(common)):
1873 if set(rheads).issubset(set(common)):
1869 fetch = []
1874 fetch = []
1870 pullop.common = common
1875 pullop.common = common
1871 pullop.fetch = fetch
1876 pullop.fetch = fetch
1872 pullop.rheads = rheads
1877 pullop.rheads = rheads
1873
1878
1874
1879
1875 def _pullbundle2(pullop):
1880 def _pullbundle2(pullop):
1876 """pull data using bundle2
1881 """pull data using bundle2
1877
1882
1878 For now, the only supported data are changegroup."""
1883 For now, the only supported data are changegroup."""
1879 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1884 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1880
1885
1881 # make ui easier to access
1886 # make ui easier to access
1882 ui = pullop.repo.ui
1887 ui = pullop.repo.ui
1883
1888
1884 # At the moment we don't do stream clones over bundle2. If that is
1889 # At the moment we don't do stream clones over bundle2. If that is
1885 # implemented then here's where the check for that will go.
1890 # implemented then here's where the check for that will go.
1886 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1891 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1887
1892
1888 # declare pull perimeters
1893 # declare pull perimeters
1889 kwargs[b'common'] = pullop.common
1894 kwargs[b'common'] = pullop.common
1890 kwargs[b'heads'] = pullop.heads or pullop.rheads
1895 kwargs[b'heads'] = pullop.heads or pullop.rheads
1891
1896
1892 # check that the server supports narrow, then add includepats and excludepats
1897 # check that the server supports narrow, then add includepats and excludepats
1893 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1898 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1894 if servernarrow and pullop.includepats:
1899 if servernarrow and pullop.includepats:
1895 kwargs[b'includepats'] = pullop.includepats
1900 kwargs[b'includepats'] = pullop.includepats
1896 if servernarrow and pullop.excludepats:
1901 if servernarrow and pullop.excludepats:
1897 kwargs[b'excludepats'] = pullop.excludepats
1902 kwargs[b'excludepats'] = pullop.excludepats
1898
1903
1899 if streaming:
1904 if streaming:
1900 kwargs[b'cg'] = False
1905 kwargs[b'cg'] = False
1901 kwargs[b'stream'] = True
1906 kwargs[b'stream'] = True
1902 pullop.stepsdone.add(b'changegroup')
1907 pullop.stepsdone.add(b'changegroup')
1903 pullop.stepsdone.add(b'phases')
1908 pullop.stepsdone.add(b'phases')
1904
1909
1905 else:
1910 else:
1906 # pulling changegroup
1911 # pulling changegroup
1907 pullop.stepsdone.add(b'changegroup')
1912 pullop.stepsdone.add(b'changegroup')
1908
1913
1909 kwargs[b'cg'] = pullop.fetch
1914 kwargs[b'cg'] = pullop.fetch
1910
1915
1911 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1916 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1912 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1917 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1913 if not legacyphase and hasbinaryphase:
1918 if not legacyphase and hasbinaryphase:
1914 kwargs[b'phases'] = True
1919 kwargs[b'phases'] = True
1915 pullop.stepsdone.add(b'phases')
1920 pullop.stepsdone.add(b'phases')
1916
1921
1917 if b'listkeys' in pullop.remotebundle2caps:
1922 if b'listkeys' in pullop.remotebundle2caps:
1918 if b'phases' not in pullop.stepsdone:
1923 if b'phases' not in pullop.stepsdone:
1919 kwargs[b'listkeys'] = [b'phases']
1924 kwargs[b'listkeys'] = [b'phases']
1920
1925
1921 bookmarksrequested = False
1926 bookmarksrequested = False
1922 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1927 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1923 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1928 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1924
1929
1925 if pullop.remotebookmarks is not None:
1930 if pullop.remotebookmarks is not None:
1926 pullop.stepsdone.add(b'request-bookmarks')
1931 pullop.stepsdone.add(b'request-bookmarks')
1927
1932
1928 if (
1933 if (
1929 b'request-bookmarks' not in pullop.stepsdone
1934 b'request-bookmarks' not in pullop.stepsdone
1930 and pullop.remotebookmarks is None
1935 and pullop.remotebookmarks is None
1931 and not legacybookmark
1936 and not legacybookmark
1932 and hasbinarybook
1937 and hasbinarybook
1933 ):
1938 ):
1934 kwargs[b'bookmarks'] = True
1939 kwargs[b'bookmarks'] = True
1935 bookmarksrequested = True
1940 bookmarksrequested = True
1936
1941
1937 if b'listkeys' in pullop.remotebundle2caps:
1942 if b'listkeys' in pullop.remotebundle2caps:
1938 if b'request-bookmarks' not in pullop.stepsdone:
1943 if b'request-bookmarks' not in pullop.stepsdone:
1939 # make sure to always include bookmark data when migrating
1944 # make sure to always include bookmark data when migrating
1940 # `hg incoming --bundle` to using this function.
1945 # `hg incoming --bundle` to using this function.
1941 pullop.stepsdone.add(b'request-bookmarks')
1946 pullop.stepsdone.add(b'request-bookmarks')
1942 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1947 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1943
1948
1944 # If this is a full pull / clone and the server supports the clone bundles
1949 # If this is a full pull / clone and the server supports the clone bundles
1945 # feature, tell the server whether we attempted a clone bundle. The
1950 # feature, tell the server whether we attempted a clone bundle. The
1946 # presence of this flag indicates the client supports clone bundles. This
1951 # presence of this flag indicates the client supports clone bundles. This
1947 # will enable the server to treat clients that support clone bundles
1952 # will enable the server to treat clients that support clone bundles
1948 # differently from those that don't.
1953 # differently from those that don't.
1949 if (
1954 if (
1950 pullop.remote.capable(b'clonebundles')
1955 pullop.remote.capable(b'clonebundles')
1951 and pullop.heads is None
1956 and pullop.heads is None
1952 and list(pullop.common) == [nullid]
1957 and list(pullop.common) == [nullid]
1953 ):
1958 ):
1954 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1959 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1955
1960
1956 if streaming:
1961 if streaming:
1957 pullop.repo.ui.status(_(b'streaming all changes\n'))
1962 pullop.repo.ui.status(_(b'streaming all changes\n'))
1958 elif not pullop.fetch:
1963 elif not pullop.fetch:
1959 pullop.repo.ui.status(_(b"no changes found\n"))
1964 pullop.repo.ui.status(_(b"no changes found\n"))
1960 pullop.cgresult = 0
1965 pullop.cgresult = 0
1961 else:
1966 else:
1962 if pullop.heads is None and list(pullop.common) == [nullid]:
1967 if pullop.heads is None and list(pullop.common) == [nullid]:
1963 pullop.repo.ui.status(_(b"requesting all changes\n"))
1968 pullop.repo.ui.status(_(b"requesting all changes\n"))
1964 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1969 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1965 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1970 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1966 if obsolete.commonversion(remoteversions) is not None:
1971 if obsolete.commonversion(remoteversions) is not None:
1967 kwargs[b'obsmarkers'] = True
1972 kwargs[b'obsmarkers'] = True
1968 pullop.stepsdone.add(b'obsmarkers')
1973 pullop.stepsdone.add(b'obsmarkers')
1969 _pullbundle2extraprepare(pullop, kwargs)
1974 _pullbundle2extraprepare(pullop, kwargs)
1970
1975
1971 with pullop.remote.commandexecutor() as e:
1976 with pullop.remote.commandexecutor() as e:
1972 args = dict(kwargs)
1977 args = dict(kwargs)
1973 args[b'source'] = b'pull'
1978 args[b'source'] = b'pull'
1974 bundle = e.callcommand(b'getbundle', args).result()
1979 bundle = e.callcommand(b'getbundle', args).result()
1975
1980
1976 try:
1981 try:
1977 op = bundle2.bundleoperation(
1982 op = bundle2.bundleoperation(
1978 pullop.repo, pullop.gettransaction, source=b'pull'
1983 pullop.repo, pullop.gettransaction, source=b'pull'
1979 )
1984 )
1980 op.modes[b'bookmarks'] = b'records'
1985 op.modes[b'bookmarks'] = b'records'
1981 bundle2.processbundle(pullop.repo, bundle, op=op)
1986 bundle2.processbundle(pullop.repo, bundle, op=op)
1982 except bundle2.AbortFromPart as exc:
1987 except bundle2.AbortFromPart as exc:
1983 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
1988 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
1984 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
1989 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
1985 except error.BundleValueError as exc:
1990 except error.BundleValueError as exc:
1986 raise error.Abort(_(b'missing support for %s') % exc)
1991 raise error.Abort(_(b'missing support for %s') % exc)
1987
1992
1988 if pullop.fetch:
1993 if pullop.fetch:
1989 pullop.cgresult = bundle2.combinechangegroupresults(op)
1994 pullop.cgresult = bundle2.combinechangegroupresults(op)
1990
1995
1991 # processing phases change
1996 # processing phases change
1992 for namespace, value in op.records[b'listkeys']:
1997 for namespace, value in op.records[b'listkeys']:
1993 if namespace == b'phases':
1998 if namespace == b'phases':
1994 _pullapplyphases(pullop, value)
1999 _pullapplyphases(pullop, value)
1995
2000
1996 # processing bookmark update
2001 # processing bookmark update
1997 if bookmarksrequested:
2002 if bookmarksrequested:
1998 books = {}
2003 books = {}
1999 for record in op.records[b'bookmarks']:
2004 for record in op.records[b'bookmarks']:
2000 books[record[b'bookmark']] = record[b"node"]
2005 books[record[b'bookmark']] = record[b"node"]
2001 pullop.remotebookmarks = books
2006 pullop.remotebookmarks = books
2002 else:
2007 else:
2003 for namespace, value in op.records[b'listkeys']:
2008 for namespace, value in op.records[b'listkeys']:
2004 if namespace == b'bookmarks':
2009 if namespace == b'bookmarks':
2005 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2010 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2006
2011
2007 # bookmark data were either already there or pulled in the bundle
2012 # bookmark data were either already there or pulled in the bundle
2008 if pullop.remotebookmarks is not None:
2013 if pullop.remotebookmarks is not None:
2009 _pullbookmarks(pullop)
2014 _pullbookmarks(pullop)
2010
2015
2011
2016
2012 def _pullbundle2extraprepare(pullop, kwargs):
2017 def _pullbundle2extraprepare(pullop, kwargs):
2013 """hook function so that extensions can extend the getbundle call"""
2018 """hook function so that extensions can extend the getbundle call"""
2014
2019
2015
2020
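Because this hook is empty, extensions usually wrap it rather than call it. A hedged sketch using the standard extensions.wrapfunction mechanism; the b'example' argument is hypothetical:

from mercurial import exchange, extensions

def _myextraprepare(orig, pullop, kwargs):
    kwargs[b'example'] = True  # hypothetical extra getbundle argument
    return orig(pullop, kwargs)

def extsetup(ui):
    extensions.wrapfunction(exchange, '_pullbundle2extraprepare', _myextraprepare)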
2016 def _pullchangeset(pullop):
2021 def _pullchangeset(pullop):
2017 """pull changeset from unbundle into the local repo"""
2022 """pull changeset from unbundle into the local repo"""
2018 # We delay opening the transaction as late as possible so we don't
2023 # We delay opening the transaction as late as possible so we don't
2019 # open a transaction for nothing and don't break future useful
2024 # open a transaction for nothing and don't break future useful
2020 # rollback calls
2025 # rollback calls
2021 if b'changegroup' in pullop.stepsdone:
2026 if b'changegroup' in pullop.stepsdone:
2022 return
2027 return
2023 pullop.stepsdone.add(b'changegroup')
2028 pullop.stepsdone.add(b'changegroup')
2024 if not pullop.fetch:
2029 if not pullop.fetch:
2025 pullop.repo.ui.status(_(b"no changes found\n"))
2030 pullop.repo.ui.status(_(b"no changes found\n"))
2026 pullop.cgresult = 0
2031 pullop.cgresult = 0
2027 return
2032 return
2028 tr = pullop.gettransaction()
2033 tr = pullop.gettransaction()
2029 if pullop.heads is None and list(pullop.common) == [nullid]:
2034 if pullop.heads is None and list(pullop.common) == [nullid]:
2030 pullop.repo.ui.status(_(b"requesting all changes\n"))
2035 pullop.repo.ui.status(_(b"requesting all changes\n"))
2031 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2036 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2032 # issue1320, avoid a race if remote changed after discovery
2037 # issue1320, avoid a race if remote changed after discovery
2033 pullop.heads = pullop.rheads
2038 pullop.heads = pullop.rheads
2034
2039
2035 if pullop.remote.capable(b'getbundle'):
2040 if pullop.remote.capable(b'getbundle'):
2036 # TODO: get bundlecaps from remote
2041 # TODO: get bundlecaps from remote
2037 cg = pullop.remote.getbundle(
2042 cg = pullop.remote.getbundle(
2038 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2043 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2039 )
2044 )
2040 elif pullop.heads is None:
2045 elif pullop.heads is None:
2041 with pullop.remote.commandexecutor() as e:
2046 with pullop.remote.commandexecutor() as e:
2042 cg = e.callcommand(
2047 cg = e.callcommand(
2043 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2048 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2044 ).result()
2049 ).result()
2045
2050
2046 elif not pullop.remote.capable(b'changegroupsubset'):
2051 elif not pullop.remote.capable(b'changegroupsubset'):
2047 raise error.Abort(
2052 raise error.Abort(
2048 _(
2053 _(
2049 b"partial pull cannot be done because "
2054 b"partial pull cannot be done because "
2050 b"other repository doesn't support "
2055 b"other repository doesn't support "
2051 b"changegroupsubset."
2056 b"changegroupsubset."
2052 )
2057 )
2053 )
2058 )
2054 else:
2059 else:
2055 with pullop.remote.commandexecutor() as e:
2060 with pullop.remote.commandexecutor() as e:
2056 cg = e.callcommand(
2061 cg = e.callcommand(
2057 b'changegroupsubset',
2062 b'changegroupsubset',
2058 {
2063 {
2059 b'bases': pullop.fetch,
2064 b'bases': pullop.fetch,
2060 b'heads': pullop.heads,
2065 b'heads': pullop.heads,
2061 b'source': b'pull',
2066 b'source': b'pull',
2062 },
2067 },
2063 ).result()
2068 ).result()
2064
2069
2065 bundleop = bundle2.applybundle(
2070 bundleop = bundle2.applybundle(
2066 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2071 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2067 )
2072 )
2068 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2073 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2069
2074
2070
2075
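To summarize the protocol fallback implemented above, a condensed and purely illustrative restatement (the real control flow also handles the issue1320 race and the transaction):

def _choosepullcommand(remote, heads):
    # Condensed restatement of the capability checks in _pullchangeset().
    if remote.capable(b'getbundle'):
        return b'getbundle'
    if heads is None:
        return b'changegroup'        # full pull of everything missing
    if remote.capable(b'changegroupsubset'):
        return b'changegroupsubset'  # partial pull
    raise error.Abort(_(b'partial pull requires changegroupsubset'))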
2071 def _pullphase(pullop):
2076 def _pullphase(pullop):
2072 # Get phases data from the remote
2077 # Get phases data from the remote
2073 if b'phases' in pullop.stepsdone:
2078 if b'phases' in pullop.stepsdone:
2074 return
2079 return
2075 remotephases = listkeys(pullop.remote, b'phases')
2080 remotephases = listkeys(pullop.remote, b'phases')
2076 _pullapplyphases(pullop, remotephases)
2081 _pullapplyphases(pullop, remotephases)
2077
2082
2078
2083
2079 def _pullapplyphases(pullop, remotephases):
2084 def _pullapplyphases(pullop, remotephases):
2080 """apply phase movement from observed remote state"""
2085 """apply phase movement from observed remote state"""
2081 if b'phases' in pullop.stepsdone:
2086 if b'phases' in pullop.stepsdone:
2082 return
2087 return
2083 pullop.stepsdone.add(b'phases')
2088 pullop.stepsdone.add(b'phases')
2084 publishing = bool(remotephases.get(b'publishing', False))
2089 publishing = bool(remotephases.get(b'publishing', False))
2085 if remotephases and not publishing:
2090 if remotephases and not publishing:
2086 # remote is new and non-publishing
2091 # remote is new and non-publishing
2087 pheads, _dr = phases.analyzeremotephases(
2092 pheads, _dr = phases.analyzeremotephases(
2088 pullop.repo, pullop.pulledsubset, remotephases
2093 pullop.repo, pullop.pulledsubset, remotephases
2089 )
2094 )
2090 dheads = pullop.pulledsubset
2095 dheads = pullop.pulledsubset
2091 else:
2096 else:
2092 # Remote is old or publishing all common changesets
2097 # Remote is old or publishing all common changesets
2093 # should be seen as public
2098 # should be seen as public
2094 pheads = pullop.pulledsubset
2099 pheads = pullop.pulledsubset
2095 dheads = []
2100 dheads = []
2096 unfi = pullop.repo.unfiltered()
2101 unfi = pullop.repo.unfiltered()
2097 phase = unfi._phasecache.phase
2102 phase = unfi._phasecache.phase
2098 rev = unfi.changelog.nodemap.get
2103 rev = unfi.changelog.nodemap.get
2099 public = phases.public
2104 public = phases.public
2100 draft = phases.draft
2105 draft = phases.draft
2101
2106
2102 # exclude changesets already public locally and update the others
2107 # exclude changesets already public locally and update the others
2103 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2108 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2104 if pheads:
2109 if pheads:
2105 tr = pullop.gettransaction()
2110 tr = pullop.gettransaction()
2106 phases.advanceboundary(pullop.repo, tr, public, pheads)
2111 phases.advanceboundary(pullop.repo, tr, public, pheads)
2107
2112
2108 # exclude changesets already draft locally and update the others
2113 # exclude changesets already draft locally and update the others
2109 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2114 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2110 if dheads:
2115 if dheads:
2111 tr = pullop.gettransaction()
2116 tr = pullop.gettransaction()
2112 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2117 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2113
2118
2114
2119
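The phase rule above condenses to the following hedged sketch (the helper name is hypothetical; the real work is done by analyzeremotephases and advanceboundary):

def _classifyphaseheads(pulledsubset, remotephases, remotepublicheads):
    # Illustrative restatement: a publishing (or very old) remote makes
    # everything pulled public; a non-publishing remote only publishes
    # the heads it explicitly reports as public.
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        return remotepublicheads, pulledsubset  # (public, draft) candidates
    return pulledsubset, []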
2115 def _pullbookmarks(pullop):
2120 def _pullbookmarks(pullop):
2116 """process the remote bookmark information to update the local one"""
2121 """process the remote bookmark information to update the local one"""
2117 if b'bookmarks' in pullop.stepsdone:
2122 if b'bookmarks' in pullop.stepsdone:
2118 return
2123 return
2119 pullop.stepsdone.add(b'bookmarks')
2124 pullop.stepsdone.add(b'bookmarks')
2120 repo = pullop.repo
2125 repo = pullop.repo
2121 remotebookmarks = pullop.remotebookmarks
2126 remotebookmarks = pullop.remotebookmarks
2122 bookmod.updatefromremote(
2127 bookmod.updatefromremote(
2123 repo.ui,
2128 repo.ui,
2124 repo,
2129 repo,
2125 remotebookmarks,
2130 remotebookmarks,
2126 pullop.remote.url(),
2131 pullop.remote.url(),
2127 pullop.gettransaction,
2132 pullop.gettransaction,
2128 explicit=pullop.explicitbookmarks,
2133 explicit=pullop.explicitbookmarks,
2129 )
2134 )
2130
2135
2131
2136
2132 def _pullobsolete(pullop):
2137 def _pullobsolete(pullop):
2133 """utility function to pull obsolete markers from a remote
2138 """utility function to pull obsolete markers from a remote
2134
2139
2135 The `gettransaction` function returns the pull transaction, creating one
2140 The `gettransaction` function returns the pull transaction, creating one
2136 if necessary. We return the transaction to inform the calling code that
2141 if necessary. We return the transaction to inform the calling code that
2137 a new transaction has been created (when applicable).
2142 a new transaction has been created (when applicable).
2138
2143
2139 Exists mostly to allow overriding for experimentation purposes."""
2144 Exists mostly to allow overriding for experimentation purposes."""
2140 if b'obsmarkers' in pullop.stepsdone:
2145 if b'obsmarkers' in pullop.stepsdone:
2141 return
2146 return
2142 pullop.stepsdone.add(b'obsmarkers')
2147 pullop.stepsdone.add(b'obsmarkers')
2143 tr = None
2148 tr = None
2144 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2149 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2145 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2150 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2146 remoteobs = listkeys(pullop.remote, b'obsolete')
2151 remoteobs = listkeys(pullop.remote, b'obsolete')
2147 if b'dump0' in remoteobs:
2152 if b'dump0' in remoteobs:
2148 tr = pullop.gettransaction()
2153 tr = pullop.gettransaction()
2149 markers = []
2154 markers = []
2150 for key in sorted(remoteobs, reverse=True):
2155 for key in sorted(remoteobs, reverse=True):
2151 if key.startswith(b'dump'):
2156 if key.startswith(b'dump'):
2152 data = util.b85decode(remoteobs[key])
2157 data = util.b85decode(remoteobs[key])
2153 version, newmarks = obsolete._readmarkers(data)
2158 version, newmarks = obsolete._readmarkers(data)
2154 markers += newmarks
2159 markers += newmarks
2155 if markers:
2160 if markers:
2156 pullop.repo.obsstore.add(tr, markers)
2161 pullop.repo.obsstore.add(tr, markers)
2157 pullop.repo.invalidatevolatilesets()
2162 pullop.repo.invalidatevolatilesets()
2158 return tr
2163 return tr
2159
2164
2160
2165
2161 def applynarrowacl(repo, kwargs):
2166 def applynarrowacl(repo, kwargs):
2162 """Apply narrow fetch access control.
2167 """Apply narrow fetch access control.
2163
2168
2164 This massages the named arguments for getbundle wire protocol commands
2169 This massages the named arguments for getbundle wire protocol commands
2165 so requested data is filtered through access control rules.
2170 so requested data is filtered through access control rules.
2166 """
2171 """
2167 ui = repo.ui
2172 ui = repo.ui
2168 # TODO this assumes existence of HTTP and is a layering violation.
2173 # TODO this assumes existence of HTTP and is a layering violation.
2169 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2174 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2170 user_includes = ui.configlist(
2175 user_includes = ui.configlist(
2171 _NARROWACL_SECTION,
2176 _NARROWACL_SECTION,
2172 username + b'.includes',
2177 username + b'.includes',
2173 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2178 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2174 )
2179 )
2175 user_excludes = ui.configlist(
2180 user_excludes = ui.configlist(
2176 _NARROWACL_SECTION,
2181 _NARROWACL_SECTION,
2177 username + b'.excludes',
2182 username + b'.excludes',
2178 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2183 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2179 )
2184 )
2180 if not user_includes:
2185 if not user_includes:
2181 raise error.Abort(
2186 raise error.Abort(
2182 _(b"{} configuration for user {} is empty").format(
2187 _(b"{} configuration for user {} is empty").format(
2183 _NARROWACL_SECTION, username
2188 _NARROWACL_SECTION, username
2184 )
2189 )
2185 )
2190 )
2186
2191
2187 user_includes = [
2192 user_includes = [
2188 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2193 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2189 ]
2194 ]
2190 user_excludes = [
2195 user_excludes = [
2191 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2196 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2192 ]
2197 ]
2193
2198
2194 req_includes = set(kwargs.get(r'includepats', []))
2199 req_includes = set(kwargs.get(r'includepats', []))
2195 req_excludes = set(kwargs.get(r'excludepats', []))
2200 req_excludes = set(kwargs.get(r'excludepats', []))
2196
2201
2197 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2202 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2198 req_includes, req_excludes, user_includes, user_excludes
2203 req_includes, req_excludes, user_includes, user_excludes
2199 )
2204 )
2200
2205
2201 if invalid_includes:
2206 if invalid_includes:
2202 raise error.Abort(
2207 raise error.Abort(
2203 _(b"The following includes are not accessible for {}: {}").format(
2208 _(b"The following includes are not accessible for {}: {}").format(
2204 username, invalid_includes
2209 username, invalid_includes
2205 )
2210 )
2206 )
2211 )
2207
2212
2208 new_args = {}
2213 new_args = {}
2209 new_args.update(kwargs)
2214 new_args.update(kwargs)
2210 new_args[r'narrow'] = True
2215 new_args[r'narrow'] = True
2211 new_args[r'narrow_acl'] = True
2216 new_args[r'narrow_acl'] = True
2212 new_args[r'includepats'] = req_includes
2217 new_args[r'includepats'] = req_includes
2213 if req_excludes:
2218 if req_excludes:
2214 new_args[r'excludepats'] = req_excludes
2219 new_args[r'excludepats'] = req_excludes
2215
2220
2216 return new_args
2221 return new_args
2217
2222
2218
2223
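For orientation, a hedged sketch of the configuration this function reads and of the pattern normalization it applies; the user name and paths are hypothetical:

# Hypothetical hgrc contents, read via ui.configlist() above:
#   [narrowacl]
#   default.includes = src
#   alice.includes = *
#   alice.excludes = src/secret
# '*' becomes b'path:.' and every other entry gets a b'path:' prefix:
patterns = [b'*', b'src/secret']
normalized = [b'path:.' if p == b'*' else b'path:' + p for p in patterns]
# -> [b'path:.', b'path:src/secret']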
2219 def _computeellipsis(repo, common, heads, known, match, depth=None):
2224 def _computeellipsis(repo, common, heads, known, match, depth=None):
2220 """Compute the shape of a narrowed DAG.
2225 """Compute the shape of a narrowed DAG.
2221
2226
2222 Args:
2227 Args:
2223 repo: The repository we're transferring.
2228 repo: The repository we're transferring.
2224 common: The roots of the DAG range we're transferring.
2229 common: The roots of the DAG range we're transferring.
2225 May be just [nullid], which means all ancestors of heads.
2230 May be just [nullid], which means all ancestors of heads.
2226 heads: The heads of the DAG range we're transferring.
2231 heads: The heads of the DAG range we're transferring.
2227 match: The narrowmatcher that allows us to identify relevant changes.
2232 match: The narrowmatcher that allows us to identify relevant changes.
2228 depth: If not None, only consider nodes to be full nodes if they are at
2233 depth: If not None, only consider nodes to be full nodes if they are at
2229 most depth changesets away from one of heads.
2234 most depth changesets away from one of heads.
2230
2235
2231 Returns:
2236 Returns:
2232 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2237 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2233
2238
2234 visitnodes: The list of nodes (either full or ellipsis) which
2239 visitnodes: The list of nodes (either full or ellipsis) which
2235 need to be sent to the client.
2240 need to be sent to the client.
2236 relevant_nodes: The set of changelog nodes which change a file inside
2241 relevant_nodes: The set of changelog nodes which change a file inside
2237 the narrowspec. The client needs these as non-ellipsis nodes.
2242 the narrowspec. The client needs these as non-ellipsis nodes.
2238 ellipsisroots: A dict of {rev: parents} that is used in
2243 ellipsisroots: A dict of {rev: parents} that is used in
2239 narrowchangegroup to produce ellipsis nodes with the
2244 narrowchangegroup to produce ellipsis nodes with the
2240 correct parents.
2245 correct parents.
2241 """
2246 """
2242 cl = repo.changelog
2247 cl = repo.changelog
2243 mfl = repo.manifestlog
2248 mfl = repo.manifestlog
2244
2249
2245 clrev = cl.rev
2250 clrev = cl.rev
2246
2251
2247 commonrevs = {clrev(n) for n in common} | {nullrev}
2252 commonrevs = {clrev(n) for n in common} | {nullrev}
2248 headsrevs = {clrev(n) for n in heads}
2253 headsrevs = {clrev(n) for n in heads}
2249
2254
2250 if depth:
2255 if depth:
2251 revdepth = {h: 0 for h in headsrevs}
2256 revdepth = {h: 0 for h in headsrevs}
2252
2257
2253 ellipsisheads = collections.defaultdict(set)
2258 ellipsisheads = collections.defaultdict(set)
2254 ellipsisroots = collections.defaultdict(set)
2259 ellipsisroots = collections.defaultdict(set)
2255
2260
2256 def addroot(head, curchange):
2261 def addroot(head, curchange):
2257 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2262 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2258 ellipsisroots[head].add(curchange)
2263 ellipsisroots[head].add(curchange)
2259 # Recursively split ellipsis heads with 3 roots by finding the
2264 # Recursively split ellipsis heads with 3 roots by finding the
2260 # roots' youngest common descendant which is an elided merge commit.
2265 # roots' youngest common descendant which is an elided merge commit.
2261 # That descendant takes 2 of the 3 roots as its own, and becomes a
2266 # That descendant takes 2 of the 3 roots as its own, and becomes a
2262 # root of the head.
2267 # root of the head.
2263 while len(ellipsisroots[head]) > 2:
2268 while len(ellipsisroots[head]) > 2:
2264 child, roots = splithead(head)
2269 child, roots = splithead(head)
2265 splitroots(head, child, roots)
2270 splitroots(head, child, roots)
2266 head = child # Recurse in case we just added a 3rd root
2271 head = child # Recurse in case we just added a 3rd root
2267
2272
2268 def splitroots(head, child, roots):
2273 def splitroots(head, child, roots):
2269 ellipsisroots[head].difference_update(roots)
2274 ellipsisroots[head].difference_update(roots)
2270 ellipsisroots[head].add(child)
2275 ellipsisroots[head].add(child)
2271 ellipsisroots[child].update(roots)
2276 ellipsisroots[child].update(roots)
2272 ellipsisroots[child].discard(child)
2277 ellipsisroots[child].discard(child)
2273
2278
2274 def splithead(head):
2279 def splithead(head):
2275 r1, r2, r3 = sorted(ellipsisroots[head])
2280 r1, r2, r3 = sorted(ellipsisroots[head])
2276 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2281 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2277 mid = repo.revs(
2282 mid = repo.revs(
2278 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2283 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2279 )
2284 )
2280 for j in mid:
2285 for j in mid:
2281 if j == nr2:
2286 if j == nr2:
2282 return nr2, (nr1, nr2)
2287 return nr2, (nr1, nr2)
2283 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2288 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2284 return j, (nr1, nr2)
2289 return j, (nr1, nr2)
2285 raise error.Abort(
2290 raise error.Abort(
2286 _(
2291 _(
2287 b'Failed to split up ellipsis node! head: %d, '
2292 b'Failed to split up ellipsis node! head: %d, '
2288 b'roots: %d %d %d'
2293 b'roots: %d %d %d'
2289 )
2294 )
2290 % (head, r1, r2, r3)
2295 % (head, r1, r2, r3)
2291 )
2296 )
2292
2297
2293 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2298 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2294 visit = reversed(missing)
2299 visit = reversed(missing)
2295 relevant_nodes = set()
2300 relevant_nodes = set()
2296 visitnodes = [cl.node(m) for m in missing]
2301 visitnodes = [cl.node(m) for m in missing]
2297 required = set(headsrevs) | known
2302 required = set(headsrevs) | known
2298 for rev in visit:
2303 for rev in visit:
2299 clrev = cl.changelogrevision(rev)
2304 clrev = cl.changelogrevision(rev)
2300 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2305 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2301 if depth is not None:
2306 if depth is not None:
2302 curdepth = revdepth[rev]
2307 curdepth = revdepth[rev]
2303 for p in ps:
2308 for p in ps:
2304 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2309 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2305 needed = False
2310 needed = False
2306 shallow_enough = depth is None or revdepth[rev] <= depth
2311 shallow_enough = depth is None or revdepth[rev] <= depth
2307 if shallow_enough:
2312 if shallow_enough:
2308 curmf = mfl[clrev.manifest].read()
2313 curmf = mfl[clrev.manifest].read()
2309 if ps:
2314 if ps:
2310 # We choose to not trust the changed files list in
2315 # We choose to not trust the changed files list in
2311 # changesets because it's not always correct. TODO: could
2316 # changesets because it's not always correct. TODO: could
2312 # we trust it for the non-merge case?
2317 # we trust it for the non-merge case?
2313 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2318 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2314 needed = bool(curmf.diff(p1mf, match))
2319 needed = bool(curmf.diff(p1mf, match))
2315 if not needed and len(ps) > 1:
2320 if not needed and len(ps) > 1:
2316 # For merge changes, the list of changed files is not
2321 # For merge changes, the list of changed files is not
2317 # helpful, since we need to emit the merge if a file
2322 # helpful, since we need to emit the merge if a file
2318 # in the narrow spec has changed on either side of the
2323 # in the narrow spec has changed on either side of the
2319 # merge. As a result, we do a manifest diff to check.
2324 # merge. As a result, we do a manifest diff to check.
2320 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2325 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2321 needed = bool(curmf.diff(p2mf, match))
2326 needed = bool(curmf.diff(p2mf, match))
2322 else:
2327 else:
2323 # For a root node, we need to include the node if any
2328 # For a root node, we need to include the node if any
2324 # files in the node match the narrowspec.
2329 # files in the node match the narrowspec.
2325 needed = any(curmf.walk(match))
2330 needed = any(curmf.walk(match))
2326
2331
2327 if needed:
2332 if needed:
2328 for head in ellipsisheads[rev]:
2333 for head in ellipsisheads[rev]:
2329 addroot(head, rev)
2334 addroot(head, rev)
2330 for p in ps:
2335 for p in ps:
2331 required.add(p)
2336 required.add(p)
2332 relevant_nodes.add(cl.node(rev))
2337 relevant_nodes.add(cl.node(rev))
2333 else:
2338 else:
2334 if not ps:
2339 if not ps:
2335 ps = [nullrev]
2340 ps = [nullrev]
2336 if rev in required:
2341 if rev in required:
2337 for head in ellipsisheads[rev]:
2342 for head in ellipsisheads[rev]:
2338 addroot(head, rev)
2343 addroot(head, rev)
2339 for p in ps:
2344 for p in ps:
2340 ellipsisheads[p].add(rev)
2345 ellipsisheads[p].add(rev)
2341 else:
2346 else:
2342 for p in ps:
2347 for p in ps:
2343 ellipsisheads[p] |= ellipsisheads[rev]
2348 ellipsisheads[p] |= ellipsisheads[rev]
2344
2349
2345 # add common changesets as roots of their reachable ellipsis heads
2350 # add common changesets as roots of their reachable ellipsis heads
2346 for c in commonrevs:
2351 for c in commonrevs:
2347 for head in ellipsisheads[c]:
2352 for head in ellipsisheads[c]:
2348 addroot(head, c)
2353 addroot(head, c)
2349 return visitnodes, relevant_nodes, ellipsisroots
2354 return visitnodes, relevant_nodes, ellipsisroots
2350
2355
2351
2356
2352 def caps20to10(repo, role):
2357 def caps20to10(repo, role):
2353 """return a set with appropriate options to use bundle20 during getbundle"""
2358 """return a set with appropriate options to use bundle20 during getbundle"""
2354 caps = {b'HG20'}
2359 caps = {b'HG20'}
2355 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2360 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2356 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2361 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2357 return caps
2362 return caps
2358
2363
2359
2364
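A hedged sketch of the shape of the returned set; the quoted blob is illustrative only, the real one comes from bundle2.encodecaps():

caps = caps20to10(repo, role=b'client')  # given a repo object
# e.g. {b'HG20', b'bundle2=HG20%0Achangegroup%3D01%2C02'}  (illustrative)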
2360 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2365 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2361 getbundle2partsorder = []
2366 getbundle2partsorder = []
2362
2367
2363 # Mapping between step name and function
2368 # Mapping between step name and function
2364 #
2369 #
2365 # This exists to help extensions wrap steps if necessary
2370 # This exists to help extensions wrap steps if necessary
2366 getbundle2partsmapping = {}
2371 getbundle2partsmapping = {}
2367
2372
2368
2373
2369 def getbundle2partsgenerator(stepname, idx=None):
2374 def getbundle2partsgenerator(stepname, idx=None):
2370 """decorator for function generating bundle2 part for getbundle
2375 """decorator for function generating bundle2 part for getbundle
2371
2376
2372 The function is added to the step -> function mapping and appended to the
2377 The function is added to the step -> function mapping and appended to the
2373 list of steps. Beware that decorated functions will be added in order
2378 list of steps. Beware that decorated functions will be added in order
2374 (this may matter).
2379 (this may matter).
2375
2380
2376 You can only use this decorator for new steps; if you want to wrap a step
2381 You can only use this decorator for new steps; if you want to wrap a step
2377 from an extension, change the getbundle2partsmapping dictionary directly."""
2382 from an extension, change the getbundle2partsmapping dictionary directly."""
2378
2383
2379 def dec(func):
2384 def dec(func):
2380 assert stepname not in getbundle2partsmapping
2385 assert stepname not in getbundle2partsmapping
2381 getbundle2partsmapping[stepname] = func
2386 getbundle2partsmapping[stepname] = func
2382 if idx is None:
2387 if idx is None:
2383 getbundle2partsorder.append(stepname)
2388 getbundle2partsorder.append(stepname)
2384 else:
2389 else:
2385 getbundle2partsorder.insert(idx, stepname)
2390 getbundle2partsorder.insert(idx, stepname)
2386 return func
2391 return func
2387
2392
2388 return dec
2393 return dec
2389
2394
2390
2395
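A hedged sketch of a registration, mirroring the built-in generators below; the part name and the r'example' request flag are hypothetical:

@getbundle2partsgenerator(b'example:empty')
def _getbundleexamplepart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    # Hypothetical part: only emitted when the client asked for it.
    if not kwargs.get(r'example', False):
        return
    bundler.newpart(b'example:empty', data=b'')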
2391 def bundle2requested(bundlecaps):
2396 def bundle2requested(bundlecaps):
2392 if bundlecaps is not None:
2397 if bundlecaps is not None:
2393 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2398 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2394 return False
2399 return False
2395
2400
2396
2401
2397 def getbundlechunks(
2402 def getbundlechunks(
2398 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2403 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2399 ):
2404 ):
2400 """Return chunks constituting a bundle's raw data.
2405 """Return chunks constituting a bundle's raw data.
2401
2406
2402 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2407 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2403 passed.
2408 passed.
2404
2409
2405 Returns a 2-tuple of a dict with metadata about the generated bundle
2410 Returns a 2-tuple of a dict with metadata about the generated bundle
2406 and an iterator over raw chunks (of varying sizes).
2411 and an iterator over raw chunks (of varying sizes).
2407 """
2412 """
2408 kwargs = pycompat.byteskwargs(kwargs)
2413 kwargs = pycompat.byteskwargs(kwargs)
2409 info = {}
2414 info = {}
2410 usebundle2 = bundle2requested(bundlecaps)
2415 usebundle2 = bundle2requested(bundlecaps)
2411 # bundle10 case
2416 # bundle10 case
2412 if not usebundle2:
2417 if not usebundle2:
2413 if bundlecaps and not kwargs.get(b'cg', True):
2418 if bundlecaps and not kwargs.get(b'cg', True):
2414 raise ValueError(
2419 raise ValueError(
2415 _(b'request for bundle10 must include changegroup')
2420 _(b'request for bundle10 must include changegroup')
2416 )
2421 )
2417
2422
2418 if kwargs:
2423 if kwargs:
2419 raise ValueError(
2424 raise ValueError(
2420 _(b'unsupported getbundle arguments: %s')
2425 _(b'unsupported getbundle arguments: %s')
2421 % b', '.join(sorted(kwargs.keys()))
2426 % b', '.join(sorted(kwargs.keys()))
2422 )
2427 )
2423 outgoing = _computeoutgoing(repo, heads, common)
2428 outgoing = _computeoutgoing(repo, heads, common)
2424 info[b'bundleversion'] = 1
2429 info[b'bundleversion'] = 1
2425 return (
2430 return (
2426 info,
2431 info,
2427 changegroup.makestream(
2432 changegroup.makestream(
2428 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2433 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2429 ),
2434 ),
2430 )
2435 )
2431
2436
2432 # bundle20 case
2437 # bundle20 case
2433 info[b'bundleversion'] = 2
2438 info[b'bundleversion'] = 2
2434 b2caps = {}
2439 b2caps = {}
2435 for bcaps in bundlecaps:
2440 for bcaps in bundlecaps:
2436 if bcaps.startswith(b'bundle2='):
2441 if bcaps.startswith(b'bundle2='):
2437 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2442 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2438 b2caps.update(bundle2.decodecaps(blob))
2443 b2caps.update(bundle2.decodecaps(blob))
2439 bundler = bundle2.bundle20(repo.ui, b2caps)
2444 bundler = bundle2.bundle20(repo.ui, b2caps)
2440
2445
2441 kwargs[b'heads'] = heads
2446 kwargs[b'heads'] = heads
2442 kwargs[b'common'] = common
2447 kwargs[b'common'] = common
2443
2448
2444 for name in getbundle2partsorder:
2449 for name in getbundle2partsorder:
2445 func = getbundle2partsmapping[name]
2450 func = getbundle2partsmapping[name]
2446 func(
2451 func(
2447 bundler,
2452 bundler,
2448 repo,
2453 repo,
2449 source,
2454 source,
2450 bundlecaps=bundlecaps,
2455 bundlecaps=bundlecaps,
2451 b2caps=b2caps,
2456 b2caps=b2caps,
2452 **pycompat.strkwargs(kwargs)
2457 **pycompat.strkwargs(kwargs)
2453 )
2458 )
2454
2459
2455 info[b'prefercompressed'] = bundler.prefercompressed
2460 info[b'prefercompressed'] = bundler.prefercompressed
2456
2461
2457 return info, bundler.getchunks()
2462 return info, bundler.getchunks()
2458
2463
2459
2464
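A hedged usage sketch of the (info, chunks) contract documented above; the bundlecaps value is illustrative:

info, chunks = getbundlechunks(
    repo, b'pull', heads=None, common=None, bundlecaps={b'HG20'}
)
assert info[b'bundleversion'] == 2  # bundle20, since HG20 was requested
payload = b''.join(chunks)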
2460 @getbundle2partsgenerator(b'stream2')
2465 @getbundle2partsgenerator(b'stream2')
2461 def _getbundlestream2(bundler, repo, *args, **kwargs):
2466 def _getbundlestream2(bundler, repo, *args, **kwargs):
2462 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2467 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2463
2468
2464
2469
2465 @getbundle2partsgenerator(b'changegroup')
2470 @getbundle2partsgenerator(b'changegroup')
2466 def _getbundlechangegrouppart(
2471 def _getbundlechangegrouppart(
2467 bundler,
2472 bundler,
2468 repo,
2473 repo,
2469 source,
2474 source,
2470 bundlecaps=None,
2475 bundlecaps=None,
2471 b2caps=None,
2476 b2caps=None,
2472 heads=None,
2477 heads=None,
2473 common=None,
2478 common=None,
2474 **kwargs
2479 **kwargs
2475 ):
2480 ):
2476 """add a changegroup part to the requested bundle"""
2481 """add a changegroup part to the requested bundle"""
2477 if not kwargs.get(r'cg', True):
2482 if not kwargs.get(r'cg', True):
2478 return
2483 return
2479
2484
2480 version = b'01'
2485 version = b'01'
2481 cgversions = b2caps.get(b'changegroup')
2486 cgversions = b2caps.get(b'changegroup')
2482 if cgversions: # 3.1 and 3.2 ship with an empty value
2487 if cgversions: # 3.1 and 3.2 ship with an empty value
2483 cgversions = [
2488 cgversions = [
2484 v
2489 v
2485 for v in cgversions
2490 for v in cgversions
2486 if v in changegroup.supportedoutgoingversions(repo)
2491 if v in changegroup.supportedoutgoingversions(repo)
2487 ]
2492 ]
2488 if not cgversions:
2493 if not cgversions:
2489 raise error.Abort(_(b'no common changegroup version'))
2494 raise error.Abort(_(b'no common changegroup version'))
2490 version = max(cgversions)
2495 version = max(cgversions)
2491
2496
2492 outgoing = _computeoutgoing(repo, heads, common)
2497 outgoing = _computeoutgoing(repo, heads, common)
2493 if not outgoing.missing:
2498 if not outgoing.missing:
2494 return
2499 return
2495
2500
2496 if kwargs.get(r'narrow', False):
2501 if kwargs.get(r'narrow', False):
2497 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
2502 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
2498 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
2503 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
2499 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2504 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2500 else:
2505 else:
2501 matcher = None
2506 matcher = None
2502
2507
2503 cgstream = changegroup.makestream(
2508 cgstream = changegroup.makestream(
2504 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2509 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2505 )
2510 )
2506
2511
2507 part = bundler.newpart(b'changegroup', data=cgstream)
2512 part = bundler.newpart(b'changegroup', data=cgstream)
2508 if cgversions:
2513 if cgversions:
2509 part.addparam(b'version', version)
2514 part.addparam(b'version', version)
2510
2515
2511 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2516 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2512
2517
2513 if b'treemanifest' in repo.requirements:
2518 if b'treemanifest' in repo.requirements:
2514 part.addparam(b'treemanifest', b'1')
2519 part.addparam(b'treemanifest', b'1')
2515
2520
2516 if b'exp-sidedata-flag' in repo.requirements:
2521 if b'exp-sidedata-flag' in repo.requirements:
2517 part.addparam(b'exp-sidedata', b'1')
2522 part.addparam(b'exp-sidedata', b'1')
2518
2523
2519 if (
2524 if (
2520 kwargs.get(r'narrow', False)
2525 kwargs.get(r'narrow', False)
2521 and kwargs.get(r'narrow_acl', False)
2526 and kwargs.get(r'narrow_acl', False)
2522 and (include or exclude)
2527 and (include or exclude)
2523 ):
2528 ):
2524 # this is mandatory because otherwise ACL clients won't work
2529 # this is mandatory because otherwise ACL clients won't work
2525 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2530 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2526 narrowspecpart.data = b'%s\0%s' % (
2531 narrowspecpart.data = b'%s\0%s' % (
2527 b'\n'.join(include),
2532 b'\n'.join(include),
2528 b'\n'.join(exclude),
2533 b'\n'.join(exclude),
2529 )
2534 )
2530
2535
2531
2536
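The Narrow:responsespec payload built above uses a simple framing; a hedged sketch with hypothetical patterns:

include = [b'path:src']          # hypothetical
exclude = [b'path:src/private']  # hypothetical
data = b'%s\0%s' % (b'\n'.join(include), b'\n'.join(exclude))
# newline-separated includes, a NUL byte, newline-separated excludes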
2532 @getbundle2partsgenerator(b'bookmarks')
2537 @getbundle2partsgenerator(b'bookmarks')
2533 def _getbundlebookmarkpart(
2538 def _getbundlebookmarkpart(
2534 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2539 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2535 ):
2540 ):
2536 """add a bookmark part to the requested bundle"""
2541 """add a bookmark part to the requested bundle"""
2537 if not kwargs.get(r'bookmarks', False):
2542 if not kwargs.get(r'bookmarks', False):
2538 return
2543 return
2539 if b'bookmarks' not in b2caps:
2544 if b'bookmarks' not in b2caps:
2540 raise error.Abort(_(b'no common bookmarks exchange method'))
2545 raise error.Abort(_(b'no common bookmarks exchange method'))
2541 books = bookmod.listbinbookmarks(repo)
2546 books = bookmod.listbinbookmarks(repo)
2542 data = bookmod.binaryencode(books)
2547 data = bookmod.binaryencode(books)
2543 if data:
2548 if data:
2544 bundler.newpart(b'bookmarks', data=data)
2549 bundler.newpart(b'bookmarks', data=data)
2545
2550
2546
2551
2547 @getbundle2partsgenerator(b'listkeys')
2552 @getbundle2partsgenerator(b'listkeys')
2548 def _getbundlelistkeysparts(
2553 def _getbundlelistkeysparts(
2549 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2554 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2550 ):
2555 ):
2551 """add parts containing listkeys namespaces to the requested bundle"""
2556 """add parts containing listkeys namespaces to the requested bundle"""
2552 listkeys = kwargs.get(r'listkeys', ())
2557 listkeys = kwargs.get(r'listkeys', ())
2553 for namespace in listkeys:
2558 for namespace in listkeys:
2554 part = bundler.newpart(b'listkeys')
2559 part = bundler.newpart(b'listkeys')
2555 part.addparam(b'namespace', namespace)
2560 part.addparam(b'namespace', namespace)
2556 keys = repo.listkeys(namespace).items()
2561 keys = repo.listkeys(namespace).items()
2557 part.data = pushkey.encodekeys(keys)
2562 part.data = pushkey.encodekeys(keys)
2558
2563
2559
2564
2560 @getbundle2partsgenerator(b'obsmarkers')
2565 @getbundle2partsgenerator(b'obsmarkers')
2561 def _getbundleobsmarkerpart(
2566 def _getbundleobsmarkerpart(
2562 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2567 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2563 ):
2568 ):
2564 """add an obsolescence markers part to the requested bundle"""
2569 """add an obsolescence markers part to the requested bundle"""
2565 if kwargs.get(r'obsmarkers', False):
2570 if kwargs.get(r'obsmarkers', False):
2566 if heads is None:
2571 if heads is None:
2567 heads = repo.heads()
2572 heads = repo.heads()
2568 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2573 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2569 markers = repo.obsstore.relevantmarkers(subset)
2574 markers = repo.obsstore.relevantmarkers(subset)
2570 # last item of marker tuple ('parents') may be None or a tuple
2575 markers = _sortedmarkers(markers)
2571 markers = sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
2572 bundle2.buildobsmarkerspart(bundler, markers)
2576 bundle2.buildobsmarkerspart(bundler, markers)
2573
2577
2574
2578
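The _sortedmarkers() helper introduced by this changeset is defined elsewhere in the module; judging from the inline code it replaces (the removed lines above), a sketch of the sort it performs:

def _sortedmarkers_sketch(markers):
    # The last item of a marker tuple ('parents') may be None or a tuple,
    # so substitute () for None to keep the sort key comparable.
    return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))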
2575 @getbundle2partsgenerator(b'phases')
2579 @getbundle2partsgenerator(b'phases')
2576 def _getbundlephasespart(
2580 def _getbundlephasespart(
2577 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2581 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2578 ):
2582 ):
2579 """add phase heads part to the requested bundle"""
2583 """add phase heads part to the requested bundle"""
2580 if kwargs.get(r'phases', False):
2584 if kwargs.get(r'phases', False):
2581 if not b'heads' in b2caps.get(b'phases'):
2581 if b'heads' not in b2caps.get(b'phases'):
2585 if b'heads' not in b2caps.get(b'phases'):
2586 raise error.Abort(_(b'no common phases exchange method'))
2583 if heads is None:
2587 if heads is None:
2584 heads = repo.heads()
2588 heads = repo.heads()
2585
2589
2586 headsbyphase = collections.defaultdict(set)
2590 headsbyphase = collections.defaultdict(set)
2587 if repo.publishing():
2591 if repo.publishing():
2588 headsbyphase[phases.public] = heads
2592 headsbyphase[phases.public] = heads
2589 else:
2593 else:
2590 # find the appropriate heads to move
2594 # find the appropriate heads to move
2591
2595
2592 phase = repo._phasecache.phase
2596 phase = repo._phasecache.phase
2593 node = repo.changelog.node
2597 node = repo.changelog.node
2594 rev = repo.changelog.rev
2598 rev = repo.changelog.rev
2595 for h in heads:
2599 for h in heads:
2596 headsbyphase[phase(repo, rev(h))].add(h)
2600 headsbyphase[phase(repo, rev(h))].add(h)
2597 seenphases = list(headsbyphase.keys())
2601 seenphases = list(headsbyphase.keys())
2598
2602
2599 # We do not handle anything but public and draft phases for now
2603 # We do not handle anything but public and draft phases for now
2600 if seenphases:
2604 if seenphases:
2601 assert max(seenphases) <= phases.draft
2605 assert max(seenphases) <= phases.draft
2602
2606
2603 # if client is pulling non-public changesets, we need to find
2607 # if client is pulling non-public changesets, we need to find
2604 # intermediate public heads.
2608 # intermediate public heads.
2605 draftheads = headsbyphase.get(phases.draft, set())
2609 draftheads = headsbyphase.get(phases.draft, set())
2606 if draftheads:
2610 if draftheads:
2607 publicheads = headsbyphase.get(phases.public, set())
2611 publicheads = headsbyphase.get(phases.public, set())
2608
2612
2609 revset = b'heads(only(%ln, %ln) and public())'
2613 revset = b'heads(only(%ln, %ln) and public())'
2610 extraheads = repo.revs(revset, draftheads, publicheads)
2614 extraheads = repo.revs(revset, draftheads, publicheads)
2611 for r in extraheads:
2615 for r in extraheads:
2612 headsbyphase[phases.public].add(node(r))
2616 headsbyphase[phases.public].add(node(r))
2613
2617
2614 # transform data in a format used by the encoding function
2618 # transform data in a format used by the encoding function
2615 phasemapping = []
2619 phasemapping = []
2616 for phase in phases.allphases:
2620 for phase in phases.allphases:
2617 phasemapping.append(sorted(headsbyphase[phase]))
2621 phasemapping.append(sorted(headsbyphase[phase]))
2618
2622
2619 # generate the actual part
2623 # generate the actual part
2620 phasedata = phases.binaryencode(phasemapping)
2624 phasedata = phases.binaryencode(phasemapping)
2621 bundler.newpart(b'phase-heads', data=phasedata)
2625 bundler.newpart(b'phase-heads', data=phasedata)
2622
2626
2623
2627
2624 @getbundle2partsgenerator(b'hgtagsfnodes')
2628 @getbundle2partsgenerator(b'hgtagsfnodes')
2625 def _getbundletagsfnodes(
2629 def _getbundletagsfnodes(
2626 bundler,
2630 bundler,
2627 repo,
2631 repo,
2628 source,
2632 source,
2629 bundlecaps=None,
2633 bundlecaps=None,
2630 b2caps=None,
2634 b2caps=None,
2631 heads=None,
2635 heads=None,
2632 common=None,
2636 common=None,
2633 **kwargs
2637 **kwargs
2634 ):
2638 ):
2635 """Transfer the .hgtags filenodes mapping.
2639 """Transfer the .hgtags filenodes mapping.
2636
2640
2637 Only values for heads in this bundle will be transferred.
2641 Only values for heads in this bundle will be transferred.
2638
2642
2639 The part data consists of pairs of 20 byte changeset node and .hgtags
2643 The part data consists of pairs of 20 byte changeset node and .hgtags
2640 filenodes raw values.
2644 filenodes raw values.
2641 """
2645 """
2642 # Don't send unless:
2646 # Don't send unless:
2643 # - changesets are being exchanged,
2647 # - changesets are being exchanged,
2644 # - the client supports it.
2648 # - the client supports it.
2645 if not (kwargs.get(r'cg', True) and b'hgtagsfnodes' in b2caps):
2649 if not (kwargs.get(r'cg', True) and b'hgtagsfnodes' in b2caps):
2646 return
2650 return
2647
2651
2648 outgoing = _computeoutgoing(repo, heads, common)
2652 outgoing = _computeoutgoing(repo, heads, common)
2649 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2653 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2650
2654
2651
2655
@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping.

    The payload is a series of data related to each branch:

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - a narrow bundle isn't in play (not currently compatible).
    if (
        not kwargs.get(r'cg', True)
        or b'rev-branch-cache' not in b2caps
        or kwargs.get(r'narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)
    ):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)


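# Illustrative sketch (record shape inferred from the docstring above; the
# branch and nodes are made up): for a branch b'stable' with open heads H1,
# H2 and closed head H3, one payload record would carry, in order, the length
# of b'stable', the counts 2 and 1, then the nodes H1, H2, H3.
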
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
    if not (
        their_heads == [b'force']
        or their_heads == heads
        or their_heads == [b'hashed', heads_hash]
    ):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )


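# Illustrative sketch (mirrors the check above; 'observed' is a made-up
# name): a client that recorded the server's head nodes before building its
# bundle can guard against races by sending
#
#     their_heads = [b'hashed',
#                    hashlib.sha1(b''.join(sorted(observed))).digest()]
#
# so the push is rejected with PushRaced if the server's heads changed.
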
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:

                def gettransaction():
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r


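# Hedged usage sketch (the file object and URL are illustrative): a
# server-side caller decodes the incoming stream with readbundle() and hands
# it to unbundle(); 'heads' is the client's race-detection token for bundle1
# (see check_heads), and is expected to be empty for bundle2:
#
#     cg = readbundle(repo.ui, fp, None)
#     r = unbundle(repo, cg, heads, b'serve', b'remote:http:client')
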
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )


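# Hedged configuration sketch (values illustrative): the behaviour above is
# driven by two options this function reads:
#
#     [ui]
#     clonebundles = true          # master switch, checked at the top
#     clonebundlefallback = false  # on failure: warn and re-clone if true,
#                                  # abort (the default) if false
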
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL; the other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {b'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split(b'=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == b'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs[b'COMPRESSION'] = bundlespec.compression
                    attrs[b'VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m


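# Hedged example (URLs and attribute values are made up): a manifest such as
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2
#     https://example.com/stream.hg BUNDLESPEC=none-packed1 REQUIRESNI=true
#
# parses to one dict per line; assuming b'gzip-v2' splits into compression
# b'gzip' and version b'v2', the first entry becomes
# {b'URL': ..., b'BUNDLESPEC': b'gzip-v2', b'COMPRESSION': b'gzip',
#  b'VERSION': b'v2'}.
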
def isstreamclonespec(bundlespec):
    # Stream clone v1
    if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
        return True

    # Stream clone v2
    if (
        bundlespec.wirecompression == b'UN'
        and bundlespec.wireversion == b'02'
        and bundlespec.contentopts.get(b'streamv2')
    ):
        return True

    return False


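# Hedged example (mapping assumed from the bundlespec tables earlier in this
# module): a spec of b'none-packed1' should parse to wirecompression b'UN'
# and wireversion b's1', so it is recognized here as a stream clone v1 spec.
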
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get(b'BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug(
                        b'filtering %s because not a stream clone\n'
                        % entry[b'URL']
                    )
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug(
                    b'filtering %s because unsupported bundle '
                    b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
                )
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is, so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug(
                b'filtering %s because cannot determine if a stream '
                b'clone bundle\n' % entry[b'URL']
            )
            continue

        if b'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug(
                b'filtering %s because SNI not supported\n' % entry[b'URL']
            )
            continue

        newentries.append(entry)

    return newentries


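# Hedged example (entries are made up): given
#
#     entries = [
#         {b'URL': b'a', b'BUNDLESPEC': b'gzip-v2'},  # not a stream clone
#         {b'URL': b'b'},                             # no spec at all
#     ]
#
# filterclonebundleentries(repo, entries, streamclonerequested=True) would
# drop both entries and return [].
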
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0


def sortclonebundleentries(ui, entries):
    prefers = ui.configlist(b'ui', b'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split(b'=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]


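# Hedged example (values illustrative): with
#
#     [ui]
#     clonebundleprefers = VERSION=v2, COMPRESSION=zstd
#
# 'prefers' becomes [[b'VERSION', b'v2'], [b'COMPRESSION', b'zstd']], so an
# entry advertising VERSION=v2 sorts ahead of one advertising VERSION=v1;
# ties on VERSION fall through to COMPRESSION, then to manifest order.
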
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, b'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(
                _(b'HTTP error fetching bundle: %s\n')
                % stringutil.forcebytestr(e)
            )
        except urlerr.urlerror as e:
            ui.warn(
                _(b'error fetching bundle: %s\n')
                % stringutil.forcebytestr(e.reason)
            )

    return False