##// END OF EJS Templates
py3: add a r'' prefix in mercurial/exchange.py...
Pulkit Goyal -
r40388:dd816e53 default
parent child Browse files
Show More
@@ -1,2657 +1,2657 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 bin,
15 bin,
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 )
19 )
20 from .thirdparty import (
20 from .thirdparty import (
21 attr,
21 attr,
22 )
22 )
23 from . import (
23 from . import (
24 bookmarks as bookmod,
24 bookmarks as bookmod,
25 bundle2,
25 bundle2,
26 changegroup,
26 changegroup,
27 discovery,
27 discovery,
28 error,
28 error,
29 exchangev2,
29 exchangev2,
30 lock as lockmod,
30 lock as lockmod,
31 logexchange,
31 logexchange,
32 narrowspec,
32 narrowspec,
33 obsolete,
33 obsolete,
34 phases,
34 phases,
35 pushkey,
35 pushkey,
36 pycompat,
36 pycompat,
37 repository,
37 repository,
38 scmutil,
38 scmutil,
39 sslutil,
39 sslutil,
40 streamclone,
40 streamclone,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44 from .utils import (
44 from .utils import (
45 stringutil,
45 stringutil,
46 )
46 )
47
47
urlerr = util.urlerr
urlreq = util.urlreq

# config section used by the narrow-clone ACL extension hooks
_NARROWACL_SECTION = 'narrowhgacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Maps bundle version with content opts to choose which part to bundle
_bundlespeccontentopts = {
    'v1': {
        'changegroup': True,
        'cg.version': '01',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': False,
        'revbranchcache': False
    },
    'v2': {
        'changegroup': True,
        'cg.version': '02',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': True,
        'revbranchcache': True
    },
    'packed1' : {
        'cg.version': 's1'
    }
}
# 'bundle2' is a legacy alias for the v2 content options
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']

# Overrides applied on top of the per-version content opts when a
# "stream=v2" parameter is present in the bundle specification.
_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                    "tagsfnodescache": False,
                                    "revbranchcache": False}}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
90
90
@attr.s
class bundlespec(object):
    """Parsed representation of a bundle specification string.

    Attributes mirror the values computed by ``parsebundlespec``:
    the human-facing and wire-protocol compression names, the human
    and changegroup ("wire") version strings, the URI-decoded
    parameter dict, and the content options controlling which parts
    go into the bundle.
    """
    compression = attr.ib()
    wirecompression = attr.ib()
    version = attr.ib()
    wireversion = attr.ib()
    params = attr.ib()
    contentopts = attr.ib()
99
99
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>[;k=v[;k=v...]]" into (version, {k: v}),
        # URI-decoding each key and value.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if "stream" in params and params["stream"] == "v2":
        variant = _bundlespecvariants["streamv2"]
        contentopts.update(variant)

    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(compression, wirecompression, version, wireversion,
                      params, contentopts)
228
228
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from ``fh``.

    The first 4 bytes of ``fh`` are consumed to sniff the bundle magic and
    version. Returns a ``changegroup.cg1unpacker`` for HG10 bundles, a
    ``bundle2`` unbundler for HG2x bundles, or a
    ``streamclone.streamcloneapplier`` for HGS1 stream bundles.

    ``fname`` is used only for error messages; when empty it is reported
    as "stream" and a headerless changegroup (leading NUL byte) is fixed
    up and treated as an uncompressed HG10 bundle. When ``vfs`` is given
    and ``fname`` is non-empty, the name is joined onto the vfs root for
    reporting.

    Raises ``error.Abort`` if the data is not a Mercurial bundle or the
    version is unknown.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
256
256
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map a bundle compression type (e.g. 'BZ') to its spec name,
        # or None when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                version = "v2"
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
316
316
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # Drop nodes the local changelog doesn't know about.
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        # No common nodes given: everything from the null revision is missing.
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)
335
335
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal is this config is to allow developer to choose the bundle
    # version used during exchanged. This is especially handy during test.
    # Value is a list of bundle version to be picked from, highest version
    # should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    # Fall back to bundle1 as well when the remote lacks bundle2 support.
    return forcebundle1 or not op.remote.capable('bundle2')
350
350
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
468
468
# mapping of message used when pushing bookmark
# {action: (success message, failure message)}; each takes the bookmark name
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
477
477
478
478
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    # opargs keys must be native strings when splatted on Python 3
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        # local destination: it must understand every requirement of the
        # source repository or data would be lost/corrupted on push
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = ('cannot lock source repository: %s\n'
               % stringutil.forcebytestr(err))
        pushop.ui.debug(msg)

    # Whatever locks/transaction we obtained above are released in reverse
    # order when this block exits (nullcontextmanager fills the gaps).
    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        # the legacy (non-bundle2) paths below are no-ops for steps already
        # performed through bundle2 (tracked via pushop.stepsdone)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
545
545
# list of steps to perform discovery before push (order of registration)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
553
553
def pushdiscovery(stepname):
    """decorator registering a function as a pre-push discovery step

    The decorated function is recorded in the step -> function mapping and
    its name appended to the list of steps. Note that decoration order is
    registration order, which may matter.

    This decorator is for brand new steps only; to wrap an existing step
    from an extension, modify the pushdiscoverymapping dictionary directly.
    """
    def register(func):
        # a given step name may only be registered once through here
        assert stepname not in pushdiscoverymapping
        pushdiscoveryorder.append(stepname)
        pushdiscoverymapping[stepname] = func
        return func
    return register
569
569
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
575
575
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    repo, remote = pushop.repo, pushop.remote
    # incoming discovery first: it yields the common set and remote heads
    if pushop.revs:
        commoninc = discovery.findcommonincoming(
            repo, remote, force=pushop.force, ancestorsof=pushop.revs)
    else:
        commoninc = discovery.findcommonincoming(
            repo, remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    # reuse the incoming result to compute what we are about to send
    pushop.outgoing = discovery.findcommonoutgoing(
        repo, remote, onlyheads=pushop.revs,
        commoninc=commoninc, force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
592
592
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    # on a non-publishing server only already-public heads may be outdated;
    # on a publishing one everything we touch becomes public
    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    # %%ln survives the % formatting as %ln, to be expanded by unfi.set below
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # 'future' assumes the push succeeds; 'fallback' assumes it fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
645
645
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """select the obsolescence markers relevant to the pushed changesets"""
    repo = pushop.repo

    # bail out early when marker exchange is disabled locally
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return

    # nothing to send if the local store is empty
    if not repo.obsstore:
        return

    # remote capability check last: it may go over the wire
    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return

    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
662
662
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmark updates to send along with the push"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only bookmarks on pushed changesets (and ancestors) may move
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    # expand shorthands (e.g. '.') to real bookmark names; set comprehension
    # instead of the redundant set([<listcomp>]) form
    explicit = {repo._bookmarks.expandname(bookmark)
                for bookmark in pushop.bookmarks}

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # a missing bookmark on one side is represented by None
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        # turn binary node ids into hex for the rest of the push machinery
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
693
693
def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decision on bookmark to pull from the remote bookmark

    Exist to help extensions who want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    def _handled(name):
        # an explicitly requested bookmark is satisfied once a decision
        # has been taken about it
        if name in explicit:
            explicit.remove(name)

    # bookmark moved forward locally: push the move (when within pushed set)
    for b, scid, dcid in advsrc:
        _handled(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # bookmark only known locally: create it on the remote
    for b, scid, dcid in addsrc:
        _handled(b)
        pushop.outbookmarks.append((b, '', scid))
    # remote advanced, diverged or simply differs: overwrite it
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        _handled(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # bookmark only known remotely: treat as "deleted locally"
    for b, scid, dcid in adddst:
        _handled(b)
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        _handled(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
737
737
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before push

    Returns False when there is nothing to push, True otherwise.
    Raises error.Abort when a non-forced push would propagate obsolete or
    unstable changesets, or when head checking fails.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

        discovery.checkheads(pushop)
    return True
772
772
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
780
780
def b2partsgenerator(stepname, idx=None):
    """decorator registering a function that generates a bundle2 part

    The decorated function is recorded in the step -> function mapping and
    its name added to the ordered step list — appended by default, or
    inserted at position ``idx`` when given. Decoration order is
    registration order, which may matter.

    This decorator is for brand new steps only; to wrap an existing step
    from an extension, modify the b2partsgenmapping dictionary directly."""
    def register(func):
        # a given step name may only be registered once through here
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
799
799
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # legacy check: server aborts if its heads differ from this list
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # finer check: only complain about the remote heads this push
            # actually affects (replaces or removes)
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
823
823
def _pushing(pushop):
    """return True if we are pushing anything"""
    return any([pushop.outgoing.missing,
                pushop.outdatedphases,
                pushop.outobsmarkers,
                pushop.outbookmarks])
830
830
@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # need both something to check and a server able to check it
    if 'bookmarks' not in b2caps:
        return
    if not pushop.outbookmarks:
        return
    # record the expected current (old) node for every bookmark we move
    data = [(book, bin(old)) for book, old, new in pushop.outbookmarks]
    bundler.newpart('check:bookmarks', data=bookmod.binaryencode(data))
846
846
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            # sorted node lists give a deterministic part payload
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)
864
864
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the changegroup version with the remote
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
904
904
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    # the binary phase-heads part is preferred, unless the user forced the
    # legacy pushkey-based exchange through devel config
    forcelegacy = 'phases' in ui.configlist('devel', 'legacy.exchange')
    if 'heads' in remotecaps.get('phases', ()) and not forcelegacy:
        return _pushb2phaseheads(pushop, bundler)
    if 'pushkey' in remotecaps:
        return _pushb2phasespushkey(pushop, bundler)
921
921
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if not pushop.outdatedphases:
        return
    # slot 0 is the public phase: every outdated head becomes public
    updates = [[] for p in phases.allphases]
    updates[0].extend(h.node() for h in pushop.outdatedphases)
    bundler.newpart('phase-heads', data=phases.binaryencode(updates))
930
930
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, so reply/failure handlers can name the node
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        # one pushkey part per head moved from draft to public
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # warn on any head the server ignored or failed to publish
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
965
965
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """push obsolescence markers through a bundle2 part, when supported"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarkers format is shared with the remote; leave the step
        # undone so a later (pushkey) mechanism may still handle it
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
977
977
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    # 'devel.legacy.exchange' lets tests force the old pushkey-based path
    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        # modern binary bookmark part
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        # fall back to one pushkey part per bookmark
        return _pushb2bookmarkspushkey(pushop, bundler)
992
992
993 def _bmaction(old, new):
993 def _bmaction(old, new):
994 """small utility for bookmark pushing"""
994 """small utility for bookmark pushing"""
995 if not old:
995 if not old:
996 return 'export'
996 return 'export'
997 elif not new:
997 elif not new:
998 return 'delete'
998 return 'delete'
999 return 'update'
999 return 'update'
1000
1000
1001 def _pushb2bookmarkspart(pushop, bundler):
1001 def _pushb2bookmarkspart(pushop, bundler):
1002 pushop.stepsdone.add('bookmarks')
1002 pushop.stepsdone.add('bookmarks')
1003 if not pushop.outbookmarks:
1003 if not pushop.outbookmarks:
1004 return
1004 return
1005
1005
1006 allactions = []
1006 allactions = []
1007 data = []
1007 data = []
1008 for book, old, new in pushop.outbookmarks:
1008 for book, old, new in pushop.outbookmarks:
1009 new = bin(new)
1009 new = bin(new)
1010 data.append((book, new))
1010 data.append((book, new))
1011 allactions.append((book, _bmaction(old, new)))
1011 allactions.append((book, _bmaction(old, new)))
1012 checkdata = bookmod.binaryencode(data)
1012 checkdata = bookmod.binaryencode(data)
1013 bundler.newpart('bookmarks', data=checkdata)
1013 bundler.newpart('bookmarks', data=checkdata)
1014
1014
1015 def handlereply(op):
1015 def handlereply(op):
1016 ui = pushop.ui
1016 ui = pushop.ui
1017 # if success
1017 # if success
1018 for book, action in allactions:
1018 for book, action in allactions:
1019 ui.status(bookmsgmap[action][0] % book)
1019 ui.status(bookmsgmap[action][0] % book)
1020
1020
1021 return handlereply
1021 return handlereply
1022
1022
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmarks through one bundle2 pushkey part per bookmark"""
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # translate the failed part id back into a per-bookmark abort
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
1067
1067
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if not pushvars:
        return

    shellvars = {}
    for raw in pushvars:
        if '=' not in raw:
            msg = ("unable to parse variable '%s', should follow "
                   "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % raw)
        # split only on the first '=' so values may themselves contain '='
        k, v = raw.split('=', 1)
        shellvars[k] = v

    part = bundler.newpart('pushvars')

    # all params are advisory: an old server simply ignores the part
    for key, value in shellvars.iteritems():
        part.addparam(key, value, mandatory=False)
1086
1086
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)

    # let every registered part generator contribute; a generator may hand
    # back a callable that will process the server's reply for its part
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)

    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            # remote rejected a mandatory part or parameter
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # remote aborted while processing one of our parts
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        # dispatch to the failure callback registered for that part, if any
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1140
1140
1141 def _pushchangeset(pushop):
1141 def _pushchangeset(pushop):
1142 """Make the actual push of changeset bundle to remote repo"""
1142 """Make the actual push of changeset bundle to remote repo"""
1143 if 'changesets' in pushop.stepsdone:
1143 if 'changesets' in pushop.stepsdone:
1144 return
1144 return
1145 pushop.stepsdone.add('changesets')
1145 pushop.stepsdone.add('changesets')
1146 if not _pushcheckoutgoing(pushop):
1146 if not _pushcheckoutgoing(pushop):
1147 return
1147 return
1148
1148
1149 # Should have verified this in push().
1149 # Should have verified this in push().
1150 assert pushop.remote.capable('unbundle')
1150 assert pushop.remote.capable('unbundle')
1151
1151
1152 pushop.repo.prepushoutgoinghooks(pushop)
1152 pushop.repo.prepushoutgoinghooks(pushop)
1153 outgoing = pushop.outgoing
1153 outgoing = pushop.outgoing
1154 # TODO: get bundlecaps from remote
1154 # TODO: get bundlecaps from remote
1155 bundlecaps = None
1155 bundlecaps = None
1156 # create a changegroup from local
1156 # create a changegroup from local
1157 if pushop.revs is None and not (outgoing.excluded
1157 if pushop.revs is None and not (outgoing.excluded
1158 or pushop.repo.changelog.filteredrevs):
1158 or pushop.repo.changelog.filteredrevs):
1159 # push everything,
1159 # push everything,
1160 # use the fast path, no race possible on push
1160 # use the fast path, no race possible on push
1161 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1161 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1162 fastpath=True, bundlecaps=bundlecaps)
1162 fastpath=True, bundlecaps=bundlecaps)
1163 else:
1163 else:
1164 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1164 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1165 'push', bundlecaps=bundlecaps)
1165 'push', bundlecaps=bundlecaps)
1166
1166
1167 # apply changegroup to remote
1167 # apply changegroup to remote
1168 # local repo finds heads on server, finds out what
1168 # local repo finds heads on server, finds out what
1169 # revs it must push. once revs transferred, if server
1169 # revs it must push. once revs transferred, if server
1170 # finds it has different heads (someone else won
1170 # finds it has different heads (someone else won
1171 # commit/push race), server aborts.
1171 # commit/push race), server aborts.
1172 if pushop.force:
1172 if pushop.force:
1173 remoteheads = ['force']
1173 remoteheads = ['force']
1174 else:
1174 else:
1175 remoteheads = pushop.remoteheads
1175 remoteheads = pushop.remoteheads
1176 # ssh: return remote's addchangegroup()
1176 # ssh: return remote's addchangegroup()
1177 # http: return remote's addchangegroup() or 0 for error
1177 # http: return remote's addchangegroup() or 0 for error
1178 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1178 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1179 pushop.repo.url())
1179 pushop.repo.url())
1180
1180
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # remote publishes everything it receives: common heads are public
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1241 def _localphasemove(pushop, nodes, phase=phases.public):
1241 def _localphasemove(pushop, nodes, phase=phases.public):
1242 """move <nodes> to <phase> in the local source repo"""
1242 """move <nodes> to <phase> in the local source repo"""
1243 if pushop.trmanager:
1243 if pushop.trmanager:
1244 phases.advanceboundary(pushop.repo,
1244 phases.advanceboundary(pushop.repo,
1245 pushop.trmanager.transaction(),
1245 pushop.trmanager.transaction(),
1246 phase,
1246 phase,
1247 nodes)
1247 nodes)
1248 else:
1248 else:
1249 # repo is not locked, do not change any phases!
1249 # repo is not locked, do not change any phases!
1250 # Informs the user that phases should have been moved when
1250 # Informs the user that phases should have been moved when
1251 # applicable.
1251 # applicable.
1252 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1252 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1253 phasestr = phases.phasenames[phase]
1253 phasestr = phases.phasenames[phase]
1254 if actualmoves:
1254 if actualmoves:
1255 pushop.ui.status(_('cannot lock source repo, skipping '
1255 pushop.ui.status(_('cannot lock source repo, skipping '
1256 'local %s phase update\n') % phasestr)
1256 'local %s phase update\n') % phasestr)
1257
1257
1258 def _pushobsolete(pushop):
1258 def _pushobsolete(pushop):
1259 """utility function to push obsolete markers to a remote"""
1259 """utility function to push obsolete markers to a remote"""
1260 if 'obsmarkers' in pushop.stepsdone:
1260 if 'obsmarkers' in pushop.stepsdone:
1261 return
1261 return
1262 repo = pushop.repo
1262 repo = pushop.repo
1263 remote = pushop.remote
1263 remote = pushop.remote
1264 pushop.stepsdone.add('obsmarkers')
1264 pushop.stepsdone.add('obsmarkers')
1265 if pushop.outobsmarkers:
1265 if pushop.outobsmarkers:
1266 pushop.ui.debug('try to push obsolete markers to remote\n')
1266 pushop.ui.debug('try to push obsolete markers to remote\n')
1267 rslts = []
1267 rslts = []
1268 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1268 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1269 for key in sorted(remotedata, reverse=True):
1269 for key in sorted(remotedata, reverse=True):
1270 # reverse sort to ensure we end with dump0
1270 # reverse sort to ensure we end with dump0
1271 data = remotedata[key]
1271 data = remotedata[key]
1272 rslts.append(remote.pushkey('obsolete', key, '', data))
1272 rslts.append(remote.pushkey('obsolete', key, '', data))
1273 if [r for r in rslts if not r]:
1273 if [r for r in rslts if not r]:
1274 msg = _('failed to push some obsolete markers!\n')
1274 msg = _('failed to push some obsolete markers!\n')
1275 repo.ui.warn(msg)
1275 repo.ui.warn(msg)
1276
1276
1277 def _pushbookmark(pushop):
1277 def _pushbookmark(pushop):
1278 """Update bookmark position on remote"""
1278 """Update bookmark position on remote"""
1279 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1279 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1280 return
1280 return
1281 pushop.stepsdone.add('bookmarks')
1281 pushop.stepsdone.add('bookmarks')
1282 ui = pushop.ui
1282 ui = pushop.ui
1283 remote = pushop.remote
1283 remote = pushop.remote
1284
1284
1285 for b, old, new in pushop.outbookmarks:
1285 for b, old, new in pushop.outbookmarks:
1286 action = 'update'
1286 action = 'update'
1287 if not old:
1287 if not old:
1288 action = 'export'
1288 action = 'export'
1289 elif not new:
1289 elif not new:
1290 action = 'delete'
1290 action = 'delete'
1291
1291
1292 with remote.commandexecutor() as e:
1292 with remote.commandexecutor() as e:
1293 r = e.callcommand('pushkey', {
1293 r = e.callcommand('pushkey', {
1294 'namespace': 'bookmarks',
1294 'namespace': 'bookmarks',
1295 'key': b,
1295 'key': b,
1296 'old': old,
1296 'old': old,
1297 'new': new,
1297 'new': new,
1298 }).result()
1298 }).result()
1299
1299
1300 if r:
1300 if r:
1301 ui.status(bookmsgmap[action][0] % b)
1301 ui.status(bookmsgmap[action][0] % b)
1302 else:
1302 else:
1303 ui.warn(bookmsgmap[action][1] % b)
1303 ui.warn(bookmsgmap[action][1] % b)
1304 # discovery can have set the value form invalid entry
1304 # discovery can have set the value form invalid entry
1305 if pushop.bkresult is not None:
1305 if pushop.bkresult is not None:
1306 pushop.bkresult = 1
1306 pushop.bkresult = 1
1307
1307
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None,
                 includepats=None, excludepats=None, depth=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is not None:
            # We pulled a specific subset; sync on this subset.
            return self.heads
        # We pulled every thing possible; sync on everything common.
        known = set(self.common)
        subset = list(self.common)
        subset.extend(n for n in self.rheads if n not in known)
        return subset

    @util.propertycache
    def canusebundle2(self):
        # bundle2 unless a config/capability forces the bundle1 protocol
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1385
1385
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily-created transaction; stays None until transaction() is used
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1415
1415
def listkeys(remote, namespace):
    """fetch the listkeys mapping for ``namespace`` from ``remote``"""
    with remote.commandexecutor() as executor:
        fut = executor.callcommand('listkeys', {'namespace': namespace})
        return fut.result()
1419
1419
def _fullpullbundle2(repo, pullop):
    """Pull via bundle2, re-requesting until the remote reply is complete.

    ``repo`` is the local repository; ``pullop`` carries the pull state
    (common set, remote heads, ...) and is updated in place.
    """
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # no changeset was added by this round: reply was complete
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally: nothing left to pull
            break
        # Partial reply: fold the newly received heads into the common set
        # and loop to request whatever is still missing.
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1457
1457
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None, includepats=None, excludepats=None,
         depth=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.

    Raises ``error.Abort`` when the destination lacks a feature required by
    the source repository.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    # strkwargs: keyword names must be native str on both py2 and py3
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           includepats=includepats, excludepats=excludepats,
                           depth=depth,
                           **pycompat.strkwargs(opargs))

    # refuse to pull into a repo missing features the source requires
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable('command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to run
            # before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            # the _pull* steps below no-op when a previous step already
            # handled their data (tracked through pullop.stepsdone)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool('experimental', 'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop
1539
1539
# Ordered list of discovery step names, executed before a pull.
pulldiscoveryorder = []

# Step name -> implementing function.
#
# Kept as a mutable module-level mapping so extensions can wrap individual
# steps if necessary.
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """Decorator registering a pre-pull discovery step.

    The decorated function is recorded under ``stepname`` in the step
    mapping and ``stepname`` is appended to the ordered step list, so
    registration order determines execution order (this may matter).

    Only use this for a brand-new step; to wrap an existing step from an
    extension, modify the pulldiscovery dictionary directly.
    """
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1563
1563
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
1569
1569
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # already fetched; nothing to do
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementation.
        return
    raw = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1584
1584
1585
1585
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    # nodemap of the *unfiltered* changelog: membership means the node is
    # known locally, even if it is hidden in the filtered view
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads is filtered locally, put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        # once every remote head is common, there is nothing left to fetch
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1618
1618
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the ``getbundle`` argument dict from the pull state, issues the
    command, processes the returned bundle, and records which steps
    (changegroup, phases, bookmarks, obsmarkers) were covered so the
    legacy fallbacks become no-ops.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        # stream clone replaces both the changegroup and phase steps
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        # prefer the binary phase-heads part unless disabled via
        # devel.legacy.exchange or unsupported by the server
        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                # fall back to pushkey-based phase exchange
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            # only request obsmarkers when both sides share a version
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # extension hook: last chance to tweak the getbundle arguments
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

    try:
        op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                     source='pull')
        # record bookmark parts instead of applying them directly; they are
        # reconciled below through _pullbookmarks()
        op.modes['bookmarks'] = 'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1740
1740
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    # Intentionally empty: extensions wrap this to mutate ``kwargs`` before
    # the getbundle command is issued in _pullbundle2().
1743
1743
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2) changegroup pull. No-op when the changegroup step
    was already handled (e.g. by _pullbundle2). Picks the best wire command
    the remote supports: ``getbundle``, ``changegroup`` (full pull only) or
    ``changegroupsubset``.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # full pull over the oldest protocol
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1789
1789
def _pullphase(pullop):
    """Fetch phase data via listkeys and apply it locally.

    No-op when phases were already handled (e.g. by _pullbundle2)."""
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, 'phases')
    _pullapplyphases(pullop, remotephases)
1796
1796
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the pushkey-style mapping returned by the remote's
    ``phases`` namespace. Advances local phase boundaries (to public and/or
    draft) accordingly; phases only move forward here, never backward.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind hot lookups once; rev() maps node -> local revision number
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1831
1831
def _pullbookmarks(pullop):
    """Reconcile local bookmarks with the remote bookmark data on *pullop*.

    No-op when the bookmark step was already handled."""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    localrepo = pullop.repo
    bookmod.updatefromremote(localrepo.ui,
                             localrepo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1843
1843
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `pullop.gettransaction` is a function returning the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction may have been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            # walk the 'dumpN' keys in a deterministic (reverse-sorted) order
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    version, newmarks = obsolete._readmarkers(
                        util.b85decode(remoteobs[key]))
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
1871
1871
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        # Use %-formatting: these are bytes under the py3 source transformer
        # and bytes objects have no .format() method.
        raise error.Abort(_("%s configuration for user %s is empty")
                          % (_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    # getbundle arguments arrive as native-str keys; hence the r'' prefixes.
    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for %s: %s")
            % (username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'narrow_acl'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args
1916
1916
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # Move `roots` off of `head` onto the intermediate `child`.
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        # NOTE: rebinds `clrev` from the rev-lookup alias to the
        # changelogrevision object for this rev (as in the original).
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2042
2042
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise HG20 plus this repo's URL-quoted bundle2 capability blob
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
2049
2049
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        # a step name may only be registered once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
2076
2076
def bundle2requested(bundlecaps):
    """Return True if the given capabilities advertise any bundle2 version."""
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
2081
2081
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)

    if not usebundle2:
        # bundle10 case: only a bare changegroup can be produced
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    # decode the client's advertised bundle2 capabilities
    b2caps = {}
    for cap in bundlecaps:
        if cap.startswith('bundle2='):
            blob = urlreq.unquote(cap[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2128
2128
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    """add a stream clone v2 part to the requested bundle (if applicable)"""
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2132
2132
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    # kwargs keys are native strs on py3; r'' keeps them untransformed
    if not kwargs.get(r'cg', True):
        return

    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get(r'narrow', False):
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps, matcher=matcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    # r'narrow_acl' must carry the r'' prefix: without it the py3 source
    # transformer turns the key into bytes and the lookup silently fails.
    if (kwargs.get(r'narrow', False) and kwargs.get(r'narrow_acl', False)
        and (include or exclude)):
        narrowspecpart = bundler.newpart('narrow:spec')
        if include:
            narrowspecpart.addparam(
                'include', '\n'.join(include), mandatory=True)
        if exclude:
            narrowspecpart.addparam(
                'exclude', '\n'.join(exclude), mandatory=True)
2182
2182
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    # binary-encode the full bookmark set; only emit a part when non-empty
    data = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if data:
        bundler.newpart('bookmarks', data=data)
2195
2195
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'listkeys' part per requested namespace
    for namespace in kwargs.get(r'listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
2206
2206
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get(r'obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # restrict to markers relevant to the ancestors of the requested heads
    ancestors = [ctx.node() for ctx in repo.set('::%ln', heads)]
    relevant = sorted(repo.obsstore.relevantmarkers(ancestors))
    bundle2.buildobsmarkerspart(bundler, relevant)
2218
2218
@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if not kwargs.get(r'phases', False):
        return
    if 'heads' not in b2caps.get('phases'):
        raise ValueError(_('no common phases exchange method'))
    if heads is None:
        heads = repo.heads()

    headsbyphase = collections.defaultdict(set)
    if repo.publishing():
        # everything a publishing server serves is public
        headsbyphase[phases.public] = heads
    else:
        # find the appropriate heads to move
        phase = repo._phasecache.phase
        node = repo.changelog.node
        rev = repo.changelog.rev
        for h in heads:
            headsbyphase[phase(repo, rev(h))].add(h)
        seenphases = list(headsbyphase.keys())

        # We do not handle anything but public and draft phase for now)
        if seenphases:
            assert max(seenphases) <= phases.draft

        # if client is pulling non-public changesets, we need to find
        # intermediate public heads.
        draftheads = headsbyphase.get(phases.draft, set())
        if draftheads:
            publicheads = headsbyphase.get(phases.public, set())

            revset = 'heads(only(%ln, %ln) and public())'
            extraheads = repo.revs(revset, draftheads, publicheads)
            for r in extraheads:
                headsbyphase[phases.public].add(node(r))

    # transform data in a format used by the encoding function
    phasemapping = [sorted(headsbyphase[phase])
                    for phase in phases.allphases]

    # generate the actual part
    phasedata = phases.binaryencode(phasemapping)
    bundler.newpart('phase-heads', data=phasedata)
2265
2265
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless changesets are being exchanged.
    if not kwargs.get(r'cg', True):
        return
    # Don't send unless the client advertises support for the part.
    if 'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2284 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2285
2285
@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Only send when:
    # - changesets are being exchanged,
    # - the client supports the part,
    # - no narrow bundle is in play (not currently compatible).
    sendable = (kwargs.get(r'cg', True)
                and 'rev-branch-cache' in b2caps
                and not kwargs.get(r'narrow', False)
                and not repo.ui.has_section(_NARROWACL_SECTION))
    if not sendable:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2311 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2312
2312
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.  Raises PushRaced when the repository's
    heads no longer match what the remote side based its bundle on.
    """
    current_heads = repo.heads()
    current_hash = hashlib.sha1(''.join(sorted(current_heads))).digest()

    # The client may bypass the check ('force'), send the literal head
    # list, or send a hash of the sorted heads ('hashed').
    if their_heads == ['force']:
        return
    if their_heads == current_heads:
        return
    if their_heads == ['hashed', current_hash]:
        return

    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
2326
2326
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    # Lazily acquire wlock, lock and open the transaction the
                    # first time a bundle2 part actually asks for one; results
                    # are stashed in lockandtr so the outer finally can release
                    # them.
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # Buffer subsequent ui output so it can be shipped back
                        # to the client as an 'output' part of the reply.
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # Tag the exception so callers know it happened during
                # bundle2 processing, and salvage any reply output already
                # generated so it still reaches the client.
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        # Release in reverse acquisition order: transaction, lock, wlock.
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2400
2400
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # Respect the client-side kill switch.
    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # Specific revisions were requested; a clone bundle would pull more
    # than asked for, so skip it.
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # Entries are sorted by client preference; try only the best one.
    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
2466 '"--config ui.clonebundles=false"'))
2467
2467
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue

        # First field is the URL; remaining fields are KEY=VALUE attributes,
        # both halves URL-quoted.
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            if key != 'BUNDLESPEC':
                continue

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.  Unparseable specs are simply
            # left without the derived keys.
            try:
                bundlespec = parsebundlespec(repo, value)
            except (error.InvalidBundleSpecification,
                    error.UnsupportedBundleSpecification):
                continue
            attrs['COMPRESSION'] = bundlespec.compression
            attrs['VERSION'] = bundlespec.version

        entries.append(attrs)

    return entries
2501 return m
2502
2502
def isstreamclonespec(bundlespec):
    """Report whether ``bundlespec`` describes a stream clone bundle."""
    # Both stream clone variants are uncompressed on the wire.
    if bundlespec.wirecompression != 'UN':
        return False

    # Stream clone v1
    if bundlespec.wireversion == 's1':
        return True

    # Stream clone v2
    return bool(bundlespec.wireversion == '02'
                and bundlespec.contentopts.get('streamv2'))
2515
2515
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                # strict=True: reject specs this client does not understand.
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            # Malformed or unsupported specs drop the entry but never abort
            # the whole filtering pass.
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries
2563
2563
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Walk preferences in order; the first attribute that distinguishes
        # the two entries decides.
        for key, wanted in self.prefers:
            ours = self.value.get(key)
            theirs = other.value.get(key)

            # An exact match beats the attribute being absent on the other
            # side.
            if ours is not None and theirs is None and ours == wanted:
                return -1
            if theirs is not None and ours is None and theirs == wanted:
                return 1

            # We can't compare unless the attribute is present on both.
            if ours is None or theirs is None:
                continue

            # Identical values fall through to the next preference.
            if ours == theirs:
                continue

            # Exact matches come first.
            if ours == wanted:
                return -1
            if theirs == wanted:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
2627
2627
def sortclonebundleentries(ui, entries):
    """Sort manifest entries by the user's ``ui.clonebundleprefers`` config.

    Without any configured preference the input order is preserved.
    """
    raw = ui.configlist('ui', 'clonebundleprefers')
    if not raw:
        return list(entries)

    # Each preference is a ``KEY=VALUE`` string; split only on the first '='.
    prefers = []
    for pref in raw:
        prefers.append(pref.split('=', 1))

    wrapped = [clonebundleentry(entry, prefers) for entry in entries]
    wrapped.sort()
    return [w.value for w in wrapped]
2637
2637
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied; False on HTTP or
    URL errors (after printing a warning), letting the caller fall back.
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            # Stream clones bypass the transaction and write stores directly;
            # everything else goes through the normal bundle2 machinery.
            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False
General Comments 0
You need to be logged in to leave comments. Login now