exchange: don't use dagutil...
Gregory Szorc
r39194:b0c73866 default
--- a/mercurial/exchange.py
+++ b/mercurial/exchange.py
@@ -1,2623 +1,2623 @@
 # exchange.py - utility to exchange data between repos.
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import collections
 import hashlib

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
 )
 from .thirdparty import (
     attr,
 )
 from . import (
     bookmarks as bookmod,
     bundle2,
     changegroup,
-    dagutil,
     discovery,
     error,
     lock as lockmod,
     logexchange,
     narrowspec,
     obsolete,
     phases,
     pushkey,
     pycompat,
     repository,
     scmutil,
     sslutil,
     streamclone,
     url as urlmod,
     util,
 )
 from .utils import (
     stringutil,
 )

 urlerr = util.urlerr
 urlreq = util.urlreq

 _NARROWACL_SECTION = 'narrowhgacl'

 # Maps bundle version human names to changegroup versions.
 _bundlespeccgversions = {'v1': '01',
                          'v2': '02',
                          'packed1': 's1',
                          'bundle2': '02', #legacy
                         }

 # Maps bundle version names to content options to choose which parts to bundle
 _bundlespeccontentopts = {
     'v1': {
         'changegroup': True,
         'cg.version': '01',
         'obsolescence': False,
         'phases': False,
         'tagsfnodescache': False,
         'revbranchcache': False
     },
     'v2': {
         'changegroup': True,
         'cg.version': '02',
         'obsolescence': False,
         'phases': False,
         'tagsfnodescache': True,
         'revbranchcache': True
     },
     'packed1' : {
         'cg.version': 's1'
     }
 }
 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
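
The 'bundle2' entry above is an alias for 'v2', not a copy. A minimal
standalone sketch (not part of exchange.py) of what that implies:

    cgversions = {'v1': '01', 'v2': '02', 'packed1': 's1', 'bundle2': '02'}
    contentopts = {'v2': {'changegroup': True, 'cg.version': '02'}}
    contentopts['bundle2'] = contentopts['v2']  # alias, not a copy

    assert cgversions['bundle2'] == cgversions['v2'] == '02'
    # both names refer to the same dict, so mutating one mutates the other
    assert contentopts['bundle2'] is contentopts['v2']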

 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                     "tagsfnodescache": False,
                                     "revbranchcache": False}}

 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

 @attr.s
 class bundlespec(object):
     compression = attr.ib()
     wirecompression = attr.ib()
     version = attr.ib()
     wireversion = attr.ib()
     params = attr.ib()
     contentopts = attr.ib()

 def parsebundlespec(repo, spec, strict=True):
     """Parse a bundle string specification into parts.

     Bundle specifications denote a well-defined bundle/exchange format.
     The content of a given specification should not change over time in
     order to ensure that bundles produced by a newer version of Mercurial are
     readable from an older version.

     The string currently has the form:

        <compression>-<type>[;<parameter0>[;<parameter1>]]

     Where <compression> is one of the supported compression formats
     and <type> is (currently) a version string. A ";" can follow the type and
     all text afterwards is interpreted as URI encoded, ";" delimited key=value
     pairs.

     If ``strict`` is True (the default) <compression> is required. Otherwise,
     it is optional.

     Returns a bundlespec object of (compression, version, parameters).
     Compression will be ``None`` if not in strict mode and a compression isn't
     defined.

     An ``InvalidBundleSpecification`` is raised when the specification is
     not syntactically well formed.

     An ``UnsupportedBundleSpecification`` is raised when the compression or
     bundle type/version is not recognized.

     Note: this function will likely eventually return a more complex data
     structure, including bundle2 part information.
     """
     def parseparams(s):
         if ';' not in s:
             return s, {}

         params = {}
         version, paramstr = s.split(';', 1)

         for p in paramstr.split(';'):
             if '=' not in p:
                 raise error.InvalidBundleSpecification(
                     _('invalid bundle specification: '
                       'missing "=" in parameter: %s') % p)

             key, value = p.split('=', 1)
             key = urlreq.unquote(key)
             value = urlreq.unquote(value)
             params[key] = value

         return version, params


     if strict and '-' not in spec:
         raise error.InvalidBundleSpecification(
                 _('invalid bundle specification; '
                   'must be prefixed with compression: %s') % spec)

     if '-' in spec:
         compression, version = spec.split('-', 1)

         if compression not in util.compengines.supportedbundlenames:
             raise error.UnsupportedBundleSpecification(
                     _('%s compression is not supported') % compression)

         version, params = parseparams(version)

         if version not in _bundlespeccgversions:
             raise error.UnsupportedBundleSpecification(
                     _('%s is not a recognized bundle version') % version)
     else:
         # Value could be just the compression or just the version, in which
         # case some defaults are assumed (but only when not in strict mode).
         assert not strict

         spec, params = parseparams(spec)

         if spec in util.compengines.supportedbundlenames:
             compression = spec
             version = 'v1'
             # Generaldelta repos require v2.
             if 'generaldelta' in repo.requirements:
                 version = 'v2'
             # Modern compression engines require v2.
             if compression not in _bundlespecv1compengines:
                 version = 'v2'
         elif spec in _bundlespeccgversions:
             if spec == 'packed1':
                 compression = 'none'
             else:
                 compression = 'bzip2'
             version = spec
         else:
             raise error.UnsupportedBundleSpecification(
                     _('%s is not a recognized bundle specification') % spec)

     # Bundle version 1 only supports a known set of compression engines.
     if version == 'v1' and compression not in _bundlespecv1compengines:
         raise error.UnsupportedBundleSpecification(
             _('compression engine %s is not supported on v1 bundles') %
             compression)

     # The specification for packed1 can optionally declare the data formats
     # required to apply it. If we see this metadata, compare against what the
     # repo supports and error if the bundle isn't compatible.
     if version == 'packed1' and 'requirements' in params:
         requirements = set(params['requirements'].split(','))
         missingreqs = requirements - repo.supportedformats
         if missingreqs:
             raise error.UnsupportedBundleSpecification(
                     _('missing support for repository features: %s') %
                       ', '.join(sorted(missingreqs)))

     # Compute contentopts based on the version
     contentopts = _bundlespeccontentopts.get(version, {}).copy()

     # Process the variants
     if "stream" in params and params["stream"] == "v2":
         variant = _bundlespecvariants["streamv2"]
         contentopts.update(variant)

     engine = util.compengines.forbundlename(compression)
     compression, wirecompression = engine.bundletype()
     wireversion = _bundlespeccgversions[version]

     return bundlespec(compression, wirecompression, version, wireversion,
                       params, contentopts)
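
The grammar described in the docstring above can be exercised on its own.
A standalone Python 3 sketch (parse_spec is a hypothetical helper, not
parsebundlespec itself, and uses urllib instead of urlreq):

    from urllib.parse import unquote

    def parse_spec(spec):
        # strict form: <compression>-<version>[;key=value[;key=value]]
        compression, rest = spec.split('-', 1)
        version, _, paramstr = rest.partition(';')
        params = {}
        for p in paramstr.split(';') if paramstr else []:
            key, value = p.split('=', 1)
            params[unquote(key)] = unquote(value)
        return compression, version, params

    print(parse_spec('zstd-v2;obsolescence=true'))
    # ('zstd', 'v2', {'obsolescence': 'true'})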

 def readbundle(ui, fh, fname, vfs=None):
     header = changegroup.readexactly(fh, 4)

     alg = None
     if not fname:
         fname = "stream"
         if not header.startswith('HG') and header.startswith('\0'):
             fh = changegroup.headerlessfixup(fh, header)
             header = "HG10"
             alg = 'UN'
     elif vfs:
         fname = vfs.join(fname)

     magic, version = header[0:2], header[2:4]

     if magic != 'HG':
         raise error.Abort(_('%s: not a Mercurial bundle') % fname)
     if version == '10':
         if alg is None:
             alg = changegroup.readexactly(fh, 2)
         return changegroup.cg1unpacker(fh, alg)
     elif version.startswith('2'):
         return bundle2.getunbundler(ui, fh, magicstring=magic + version)
     elif version == 'S1':
         return streamclone.streamcloneapplier(fh)
     else:
         raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
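
readbundle() dispatches purely on the first four header bytes. A standalone
sketch of that dispatch (classify is illustrative, not a Mercurial API):

    def classify(header):
        magic, version = header[0:2], header[2:4]
        if magic != b'HG':
            raise ValueError('not a Mercurial bundle')
        if version == b'10':
            return 'bundle1'        # 2-byte compression marker follows
        if version.startswith(b'2'):
            return 'bundle2'
        if version == b'S1':
            return 'streamclone'
        raise ValueError('unknown bundle version %r' % version)

    assert classify(b'HG10') == 'bundle1'
    assert classify(b'HG20') == 'bundle2'
    assert classify(b'HGS1') == 'streamclone'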

 def getbundlespec(ui, fh):
     """Infer the bundlespec from a bundle file handle.

     The input file handle is seeked and the original seek position is not
     restored.
     """
     def speccompression(alg):
         try:
             return util.compengines.forbundletype(alg).bundletype()[0]
         except KeyError:
             return None

     b = readbundle(ui, fh, None)
     if isinstance(b, changegroup.cg1unpacker):
         alg = b._type
         if alg == '_truncatedBZ':
             alg = 'BZ'
         comp = speccompression(alg)
         if not comp:
             raise error.Abort(_('unknown compression algorithm: %s') % alg)
         return '%s-v1' % comp
     elif isinstance(b, bundle2.unbundle20):
         if 'Compression' in b.params:
             comp = speccompression(b.params['Compression'])
             if not comp:
                 raise error.Abort(_('unknown compression algorithm: %s') % comp)
         else:
             comp = 'none'

         version = None
         for part in b.iterparts():
             if part.type == 'changegroup':
                 version = part.params['version']
                 if version in ('01', '02'):
                     version = 'v2'
                 else:
                     raise error.Abort(_('changegroup version %s does not have '
                                         'a known bundlespec') % version,
                                       hint=_('try upgrading your Mercurial '
                                              'client'))
             elif part.type == 'stream2' and version is None:
                 # A stream2 part must be part of a v2 bundle
                 version = "v2"
                 requirements = urlreq.unquote(part.params['requirements'])
                 splitted = requirements.split()
                 params = bundle2._formatrequirementsparams(splitted)
                 return 'none-v2;stream=v2;%s' % params

         if not version:
             raise error.Abort(_('could not identify changegroup version in '
                                 'bundle'))

         return '%s-%s' % (comp, version)
     elif isinstance(b, streamclone.streamcloneapplier):
         requirements = streamclone.readbundle1header(fh)[2]
         formatted = bundle2._formatrequirementsparams(requirements)
         return 'none-packed1;%s' % formatted
     else:
         raise error.Abort(_('unknown bundle type: %s') % b)
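
The strings this function returns follow the '<compression>-<version>'
shape parsed by parsebundlespec() above; the parameter payloads below are
illustrative examples, not output captured from a real repository:

    specs = [
        'bzip2-v1',                  # cg1 bundle
        'zstd-v2',                   # bundle2 with a Compression param
        'none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlogv1',
        'none-packed1;requirements%3Drevlogv1',
    ]
    for s in specs:
        compression, _, version = s.partition('-')
        print(compression, version.split(';')[0])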

 def _computeoutgoing(repo, heads, common):
     """Computes which revs are outgoing given a set of common
     and a set of heads.

     This is a separate function so extensions can have access to
     the logic.

     Returns a discovery.outgoing object.
     """
     cl = repo.changelog
     if common:
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
         common = [nullid]
     if not heads:
         heads = cl.heads()
     return discovery.outgoing(repo, common, heads)

 def _forcebundle1(op):
     """return true if a pull/push must use bundle1

     This function is used to allow testing of the older bundle version"""
     ui = op.repo.ui
     # The goal of this config is to allow developers to choose the bundle
     # version used during exchange. This is especially handy during tests.
     # Value is a list of bundle versions to pick from; the highest version
     # should be used.
     #
     # developer config: devel.legacy.exchange
     exchange = ui.configlist('devel', 'legacy.exchange')
     forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
     return forcebundle1 or not op.remote.capable('bundle2')
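
A standalone restatement of the selection rule above: bundle1 is forced
only when devel.legacy.exchange lists 'bundle1' but not 'bundle2', or when
the remote lacks the 'bundle2' capability (function name is illustrative):

    def force_bundle1(exchange_cfg, remote_has_bundle2):
        forced = 'bundle2' not in exchange_cfg and 'bundle1' in exchange_cfg
        return forced or not remote_has_bundle2

    assert force_bundle1(['bundle1'], True) is True
    assert force_bundle1(['bundle1', 'bundle2'], True) is False
    assert force_bundle1([], False) is True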

 class pushoperation(object):
     """An object that represents a single push operation

     Its purpose is to carry push related state and very common operations.

     A new pushoperation should be created at the beginning of each push and
     discarded afterward.
     """

     def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                  bookmarks=(), pushvars=None):
         # repo we push from
         self.repo = repo
         self.ui = repo.ui
         # repo we push to
         self.remote = remote
         # force option provided
         self.force = force
         # revs to be pushed (None is "all")
         self.revs = revs
         # bookmarks explicitly pushed
         self.bookmarks = bookmarks
         # allow push of new branches
         self.newbranch = newbranch
         # steps already performed
         # (used to check what steps have been already performed through bundle2)
         self.stepsdone = set()
         # Integer version of the changegroup push result
         # - None means nothing to push
         # - 0 means HTTP error
         # - 1 means we pushed and remote head count is unchanged *or*
         #   we have outgoing changesets but refused to push
         # - other values as described by addchangegroup()
         self.cgresult = None
         # Boolean value for the bookmark push
         self.bkresult = None
         # discovery.outgoing object (contains common and outgoing data)
         self.outgoing = None
         # all remote topological heads before the push
         self.remoteheads = None
         # Details of the remote branch pre and post push
         #
         # mapping: {'branch': ([remoteheads],
         #                      [newheads],
         #                      [unsyncedheads],
         #                      [discardedheads])}
         # - branch: the branch name
         # - remoteheads: the list of remote heads known locally
         #                None if the branch is new
         # - newheads: the new remote heads (known locally) with outgoing pushed
         # - unsyncedheads: the list of remote heads unknown locally.
         # - discardedheads: the list of remote heads made obsolete by the push
         self.pushbranchmap = None
         # testable as a boolean indicating if any nodes are missing locally.
         self.incoming = None
         # summary of the remote phase situation
         self.remotephases = None
         # phase changes that must be pushed alongside the changesets
         self.outdatedphases = None
         # phase changes that must be pushed if the changeset push fails
         self.fallbackoutdatedphases = None
         # outgoing obsmarkers
         self.outobsmarkers = set()
         # outgoing bookmarks
         self.outbookmarks = []
         # transaction manager
         self.trmanager = None
         # map { pushkey partid -> callback handling failure}
         # used to handle exceptions from mandatory pushkey part failures
         self.pkfailcb = {}
         # an iterable of pushvars or None
         self.pushvars = pushvars

     @util.propertycache
     def futureheads(self):
         """future remote heads if the changeset push succeeds"""
         return self.outgoing.missingheads

     @util.propertycache
     def fallbackheads(self):
         """future remote heads if the changeset push fails"""
         if self.revs is None:
             # no target to push, all common are relevant
             return self.outgoing.commonheads
         unfi = self.repo.unfiltered()
         # I want cheads = heads(::missingheads and ::commonheads)
         # (missingheads is revs with secret changeset filtered out)
         #
         # This can be expressed as:
         #     cheads = ( (missingheads and ::commonheads)
         #              + (commonheads and ::missingheads))
         #
         # while trying to push we already computed the following:
         #     common = (::commonheads)
         #     missing = ((commonheads::missingheads) - commonheads)
         #
         # We can pick:
         # * missingheads part of common (::commonheads)
         common = self.outgoing.common
         nm = self.repo.changelog.nodemap
         cheads = [node for node in self.revs if nm[node] in common]
         # and
         # * commonheads parents on missing
         revset = unfi.set('%ln and parents(roots(%ln))',
                           self.outgoing.commonheads,
                           self.outgoing.missing)
         cheads.extend(c.node() for c in revset)
         return cheads

     @property
     def commonheads(self):
         """set of all common heads after changeset bundle push"""
         if self.cgresult:
             return self.futureheads
         else:
             return self.fallbackheads

 # mapping of messages used when pushing bookmarks
 bookmsgmap = {'update': (_("updating bookmark %s\n"),
                          _('updating bookmark %s failed!\n')),
               'export': (_("exporting bookmark %s\n"),
                          _('exporting bookmark %s failed!\n')),
               'delete': (_("deleting remote bookmark %s\n"),
                          _('deleting remote bookmark %s failed!\n')),
               }


 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
          opargs=None):
     '''Push outgoing changesets (limited by revs) from a local
     repository to remote. Return an integer:
       - None means nothing to push
       - 0 means HTTP error
       - 1 means we pushed and remote head count is unchanged *or*
         we have outgoing changesets but refused to push
       - other values as described by addchangegroup()
     '''
     if opargs is None:
         opargs = {}
     pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                            **pycompat.strkwargs(opargs))
     if pushop.remote.local():
         missing = (set(pushop.repo.requirements)
                    - pushop.remote.local().supported)
         if missing:
             msg = _("required features are not"
                     " supported in the destination:"
                     " %s") % (', '.join(sorted(missing)))
             raise error.Abort(msg)

     if not pushop.remote.canpush():
         raise error.Abort(_("destination does not support push"))

     if not pushop.remote.capable('unbundle'):
         raise error.Abort(_('cannot push: destination does not support the '
                             'unbundle wire protocol command'))

     # get lock as we might write phase data
     wlock = lock = None
     try:
         # bundle2 push may receive a reply bundle touching bookmarks or other
         # things requiring the wlock. Take it now to ensure proper ordering.
         maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
         if (not _forcebundle1(pushop)) and maypushback:
             wlock = pushop.repo.wlock()
         lock = pushop.repo.lock()
         pushop.trmanager = transactionmanager(pushop.repo,
                                               'push-response',
                                               pushop.remote.url())
     except error.LockUnavailable as err:
         # source repo cannot be locked.
         # We do not abort the push, but just disable the local phase
         # synchronisation.
         msg = 'cannot lock source repository: %s\n' % err
         pushop.ui.debug(msg)

     with wlock or util.nullcontextmanager(), \
             lock or util.nullcontextmanager(), \
             pushop.trmanager or util.nullcontextmanager():
         pushop.repo.checkpush(pushop)
         _pushdiscovery(pushop)
         if not _forcebundle1(pushop):
             _pushbundle2(pushop)
         _pushchangeset(pushop)
         _pushsyncphase(pushop)
         _pushobsolete(pushop)
         _pushbookmark(pushop)

     if repo.ui.configbool('experimental', 'remotenames'):
         logexchange.pullremotenames(repo, remote)

     return pushop
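
A small sketch of how a caller might interpret the integer results the
docstring above documents (describe_cgresult is illustrative only):

    def describe_cgresult(cgresult):
        if cgresult is None:
            return 'nothing to push'
        if cgresult == 0:
            return 'HTTP error'
        if cgresult == 1:
            return 'pushed; remote head count unchanged, or push refused'
        return 'see addchangegroup(): %d' % cgresult

    print(describe_cgresult(1))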

 # list of steps to perform discovery before push
 pushdiscoveryorder = []

 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 pushdiscoverymapping = {}

 def pushdiscovery(stepname):
     """decorator for functions performing discovery before push

     The function is added to the step -> function mapping and appended to the
     list of steps. Beware that decorated functions will be added in order
     (this may matter).

     You can only use this decorator for a new step; if you want to wrap a step
     from an extension, change the pushdiscoverymapping dictionary directly."""
     def dec(func):
         assert stepname not in pushdiscoverymapping
         pushdiscoverymapping[stepname] = func
         pushdiscoveryorder.append(stepname)
         return func
     return dec
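
A hypothetical extension would register an extra discovery step like this
(the step name 'mystep' is made up; the decorator itself is the real one
defined above):

    from mercurial import exchange

    @exchange.pushdiscovery('mystep')
    def _pushdiscoverymystep(pushop):
        # runs after the built-in steps, since registration order decides
        pushop.ui.debug('running custom discovery\n')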

 def _pushdiscovery(pushop):
     """Run all discovery steps"""
     for stepname in pushdiscoveryorder:
         step = pushdiscoverymapping[stepname]
         step(pushop)

 @pushdiscovery('changeset')
 def _pushdiscoverychangeset(pushop):
     """discover the changesets that need to be pushed"""
     fci = discovery.findcommonincoming
     if pushop.revs:
         commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                         ancestorsof=pushop.revs)
     else:
         commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
     common, inc, remoteheads = commoninc
     fco = discovery.findcommonoutgoing
     outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                    commoninc=commoninc, force=pushop.force)
     pushop.outgoing = outgoing
     pushop.remoteheads = remoteheads
     pushop.incoming = inc

 @pushdiscovery('phase')
 def _pushdiscoveryphase(pushop):
     """discover the phases that need to be pushed

     (computed for both the success and failure cases of the changeset push)"""
     outgoing = pushop.outgoing
     unfi = pushop.repo.unfiltered()
     remotephases = listkeys(pushop.remote, 'phases')

     if (pushop.ui.configbool('ui', '_usedassubrepo')
         and remotephases    # server supports phases
         and not pushop.outgoing.missing # no changesets to be pushed
         and remotephases.get('publishing', False)):
         # When:
         # - this is a subrepo push
         # - and the remote supports phases
         # - and no changesets are to be pushed
         # - and the remote is publishing
         # We may be in issue 3781 case!
         # We drop the possible phase synchronisation done by
         # courtesy to publish changesets possibly locally draft
         # on the remote.
         pushop.outdatedphases = []
         pushop.fallbackoutdatedphases = []
         return

     pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                      pushop.fallbackheads,
                                                      remotephases)
     droots = pushop.remotephases.draftroots

     extracond = ''
     if not pushop.remotephases.publishing:
         extracond = ' and public()'
     revset = 'heads((%%ln::%%ln) %s)' % extracond
     # Get the list of all revs draft on remote but public here.
     # XXX Beware that the revset breaks if droots is not strictly roots;
     # XXX we may want to ensure it is, but that is costly.
     fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
     if not outgoing.missing:
         future = fallback
     else:
         # adds the changesets we are going to push as draft
         #
         # should not be necessary for publishing server, but because of an
         # issue fixed in xxxxx we have to do it anyway.
         fdroots = list(unfi.set('roots(%ln + %ln::)',
                                 outgoing.missing, droots))
         fdroots = [f.node() for f in fdroots]
         future = list(unfi.set(revset, fdroots, pushop.futureheads))
     pushop.outdatedphases = future
     pushop.fallbackoutdatedphases = fallback

 @pushdiscovery('obsmarker')
 def _pushdiscoveryobsmarkers(pushop):
     if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
         return

     if not pushop.repo.obsstore:
         return

     if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
         return

     repo = pushop.repo
     # very naive computation, which can be quite expensive on big repos.
     # However: evolution is currently slow on them anyway.
     nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
     pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

 @pushdiscovery('bookmarks')
 def _pushdiscoverybookmarks(pushop):
     ui = pushop.ui
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     ui.debug("checking for updated bookmarks\n")
     ancestors = ()
     if pushop.revs:
         revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
         ancestors = repo.changelog.ancestors(revnums, inclusive=True)

     remotebookmark = listkeys(remote, 'bookmarks')

     explicit = set([repo._bookmarks.expandname(bookmark)
                     for bookmark in pushop.bookmarks])

     remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
     comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

     def safehex(x):
         if x is None:
             return x
         return hex(x)

     def hexifycompbookmarks(bookmarks):
         return [(b, safehex(scid), safehex(dcid))
                 for (b, scid, dcid) in bookmarks]

     comp = [hexifycompbookmarks(marks) for marks in comp]
     return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)

 def _processcompared(pushop, pushed, explicit, remotebms, comp):
     """decide which bookmarks to push to the remote

     Exists to help extensions that want to alter this behavior.
     """
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

     repo = pushop.repo

     for b, scid, dcid in advsrc:
         if b in explicit:
             explicit.remove(b)
         if not pushed or repo[scid].rev() in pushed:
             pushop.outbookmarks.append((b, dcid, scid))
     # search for added bookmarks
     for b, scid, dcid in addsrc:
         if b in explicit:
             explicit.remove(b)
             pushop.outbookmarks.append((b, '', scid))
     # search for overwritten bookmarks
     for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
         if b in explicit:
             explicit.remove(b)
             pushop.outbookmarks.append((b, dcid, scid))
     # search for bookmarks to delete
     for b, scid, dcid in adddst:
         if b in explicit:
             explicit.remove(b)
             # treat as "deleted locally"
             pushop.outbookmarks.append((b, dcid, ''))
     # identical bookmarks shouldn't get reported
     for b, scid, dcid in same:
         if b in explicit:
             explicit.remove(b)

     if explicit:
         explicit = sorted(explicit)
         # we should probably list all of them
         pushop.ui.warn(_('bookmark %s does not exist on the local '
                          'or remote repository!\n') % explicit[0])
         pushop.bkresult = 2

     pushop.outbookmarks.sort()

 def _pushcheckoutgoing(pushop):
     outgoing = pushop.outgoing
     unfi = pushop.repo.unfiltered()
     if not outgoing.missing:
         # nothing to push
         scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
         return False
     # something to push
     if not pushop.force:
         # if repo.obsstore == False --> no obsolete
         # then, save the iteration
         if unfi.obsstore:
             # these messages are here for 80 char limit reasons
             mso = _("push includes obsolete changeset: %s!")
             mspd = _("push includes phase-divergent changeset: %s!")
             mscd = _("push includes content-divergent changeset: %s!")
             mst = {"orphan": _("push includes orphan changeset: %s!"),
                    "phase-divergent": mspd,
                    "content-divergent": mscd}
             # If we are to push and there is at least one
             # obsolete or unstable changeset in missing, at
             # least one of the missing heads will be obsolete or
             # unstable. So checking heads only is ok
             for node in outgoing.missingheads:
                 ctx = unfi[node]
                 if ctx.obsolete():
                     raise error.Abort(mso % ctx)
                 elif ctx.isunstable():
                     # TODO print more than one instability in the abort
                     # message
                     raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

     discovery.checkheads(pushop)
     return True

 # List of names of steps to perform for an outgoing bundle2, order matters.
 b2partsgenorder = []

 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 b2partsgenmapping = {}

 def b2partsgenerator(stepname, idx=None):
     """decorator for functions generating bundle2 parts

     The function is added to the step -> function mapping and appended to the
     list of steps. Beware that decorated functions will be added in order
     (this may matter).

     You can only use this decorator for new steps; if you want to wrap a step
     from an extension, change the b2partsgenmapping dictionary directly."""
     def dec(func):
         assert stepname not in b2partsgenmapping
         b2partsgenmapping[stepname] = func
         if idx is None:
             b2partsgenorder.append(stepname)
         else:
             b2partsgenorder.insert(idx, stepname)
         return func
     return dec
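
The optional idx argument inserts a step at a given position instead of
appending it. A hypothetical extension part generator (the part name
'my-part' is made up):

    from mercurial import exchange

    @exchange.b2partsgenerator('my-part', idx=0)
    def _pushb2mypart(pushop, bundler):
        if 'my-part' in pushop.stepsdone:
            return
        pushop.stepsdone.add('my-part')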
798
797
799 def _pushb2ctxcheckheads(pushop, bundler):
798 def _pushb2ctxcheckheads(pushop, bundler):
800 """Generate race condition checking parts
799 """Generate race condition checking parts
801
800
802 Exists as an independent function to aid extensions
801 Exists as an independent function to aid extensions
803 """
802 """
804 # * 'force' do not check for push race,
803 # * 'force' do not check for push race,
805 # * if we don't push anything, there are nothing to check.
804 # * if we don't push anything, there are nothing to check.
806 if not pushop.force and pushop.outgoing.missingheads:
805 if not pushop.force and pushop.outgoing.missingheads:
807 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
806 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
808 emptyremote = pushop.pushbranchmap is None
807 emptyremote = pushop.pushbranchmap is None
809 if not allowunrelated or emptyremote:
808 if not allowunrelated or emptyremote:
810 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
809 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
811 else:
810 else:
812 affected = set()
811 affected = set()
813 for branch, heads in pushop.pushbranchmap.iteritems():
812 for branch, heads in pushop.pushbranchmap.iteritems():
814 remoteheads, newheads, unsyncedheads, discardedheads = heads
813 remoteheads, newheads, unsyncedheads, discardedheads = heads
815 if remoteheads is not None:
814 if remoteheads is not None:
816 remote = set(remoteheads)
815 remote = set(remoteheads)
817 affected |= set(discardedheads) & remote
816 affected |= set(discardedheads) & remote
818 affected |= remote - set(newheads)
817 affected |= remote - set(newheads)
819 if affected:
818 if affected:
820 data = iter(sorted(affected))
819 data = iter(sorted(affected))
821 bundler.newpart('check:updated-heads', data=data)
820 bundler.newpart('check:updated-heads', data=data)
822
821
823 def _pushing(pushop):
822 def _pushing(pushop):
824 """return True if we are pushing anything"""
823 """return True if we are pushing anything"""
825 return bool(pushop.outgoing.missing
824 return bool(pushop.outgoing.missing
826 or pushop.outdatedphases
825 or pushop.outdatedphases
827 or pushop.outobsmarkers
826 or pushop.outobsmarkers
828 or pushop.outbookmarks)
827 or pushop.outbookmarks)
829
828
830 @b2partsgenerator('check-bookmarks')
829 @b2partsgenerator('check-bookmarks')
831 def _pushb2checkbookmarks(pushop, bundler):
830 def _pushb2checkbookmarks(pushop, bundler):
832 """insert bookmark move checking"""
831 """insert bookmark move checking"""
833 if not _pushing(pushop) or pushop.force:
832 if not _pushing(pushop) or pushop.force:
834 return
833 return
835 b2caps = bundle2.bundle2caps(pushop.remote)
834 b2caps = bundle2.bundle2caps(pushop.remote)
836 hasbookmarkcheck = 'bookmarks' in b2caps
835 hasbookmarkcheck = 'bookmarks' in b2caps
837 if not (pushop.outbookmarks and hasbookmarkcheck):
836 if not (pushop.outbookmarks and hasbookmarkcheck):
838 return
837 return
839 data = []
838 data = []
840 for book, old, new in pushop.outbookmarks:
839 for book, old, new in pushop.outbookmarks:
841 old = bin(old)
840 old = bin(old)
842 data.append((book, old))
841 data.append((book, old))
843 checkdata = bookmod.binaryencode(data)
842 checkdata = bookmod.binaryencode(data)
844 bundler.newpart('check:bookmarks', data=checkdata)
843 bundler.newpart('check:bookmarks', data=checkdata)
845
844
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

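# Illustrative mapping (the hex values stand in for real bookmark nodes):
#
#   _bmaction('', 'cafe...')        -> 'export'  (bookmark is new on remote)
#   _bmaction('cafe...', '')        -> 'delete'  (bookmark removed)
#   _bmaction('cafe...', 'beef...') -> 'update'  (bookmark moved)
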
def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # a failed push aborts before reply handlers run, so reaching this
        # point means every bookmark action succeeded
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

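# For reference, a client typically populates ``pushop.pushvars`` from the
# command line (the variable names here are examples only)::
#
#   hg push --pushvars "DEBUG=1" --pushvars "REASON=hotfix"
#
# This generator turns them into advisory 'pushvars' part parameters which,
# if the server enables the feature, server-side hooks can read as
# HG_USERVAR_DEBUG and HG_USERVAR_REASON environment variables.
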
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
    except bundle2.AbortFromPart as exc:
        pushop.ui.status(_('remote: %s\n') % exc)
        if exc.hint is not None:
            pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
        raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

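# A minimal sketch of the parts-generator protocol driven by the loop above.
# A hypothetical extension registers a generator with ``@b2partsgenerator``;
# returning a callable makes it a reply handler invoked after the push. The
# step name, part name, and payload below are illustrative, not a real
# bundle2 part (kept as a comment so nothing is registered at import time):
#
#   @b2partsgenerator('example')
#   def _pushb2example(pushop, bundler):
#       if 'example' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('example')
#       part = bundler.newpart('x-example', data=b'payload')
#       def handlereply(op):
#           pass  # inspect op.records.getreplies(part.id) here
#       return handlereply
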
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in the issue 3871 case!
        # We drop the phase synchronisation that would otherwise be done as
        # a courtesy, so that changesets possibly still draft locally get
        # published on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

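# Usage sketch: ``transactionmanager`` is a ``util.transactional``, so it can
# drive a with-block; ``pull()`` below uses it exactly this way (the url here
# is a placeholder):
#
#   trmanager = transactionmanager(repo, 'pull', 'https://example.com/repo')
#   with trmanager:
#       tr = trmanager.transaction()  # created lazily, hook args attached
#       ...                           # apply incoming data under ``tr``
#   # util.transactional calls close() on success and release() afterward
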
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand('listkeys', {'namespace': namespace}).result()

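# Example result shapes (the hex node and bookmark name are illustrative):
#
#   listkeys(remote, 'bookmarks') -> {'@': '1f0dee641bb7258c56bd60e93edfa2405381c41e'}
#   listkeys(remote, 'phases')    -> {'publishing': 'True', ...}
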
def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common

def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop

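# Usage sketch (callers normally reach this via ``commands.pull`` or
# ``hg.clone``; constructing a peer by hand as below is one common way to
# obtain ``remote``, and the URL is a placeholder):
#
#   from mercurial import hg
#   peer = hg.peer(repo.ui, {}, 'https://example.com/repo')
#   pullop = pull(repo, peer)                 # pull everything
#   pullop = pull(repo, peer, heads=[somenode], bookmarks=['@'])
#   if pullop.cgresult:
#       pass  # changesets were added
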
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the ``pulldiscoverymapping`` dictionary
    directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

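# Sketch of wrapping an existing step without registering a new one, per the
# ``pulldiscovery`` docstring above ('changegroup' is a real step; the
# wrapper itself is hypothetical):
#
#   origstep = pulldiscoverymapping['changegroup']
#   def wrappedstep(pullop):
#       pullop.repo.ui.debug('entering changegroup discovery\n')
#       return origstep(pullop)
#   pulldiscoverymapping['changegroup'] = wrappedstep
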
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological number of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

1586 def _pullbundle2(pullop):
1585 def _pullbundle2(pullop):
1587 """pull data using bundle2
1586 """pull data using bundle2
1588
1587
1589 For now, the only supported data are changegroup."""
1588 For now, the only supported data are changegroup."""
1590 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1589 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1591
1590
1592 # make ui easier to access
1591 # make ui easier to access
1593 ui = pullop.repo.ui
1592 ui = pullop.repo.ui
1594
1593
1595 # At the moment we don't do stream clones over bundle2. If that is
1594 # At the moment we don't do stream clones over bundle2. If that is
1596 # implemented then here's where the check for that will go.
1595 # implemented then here's where the check for that will go.
1597 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1596 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1598
1597
1599 # declare pull perimeters
1598 # declare pull perimeters
1600 kwargs['common'] = pullop.common
1599 kwargs['common'] = pullop.common
1601 kwargs['heads'] = pullop.heads or pullop.rheads
1600 kwargs['heads'] = pullop.heads or pullop.rheads
1602
1601
1603 if streaming:
1602 if streaming:
1604 kwargs['cg'] = False
1603 kwargs['cg'] = False
1605 kwargs['stream'] = True
1604 kwargs['stream'] = True
1606 pullop.stepsdone.add('changegroup')
1605 pullop.stepsdone.add('changegroup')
1607 pullop.stepsdone.add('phases')
1606 pullop.stepsdone.add('phases')
1608
1607
1609 else:
1608 else:
1610 # pulling changegroup
1609 # pulling changegroup
1611 pullop.stepsdone.add('changegroup')
1610 pullop.stepsdone.add('changegroup')
1612
1611
1613 kwargs['cg'] = pullop.fetch
1612 kwargs['cg'] = pullop.fetch
1614
1613
1615 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1614 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1616 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1615 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1617 if (not legacyphase and hasbinaryphase):
1616 if (not legacyphase and hasbinaryphase):
1618 kwargs['phases'] = True
1617 kwargs['phases'] = True
1619 pullop.stepsdone.add('phases')
1618 pullop.stepsdone.add('phases')
1620
1619
1621 if 'listkeys' in pullop.remotebundle2caps:
1620 if 'listkeys' in pullop.remotebundle2caps:
1622 if 'phases' not in pullop.stepsdone:
1621 if 'phases' not in pullop.stepsdone:
1623 kwargs['listkeys'] = ['phases']
1622 kwargs['listkeys'] = ['phases']
1624
1623
1625 bookmarksrequested = False
1624 bookmarksrequested = False
1626 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1625 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1627 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1626 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1628
1627
1629 if pullop.remotebookmarks is not None:
1628 if pullop.remotebookmarks is not None:
1630 pullop.stepsdone.add('request-bookmarks')
1629 pullop.stepsdone.add('request-bookmarks')
1631
1630
1632 if ('request-bookmarks' not in pullop.stepsdone
1631 if ('request-bookmarks' not in pullop.stepsdone
1633 and pullop.remotebookmarks is None
1632 and pullop.remotebookmarks is None
1634 and not legacybookmark and hasbinarybook):
1633 and not legacybookmark and hasbinarybook):
1635 kwargs['bookmarks'] = True
1634 kwargs['bookmarks'] = True
1636 bookmarksrequested = True
1635 bookmarksrequested = True
1637
1636
1638 if 'listkeys' in pullop.remotebundle2caps:
1637 if 'listkeys' in pullop.remotebundle2caps:
1639 if 'request-bookmarks' not in pullop.stepsdone:
1638 if 'request-bookmarks' not in pullop.stepsdone:
1640 # make sure to always includes bookmark data when migrating
1639 # make sure to always includes bookmark data when migrating
1641 # `hg incoming --bundle` to using this function.
1640 # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data was either already there or was pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

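# Illustrative sketch (assumptions, not part of the module): for a full pull
# from a bundle2 server that advertises binary phases and bookmarks support,
# the code above typically assembles getbundle arguments along these lines
# before issuing the command:
#
#   kwargs = {'cg': True,             # changegroup requested
#             'phases': True,         # binary phase-heads part
#             'bookmarks': True,      # binary bookmarks part
#             'cbattempted': False}   # clone bundles supported, none tried
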
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

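# Illustrative sketch (assumption; the wrapping pattern is standard Mercurial
# extension technique rather than anything defined here): an extension can
# wrap the hook above to request additional bundle2 parts during pull:
#
#   from mercurial import exchange, extensions
#
#   def _extraprepare(orig, pullop, kwargs):
#       orig(pullop, kwargs)
#       kwargs['mycustompart'] = True  # hypothetical part flag
#
#   def extsetup(ui):
#       extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                               _extraprepare)
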
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing, and so we don't break a future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

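# Summary of the capability ladder used above (illustrative, newest first):
#
#   getbundle         -> preferred; supports common/heads negotiation
#   changegroup       -> full pulls only (pullop.heads is None)
#   changegroupsubset -> partial pulls against legacy servers
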
def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, 'phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

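# Illustrative sketch (assumptions): pulling two heads n1, n2 from a
# publishing server, the 'phases' listkeys payload drives the code above as:
#
#   remotephases = {'publishing': 'True'}
#   -> pheads = pullop.pulledsubset ([n1, n2]), dheads = []
#   -> phases.advanceboundary(pullop.repo, tr, phases.public, [n1, n2])
#
# A non-publishing server instead reports draft roots as hex-node keys, which
# phases.analyzeremotephases() turns into public head candidates.
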
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` callable returns the pull transaction, creating one
    if necessary. We return the transaction to inform the calling code that
    a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

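# Illustrative sketch (assumption): the 'obsolete' pushkey namespace delivers
# markers as base85-encoded chunks keyed 'dump0', 'dump1', ..., so the loop
# above effectively does, per chunk:
#
#   data = util.b85decode(remoteobs['dump0'])
#   version, markers = obsolete._readmarkers(data)
#   pullop.repo.obsstore.add(tr, markers)
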
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args

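# Illustrative sketch (assumption): server-side configuration consumed by
# applynarrowacl(), e.g. in the server's hgrc:
#
#   [narrowhgacl]
#   default.includes = dir1 dir2/subdir
#   alice.includes = *
#   alice.excludes = dir2/secret
#
# '*' becomes 'path:.' and bare paths gain a 'path:' prefix before being
# intersected with the patterns the client requested.
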
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots

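# Worked example (under stated assumptions): take a linear DAG 0 -> 1 -> 2 -> 3
# with common = {node(0)}, heads = {node(3)}, known = set(), and a narrowspec
# matched only by revisions 1 and 3. The walk above visits 3, 2, 1 and yields:
#
#   visitnodes     = [node(1), node(2), node(3)]
#   relevant_nodes = {node(1), node(3)}
#   ellipsisroots  = {2: {1}}   # 2 is elided but required as 3's parent,
#                               # so it becomes an ellipsis node rooted at 1
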
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

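# Illustrative sketch (assumption): registering a new step works exactly like
# the built-in parts defined below, e.g.:
#
#   @getbundle2partsgenerator('myextradata')
#   def _getbundlemyextradata(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#       bundler.newpart('myextradata', data=b'payload')  # hypothetical part
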
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()

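# Illustrative usage sketch (assumptions): a bundle2-capable caller passes an
# 'HG20' bundlecap and concatenates the returned chunks:
#
#   info, gen = getbundlechunks(repo, 'serve', heads=None, common=None,
#                               bundlecaps={'HG20'})
#   assert info['bundleversion'] == 2
#   raw = ''.join(gen)  # the raw bundle2 stream
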
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get(r'cg', True):
        return

    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get(r'narrow', False):
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        filematcher = narrowspec.match(repo.root, include=include,
                                       exclude=exclude)
    else:
        filematcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps,
                                      filematcher=filematcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    if kwargs.get(r'narrow', False) and (include or exclude):
        narrowspecpart = bundler.newpart('narrow:spec')
        if include:
            narrowspecpart.addparam(
                'include', '\n'.join(include), mandatory=True)
        if exclude:
            narrowspecpart.addparam(
                'exclude', '\n'.join(exclude), mandatory=True)

@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - a narrow bundle isn't in play (not currently compatible).
    if (not kwargs.get(r'cg', True)
        or 'rev-branch-cache' not in b2caps
        or kwargs.get(r'narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

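# Illustrative sketch (assumption): a bundle1 client pins the remote state it
# observed at discovery time either literally or as a digest:
#
#   their_heads = ['hashed',
#                  hashlib.sha1(''.join(sorted(observed_heads))).digest()]
#
# check_heads() recomputes the digest over the current heads and raises
# PushRaced when the repository changed in between.
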
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of
    the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

2367 def _maybeapplyclonebundle(pullop):
2367 def _maybeapplyclonebundle(pullop):
2368 """Apply a clone bundle from a remote, if possible."""
2368 """Apply a clone bundle from a remote, if possible."""
2369
2369
2370 repo = pullop.repo
2370 repo = pullop.repo
2371 remote = pullop.remote
2371 remote = pullop.remote
2372
2372
2373 if not repo.ui.configbool('ui', 'clonebundles'):
2373 if not repo.ui.configbool('ui', 'clonebundles'):
2374 return
2374 return
2375
2375
2376 # Only run if local repo is empty.
2376 # Only run if local repo is empty.
2377 if len(repo):
2377 if len(repo):
2378 return
2378 return
2379
2379
2380 if pullop.heads:
2380 if pullop.heads:
2381 return
2381 return
2382
2382
2383 if not remote.capable('clonebundles'):
2383 if not remote.capable('clonebundles'):
2384 return
2384 return
2385
2385
2386 with remote.commandexecutor() as e:
2386 with remote.commandexecutor() as e:
2387 res = e.callcommand('clonebundles', {}).result()
2387 res = e.callcommand('clonebundles', {}).result()
2388
2388
2389 # If we call the wire protocol command, that's good enough to record the
2389 # If we call the wire protocol command, that's good enough to record the
2390 # attempt.
2390 # attempt.
2391 pullop.clonebundleattempted = True
2391 pullop.clonebundleattempted = True
2392
2392
2393 entries = parseclonebundlesmanifest(repo, res)
2393 entries = parseclonebundlesmanifest(repo, res)
2394 if not entries:
2394 if not entries:
2395 repo.ui.note(_('no clone bundles available on remote; '
2395 repo.ui.note(_('no clone bundles available on remote; '
2396 'falling back to regular clone\n'))
2396 'falling back to regular clone\n'))
2397 return
2397 return
2398
2398
2399 entries = filterclonebundleentries(
2399 entries = filterclonebundleentries(
2400 repo, entries, streamclonerequested=pullop.streamclonerequested)
2400 repo, entries, streamclonerequested=pullop.streamclonerequested)
2401
2401
2402 if not entries:
2402 if not entries:
2403 # There is a thundering herd concern here. However, if a server
2403 # There is a thundering herd concern here. However, if a server
2404 # operator doesn't advertise bundles appropriate for its clients,
2404 # operator doesn't advertise bundles appropriate for its clients,
2405 # they deserve what's coming. Furthermore, from a client's
2405 # they deserve what's coming. Furthermore, from a client's
2406 # perspective, no automatic fallback would mean not being able to
2406 # perspective, no automatic fallback would mean not being able to
2407 # clone!
2407 # clone!
2408 repo.ui.warn(_('no compatible clone bundles available on server; '
2408 repo.ui.warn(_('no compatible clone bundles available on server; '
2409 'falling back to regular clone\n'))
2409 'falling back to regular clone\n'))
2410 repo.ui.warn(_('(you may want to report this to the server '
2410 repo.ui.warn(_('(you may want to report this to the server '
2411 'operator)\n'))
2411 'operator)\n'))
2412 return
2412 return
2413
2413
2414 entries = sortclonebundleentries(repo.ui, entries)
2414 entries = sortclonebundleentries(repo.ui, entries)
2415
2415
2416 url = entries[0]['URL']
2416 url = entries[0]['URL']
2417 repo.ui.status(_('applying clone bundle from %s\n') % url)
2417 repo.ui.status(_('applying clone bundle from %s\n') % url)
2418 if trypullbundlefromurl(repo.ui, repo, url):
2418 if trypullbundlefromurl(repo.ui, repo, url):
2419 repo.ui.status(_('finished applying clone bundle\n'))
2419 repo.ui.status(_('finished applying clone bundle\n'))
2420 # Bundle failed.
2420 # Bundle failed.
2421 #
2421 #
2422 # We abort by default to avoid the thundering herd of
2422 # We abort by default to avoid the thundering herd of
2423 # clients flooding a server that was expecting expensive
2423 # clients flooding a server that was expecting expensive
2424 # clone load to be offloaded.
2424 # clone load to be offloaded.
2425 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2425 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2426 repo.ui.warn(_('falling back to normal clone\n'))
2426 repo.ui.warn(_('falling back to normal clone\n'))
2427 else:
2427 else:
2428 raise error.Abort(_('error applying bundle'),
2428 raise error.Abort(_('error applying bundle'),
2429 hint=_('if this error persists, consider contacting '
2429 hint=_('if this error persists, consider contacting '
2430 'the server operator or disable clone '
2430 'the server operator or disable clone '
2431 'bundles via '
2431 'bundles via '
2432 '"--config ui.clonebundles=false"'))
2432 '"--config ui.clonebundles=false"'))
2433
2433
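# Hedged illustration, not part of the original module: the two config knobs
# consulted above, as they might appear in an hgrc. The values shown flip the
# usual behavior (clone bundles are attempted and the fallback is off unless
# configured otherwise).
#
#   [ui]
#   # skip the clone bundle path entirely
#   clonebundles = false
#   # fall back to a regular clone if applying a bundle fails
#   clonebundlefallback = true
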
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

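# A hedged sketch, not part of the original module: feeding a hypothetical
# manifest through parseclonebundlesmanifest(). The URLs and attribute values
# below are invented for illustration.
def _demoparseclonebundlesmanifest(repo):
    raw = ('https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true\n'
           'https://example.com/packed.hg BUNDLESPEC=none-packed1\n')
    entries = parseclonebundlesmanifest(repo, raw)
    # entries[0] would look like:
    #   {'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
    #    'REQUIRESNI': 'true', 'COMPRESSION': 'gzip', 'VERSION': 'v2'}
    return entries
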
def isstreamclonespec(bundlespec):
    # Stream clone v1
    if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
        return True

    # Stream clone v2
    if (bundlespec.wirecompression == 'UN' and
            bundlespec.wireversion == '02' and
            bundlespec.contentopts.get('streamv2')):
        return True

    return False

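# Hedged examples, not in the original module: which parsed specs the
# predicate above accepts. 'none-packed1' is the stream clone v1 spec,
# while a plain 'gzip-v2' changegroup spec is not a stream clone.
def _demoisstreamclonespec(repo):
    assert isstreamclonespec(parsebundlespec(repo, 'none-packed1'))
    assert not isstreamclonespec(parsebundlespec(repo, 'gzip-v2'))
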
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

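# Hedged sketch, not part of the original module: when a stream clone is
# requested, only entries whose BUNDLESPEC parses as a stream clone spec
# survive the filter. URLs and specs are invented for illustration.
def _demofilterclonebundleentries(repo):
    entries = [{'URL': 'https://example.com/full.hg',
                'BUNDLESPEC': 'gzip-v2'},
               {'URL': 'https://example.com/packed.hg',
                'BUNDLESPEC': 'none-packed1'}]
    # Only the 'none-packed1' (stream clone v1) entry should remain.
    return filterclonebundleentries(repo, entries,
                                    streamclonerequested=True)
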
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

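# Hedged sketch, not part of the original module: with a hypothetical config
#
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, VERSION=v2
#
# the zstd entry sorts ahead of the gzip one; equal attributes fall through
# to the next preference, and entries that can't be distinguished keep their
# manifest order (sorted() is stable and _cmp returns 0 for them).
def _demosortclonebundleentries(ui):
    entries = [{'URL': 'a', 'COMPRESSION': 'gzip', 'VERSION': 'v2'},
               {'URL': 'b', 'COMPRESSION': 'zstd', 'VERSION': 'v2'}]
    return sortclonebundleentries(ui, entries)  # 'b' sorts before 'a'
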
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False
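
# Hedged sketch, not part of the original module: how the helpers above
# compose, mirroring the clone bundle pull path minus the wire protocol
# plumbing. `manifesttext` stands in for the remote's manifest response.
def _democlonebundlepull(repo, manifesttext):
    entries = parseclonebundlesmanifest(repo, manifesttext)
    entries = filterclonebundleentries(repo, entries)
    entries = sortclonebundleentries(repo.ui, entries)
    if entries:
        return trypullbundlefromurl(repo.ui, repo, entries[0]['URL'])
    return False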