push: move bundle2-pushkey based bookmarks exchange in its own function...
Boris Feld - r35263:3fd5f05a default
@@ -1,2137 +1,2139 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    obsolete,
    phases,
    pushkey,
    pycompat,
    remotenames,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', # legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params

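# Illustrative sketch (not part of upstream exchange.py): with the default
# externalnames=False, the human names are mapped to their internal
# identifiers, so one would expect roughly:
#
#   >>> parsebundlespec(repo, 'gzip-v2')
#   ('GZ', '02', {})
#   >>> parsebundlespec(repo, 'v2', strict=False)
#   ('BZ', '02', {})
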
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

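# Illustrative sketch (not part of upstream exchange.py): for a file created
# with 'hg bundle --type gzip-v1' this returns 'gzip-v1'; for a stream clone
# bundle it returns something like 'none-packed1;requirements%3Drevlogv1'
# (the parameters are URI quoted).
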
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # Value is a list of bundle versions to pick from; the highest version
    # should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

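# Illustrative sketch (not part of upstream exchange.py): forcing the old
# wire format from a test or hgrc would look like
#
#   [devel]
#   legacy.exchange = bundle1
#
# With 'bundle2' absent from that list, _forcebundle1() returns True even
# when the remote advertises the bundle2 capability.
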
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes that must be pushed along with the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map {pushkey partid -> callback handling failure}
        # used to handle exceptions from mandatory pushkey part failures
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of messages used when pushing bookmarks
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }


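# Illustrative sketch (not part of upstream exchange.py): consumers pick the
# success or failure message by index, roughly:
#
#   msg, failmsg = pushoperation.bookmsgmap['update']
#   ui.status(msg % 'my-bookmark')    # "updating bookmark my-bookmark"
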
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Returns the pushoperation object; its ``cgresult``
    attribute holds the changegroup push result:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop

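# Illustrative sketch (not part of upstream exchange.py): a caller drives a
# push roughly as follows (the local variable names are hypothetical):
#
#   pushop = exchange.push(repo, other, revs=nodes, bookmarks=('stable',))
#   if pushop.cgresult == 0:
#       ui.warn('push failed at the HTTP level\n')
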
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

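# Illustrative sketch (not part of upstream exchange.py): an extension could
# register an extra discovery step like so (the step name and helper are
# hypothetical):
#
#   @pushdiscovery('my-extra-data')
#   def _pushdiscoverymyextradata(pushop):
#       pushop.myextradata = compute_outgoing_extra_data(pushop.repo)
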
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure case of the changeset push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changesets are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

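# Illustrative note (not part of upstream exchange.py): after this step,
# pushop.outbookmarks holds (name, old-remote-id, new-id) triples of hex
# strings, roughly:
#
#   ('stable', '', '<hexnode>')    # export: bookmark is new on the remote
#   ('stable', '<hexnode>', '')    # delete: bookmark was removed locally
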
def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are assigned to names here for 80-char limit
            # reasons
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If there is at least one obsolete or unstable
            # changeset in missing, at least one of the missing
            # heads will be obsolete or unstable. So checking heads
            # only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

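# Illustrative sketch (not part of upstream exchange.py): the optional
# ``idx`` lets a part generator be inserted at a specific position instead
# of being appended, e.g. (the step name is hypothetical):
#
#   @b2partsgenerator('my-early-part', idx=0)
#   def _pushb2myearlypart(pushop, bundler):
#       pass  # would run before every previously registered step
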
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * with 'force', we do not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

747 @b2partsgenerator('check-bookmarks')
747 @b2partsgenerator('check-bookmarks')
748 def _pushb2checkbookmarks(pushop, bundler):
748 def _pushb2checkbookmarks(pushop, bundler):
749 """insert bookmark move checking"""
749 """insert bookmark move checking"""
750 if not _pushing(pushop) or pushop.force:
750 if not _pushing(pushop) or pushop.force:
751 return
751 return
752 b2caps = bundle2.bundle2caps(pushop.remote)
752 b2caps = bundle2.bundle2caps(pushop.remote)
753 hasbookmarkcheck = 'bookmarks' in b2caps
753 hasbookmarkcheck = 'bookmarks' in b2caps
754 if not (pushop.outbookmarks and hasbookmarkcheck):
754 if not (pushop.outbookmarks and hasbookmarkcheck):
755 return
755 return
756 data = []
756 data = []
757 for book, old, new in pushop.outbookmarks:
757 for book, old, new in pushop.outbookmarks:
758 old = bin(old)
758 old = bin(old)
759 data.append((book, old))
759 data.append((book, old))
760 checkdata = bookmod.binaryencode(data)
760 checkdata = bookmod.binaryencode(data)
761 bundler.newpart('check:bookmarks', data=checkdata)
761 bundler.newpart('check:bookmarks', data=checkdata)
762
762
763 @b2partsgenerator('check-phases')
763 @b2partsgenerator('check-phases')
764 def _pushb2checkphases(pushop, bundler):
764 def _pushb2checkphases(pushop, bundler):
765 """insert phase move checking"""
765 """insert phase move checking"""
766 if not _pushing(pushop) or pushop.force:
766 if not _pushing(pushop) or pushop.force:
767 return
767 return
768 b2caps = bundle2.bundle2caps(pushop.remote)
768 b2caps = bundle2.bundle2caps(pushop.remote)
769 hasphaseheads = 'heads' in b2caps.get('phases', ())
769 hasphaseheads = 'heads' in b2caps.get('phases', ())
770 if pushop.remotephases is not None and hasphaseheads:
770 if pushop.remotephases is not None and hasphaseheads:
771 # check that the remote phase has not changed
771 # check that the remote phase has not changed
772 checks = [[] for p in phases.allphases]
772 checks = [[] for p in phases.allphases]
773 checks[phases.public].extend(pushop.remotephases.publicheads)
773 checks[phases.public].extend(pushop.remotephases.publicheads)
774 checks[phases.draft].extend(pushop.remotephases.draftroots)
774 checks[phases.draft].extend(pushop.remotephases.draftroots)
775 if any(checks):
775 if any(checks):
776 for nodes in checks:
776 for nodes in checks:
777 nodes.sort()
777 nodes.sort()
778 checkdata = phases.binaryencode(checks)
778 checkdata = phases.binaryencode(checks)
779 bundler.newpart('check:phases', data=checkdata)
779 bundler.newpart('check:phases', data=checkdata)
780
780
781 @b2partsgenerator('changeset')
781 @b2partsgenerator('changeset')
782 def _pushb2ctx(pushop, bundler):
782 def _pushb2ctx(pushop, bundler):
783 """handle changegroup push through bundle2
783 """handle changegroup push through bundle2
784
784
785 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
785 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
786 """
786 """
787 if 'changesets' in pushop.stepsdone:
787 if 'changesets' in pushop.stepsdone:
788 return
788 return
789 pushop.stepsdone.add('changesets')
789 pushop.stepsdone.add('changesets')
790 # Send known heads to the server for race detection.
790 # Send known heads to the server for race detection.
791 if not _pushcheckoutgoing(pushop):
791 if not _pushcheckoutgoing(pushop):
792 return
792 return
793 pushop.repo.prepushoutgoinghooks(pushop)
793 pushop.repo.prepushoutgoinghooks(pushop)
794
794
795 _pushb2ctxcheckheads(pushop, bundler)
795 _pushb2ctxcheckheads(pushop, bundler)
796
796
797 b2caps = bundle2.bundle2caps(pushop.remote)
797 b2caps = bundle2.bundle2caps(pushop.remote)
798 version = '01'
798 version = '01'
799 cgversions = b2caps.get('changegroup')
799 cgversions = b2caps.get('changegroup')
800 if cgversions: # 3.1 and 3.2 ship with an empty value
800 if cgversions: # 3.1 and 3.2 ship with an empty value
801 cgversions = [v for v in cgversions
801 cgversions = [v for v in cgversions
802 if v in changegroup.supportedoutgoingversions(
802 if v in changegroup.supportedoutgoingversions(
803 pushop.repo)]
803 pushop.repo)]
804 if not cgversions:
804 if not cgversions:
805 raise ValueError(_('no common changegroup version'))
805 raise ValueError(_('no common changegroup version'))
806 version = max(cgversions)
806 version = max(cgversions)
807 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
807 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
808 'push')
808 'push')
809 cgpart = bundler.newpart('changegroup', data=cgstream)
809 cgpart = bundler.newpart('changegroup', data=cgstream)
810 if cgversions:
810 if cgversions:
811 cgpart.addparam('version', version)
811 cgpart.addparam('version', version)
812 if 'treemanifest' in pushop.repo.requirements:
812 if 'treemanifest' in pushop.repo.requirements:
813 cgpart.addparam('treemanifest', '1')
813 cgpart.addparam('treemanifest', '1')
814 def handlereply(op):
814 def handlereply(op):
815 """extract addchangegroup returns from server reply"""
815 """extract addchangegroup returns from server reply"""
816 cgreplies = op.records.getreplies(cgpart.id)
816 cgreplies = op.records.getreplies(cgpart.id)
817 assert len(cgreplies['changegroup']) == 1
817 assert len(cgreplies['changegroup']) == 1
818 pushop.cgresult = cgreplies['changegroup'][0]['return']
818 pushop.cgresult = cgreplies['changegroup'][0]['return']
819 return handlereply
819 return handlereply
820
820
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

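# Illustrative note (not part of the original module): each pushkey part
# built above asks the server to move one head from draft to public. With
# phases.draft == 1 and phases.public == 0, the part parameters look like:
#
#   namespace='phases', key='<40-hex changeset id>', old='1', new='0'
#
# The server answers one reply record per part; handlereply() above turns a
# missing or false return value into a user-visible warning.
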
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply

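# Illustrative note (not part of the original module): the action chosen for
# each outgoing bookmark depends only on which side of the (old, new) pair is
# empty, e.g. assuming made-up hex node strings a1... and b2...:
#
#   ('feature', '',      'a1...')  -> 'export'  (bookmark is new on the remote)
#   ('feature', 'a1...', '')       -> 'delete'
#   ('feature', 'a1...', 'b2...')  -> 'update'
#
# bookmsgmap (defined earlier in this module) maps each action to its
# success/failure message pair.
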
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

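# Illustrative sketch (hypothetical helper, not part of this module): the
# parsing loop above, extracted as a standalone function to show the accepted
# 'KEY=VALUE' / 'KEY=' syntax.
#
# def parsepushvars(rawvars):
#     shellvars = {}
#     for raw in rawvars:
#         if '=' not in raw:
#             raise ValueError("unable to parse variable %r" % raw)
#         k, v = raw.split('=', 1)   # split on the first '=' only
#         shellvars[k] = v
#     return shellvars
#
# parsepushvars(['USER=alice', 'DEBUG=', 'MSG=a=b'])
#   -> {'USER': 'alice', 'DEBUG': '', 'MSG': 'a=b'}
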
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

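# Illustrative sketch (hypothetical extension code, not part of this module):
# how the parts-generator protocol driven by _pushbundle2() fits together. A
# generator registered with @b2partsgenerator is called with
# (pushop, bundler); if it returns a callable, that callable is invoked later
# with the decoded server reply.
#
# @b2partsgenerator('my-part')
# def _pushb2mypart(pushop, bundler):
#     if 'my-part' in pushop.stepsdone:
#         return
#     pushop.stepsdone.add('my-part')
#     part = bundler.newpart('pushkey')   # any existing part type
#     # ... add parameters to the part ...
#     def handlereply(op):
#         replies = op.records.getreplies(part.id)
#         # ... inspect replies ...
#     return handlereply
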
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the phase synchronisation that would otherwise be done as
        # a courtesy to publish changesets possibly left draft on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public-only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fall back to the independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

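# Illustrative note (not part of the original module): in this legacy
# (non-bundle2) path each bookmark is pushed as one pushkey round trip,
# e.g. (hex values made up):
#
#   remote.pushkey('bookmarks', 'feature', '', 'a1b2...')   # export
#   remote.pushkey('bookmarks', 'feature', 'a1b2...', '')   # delete
#
# pushkey() returns a truthy value on success and a falsy one when the
# server refuses or ignores the update.
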
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

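# Illustrative sketch (not part of the original module): the typical life
# cycle of a transactionmanager, assuming `repo` and `remote` already exist:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       tr = trmanager.transaction()   # lazily opens the real transaction
#       # ... write data under tr ...
#       trmanager.close()              # commits only if one was opened
#   finally:
#       trmanager.release()            # rolls back if close() never ran
#
# pull() below drives it exactly this way via lockmod.release().
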
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        remotenames.pullremotenames(repo, remote)

    return pullop

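# Illustrative sketch (not part of the original module): a minimal caller of
# pull(), assuming `repo` is a local repository and `other` a peer obtained
# elsewhere (e.g. via hg.peer()):
#
#   pullop = pull(repo, other)           # pull everything
#   if pullop.cgresult == 0:
#       repo.ui.status('nothing was added\n')
#
# Passing heads=[node] would restrict the pull to the ancestors of that
# revision instead.
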
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

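# Illustrative sketch (hypothetical extension code, not part of this module):
# registering an extra discovery step. The step name 'example' is made up;
# the function simply receives the pulloperation being prepared.
#
# @pulldiscovery('example')
# def _pulldiscoveryexample(pullop):
#     pullop.repo.ui.debug('running example discovery step\n')
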
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = pullop.remote.listkeys('bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; this will change to handle
    all discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch

    ui = pullop.repo.ui
    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
    if (not legacyphase and hasbinaryphase):
        kwargs['phases'] = True
        pullop.stepsdone.add('phases')

    if 'listkeys' in pullop.remotebundle2caps:
        if 'phases' not in pullop.stepsdone:
            kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

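# Illustrative note (not part of the original module): for a full clone from
# a bundle2 server with binary phase support, the kwargs assembled above end
# up looking roughly like:
#
#   {'bundlecaps': {'HG20', 'bundle2=...'},
#    'common': [nullid], 'heads': [...], 'cg': True,
#    'phases': True, 'listkeys': ['bookmarks'],
#    'cbattempted': False, 'obsmarkers': True}
#
# Exact contents vary with server capabilities and local configuration.
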
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open a transaction for nothing and break a future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

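# Illustrative note (not part of the original module): a listkeys('phases')
# reply is a dict of strings. A publishing server typically answers just
#
#   {'publishing': 'True'}
#
# while a non-publishing one also lists its draft roots by hex node, e.g.
#
#   {'publishing': 'False', '<40-hex node>': '1', ...}
#
# which is what phases.analyzeremotephases() digests above.
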
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

1611 def caps20to10(repo):
1613 def caps20to10(repo):
1612 """return a set with appropriate options to use bundle20 during getbundle"""
1614 """return a set with appropriate options to use bundle20 during getbundle"""
1613 caps = {'HG20'}
1615 caps = {'HG20'}
1614 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1616 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1615 caps.add('bundle2=' + urlreq.quote(capsblob))
1617 caps.add('bundle2=' + urlreq.quote(capsblob))
1616 return caps
1618 return caps
1617
1619
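# Illustrative note (not from the original source): for a repository whose
# bundle2 capabilities decode to something like {'HG20': [], 'changegroup':
# ['01', '02']}, caps20to10() would return roughly:
#
#   {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'}
#
# i.e. the bundle2 capability blob is percent-encoded and carried through the
# legacy capability list, where bundle10-era clients can safely ignore it.
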
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

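# Illustrative sketch (not part of the original module): an extension that
# wants to wrap an existing step would modify getbundle2partsmapping
# directly, along these lines. The function below is hypothetical and never
# called here; it only demonstrates the pattern the docstring above refers to.
def _examplewrapstep(stepname='changegroup'):
    origfunc = getbundle2partsmapping[stepname]
    def wrapped(bundler, repo, source, **kwargs):
        # do extension-specific work, then delegate to the original step
        repo.ui.debug('generating %s part\n' % stepname)
        return origfunc(bundler, repo, source, **kwargs)
    getbundle2partsmapping[stepname] = wrapped
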
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20, depending on the bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        return changegroup.makestream(repo, outgoing, '01', source,
                                      bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()

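# Illustrative sketch (not from the original source): a caller that wants to
# persist the generated bundle can simply drain the chunk iterator. The
# function name, filename, and capability choice below are hypothetical.
def _examplewritebundle(repo):
    caps = caps20to10(repo)  # advertise bundle2 so an HG20 stream is produced
    with open('example.hg', 'wb') as fh:
        for chunk in getbundlechunks(repo, 'bundle', bundlecaps=caps):
            fh.write(chunk)
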
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

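# Illustrative example (hypothetical history, not from the original source):
# given a public changeset P with a draft child D, a client pulling head D
# only reports D as a draft head. The revset
# 'heads(only(%ln, %ln) and public())' then finds P as an "extra" public
# head, so the client learns that P and everything below it is public even
# though P itself was never requested as a head.
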
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20-byte changeset nodes and raw
    .hgtags filenode values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

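# Illustrative sketch (not part of the original module): a client that sends
# the ['hashed', ...] form computes the digest the same way check_heads does,
# i.e. over the concatenation of the sorted binary heads it observed during
# discovery. The helper name is hypothetical.
def _exampleheadshash(heads):
    return hashlib.sha1(''.join(sorted(heads))).digest()
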
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of
    the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

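# Illustrative example (hypothetical URLs, not from the original source): a
# clone bundles manifest is one entry per line, a URL followed by key=value
# attributes:
#
#   https://hg.example.com/bundles/full.hg BUNDLESPEC=gzip-v2
#   https://hg.example.com/bundles/packed.hg BUNDLESPEC=none-packed1 REQUIRESNI=true
#
# parseclonebundlesmanifest() would turn the first line into roughly:
#
#   {'URL': 'https://hg.example.com/bundles/full.hg',
#    'BUNDLESPEC': 'gzip-v2', 'COMPRESSION': 'gzip', 'VERSION': 'v2'}
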
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

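# Illustrative example (hypothetical configuration, not from the original
# source): with
#
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, VERSION=v2
#
# sortclonebundleentries() puts zstd entries first, then v2 entries among the
# rest; entries matching none of the preferences keep their manifest order,
# since _cmp() falls back to returning 0 and sorted() is stable.
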
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False