##// END OF EJS Templates
exchange: drop unused '_getbookmarks' function...
Boris Feld -
r35032:6370ed65 default
parent child Browse files
Show More
@@ -1,2126 +1,2115 b''
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import hashlib

from .i18n import _
from .node import (
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into (version, {key: value}),
        # URI-decoding each key and value.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Convert human-readable names to internal wire identifiers.
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
171
171
def readbundle(ui, fh, fname, vfs=None):
    """Read a bundle header from ``fh`` and return a matching unbundler.

    ``fname`` is used in error messages; it defaults to "stream" when empty,
    and is joined against ``vfs`` (when given) for display. The function
    dispatches on the 4-byte magic: "HG10" (changegroup v1, also synthesized
    for headerless streams starting with '\\0'), "HG2?" (bundle2) and "HGS1"
    (stream clone). Raises ``error.Abort`` for unrecognized data.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
    if not header.startswith('HG') and header.startswith('\0'):
        # Headerless changegroup: re-prepend the consumed bytes and treat it
        # as an uncompressed HG10 stream.
        fh = changegroup.headerlessfixup(fh, header)
        header = "HG10"
        alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
199
199
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map an internal bundle compression type to its human-readable
        # bundlespec name; None when unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
252
252
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # Drop nodes the local changelog does not know about.
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        # No common nodes: everything from the null revision is outgoing.
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)
271
271
272 def _forcebundle1(op):
272 def _forcebundle1(op):
273 """return true if a pull/push must use bundle1
273 """return true if a pull/push must use bundle1
274
274
275 This function is used to allow testing of the older bundle version"""
275 This function is used to allow testing of the older bundle version"""
276 ui = op.repo.ui
276 ui = op.repo.ui
277 forcebundle1 = False
277 forcebundle1 = False
278 # The goal is this config is to allow developer to choose the bundle
278 # The goal is this config is to allow developer to choose the bundle
279 # version used during exchanged. This is especially handy during test.
279 # version used during exchanged. This is especially handy during test.
280 # Value is a list of bundle version to be picked from, highest version
280 # Value is a list of bundle version to be picked from, highest version
281 # should be used.
281 # should be used.
282 #
282 #
283 # developer config: devel.legacy.exchange
283 # developer config: devel.legacy.exchange
284 exchange = ui.configlist('devel', 'legacy.exchange')
284 exchange = ui.configlist('devel', 'legacy.exchange')
285 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
285 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
286 return forcebundle1 or not op.remote.capable('bundle2')
286 return forcebundle1 or not op.remote.capable('bundle2')
287
287
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of message used when pushing bookmark
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
414
414
415
415
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        # Local destination: verify it supports every feature this repo
        # requires before attempting the push.
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop
480
480
# list of steps to perform discovery before push (step names, in the order
# they will be executed; populated by the pushdiscovery() decorator below)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
488
488
def pushdiscovery(stepname):
    """Register a discovery step to be run before a push.

    The decorated function is recorded in the step -> function mapping and
    the step name is appended to the ordered step list, so registration
    order matters.

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, modify the pushdiscovery dictionary directly."""
    def register(discoveryfunc):
        # refuse to silently overwrite an already-registered step
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = discoveryfunc
        pushdiscoveryorder.append(stepname)
        return discoveryfunc
    return register
504
504
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
510
510
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    fci = discovery.findcommonincoming
    # compute the set of changesets common to both repos (plus the remote
    # heads); the result is reused by the outgoing computation below
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    # stash the discovery results on the push operation for later steps
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
523
523
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    # summarize the remote phase situation against our fallback heads
    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        # non-publishing server: only consider changesets already public
        # locally as candidates for a phase push
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # 'future' assumes the changeset push succeeds; 'fallback' is the set
    # to synchronise if the changeset push fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
575
575
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover which obsolescence markers need to be pushed"""
    # only compute markers when marker exchange is enabled locally, the
    # local repo actually has markers, and the remote advertises support
    # through the 'obsolete' pushkey namespace
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repo.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
586
586
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """figure out which bookmarks need to be added, moved or deleted"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict implicit bookmark moves to the history being pushed
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user explicitly asked to push, after alias expansion
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # a side of the comparison may be None (bookmark missing there)
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        # convert binary nodes to hex, preserving the (name, src, dst) shape
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    # outbookmarks entries are (name, old-remote-value, new-value)
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        # anything left in 'explicit' was requested but not found anywhere
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
651
651
def _pushcheckoutgoing(pushop):
    """Check the outgoing changesets and abort on unpushable ones.

    Returns False when there is nothing to push, True otherwise. Raises
    error.Abort when an outgoing head is obsolete or unstable (unless
    --force was given)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
686
686
# List of names of steps to perform for an outgoing bundle2, order matters.
# Populated by the b2partsgenerator() decorator below.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
694
694
def b2partsgenerator(stepname, idx=None):
    """Register a bundle2 part generator under ``stepname``.

    The decorated function is stored in the step -> function mapping and
    inserted into the ordered step list: appended at the end by default,
    or at position ``idx`` when one is given. Registration order matters.

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, modify the b2partsgenmapping dictionary directly."""
    def register(partgen):
        # a step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = partgen
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return partgen
    return register
713
713
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # coarse check: fail if any remote head changed since discovery
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # finer check: only fail if a remote head we are actually
            # superseding or removing has changed
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
737
737
738 def _pushing(pushop):
738 def _pushing(pushop):
739 """return True if we are pushing anything"""
739 """return True if we are pushing anything"""
740 return bool(pushop.outgoing.missing
740 return bool(pushop.outgoing.missing
741 or pushop.outdatedphases
741 or pushop.outdatedphases
742 or pushop.outobsmarkers
742 or pushop.outobsmarkers
743 or pushop.outbookmarks)
743 or pushop.outbookmarks)
744
744
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                # sort for a deterministic part payload
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)
762
762
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # negotiate the highest changegroup version both sides support
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
802
802
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    # the binary 'phase-heads' part is preferred; the 'devel.legacy.exchange'
    # config can force the older pushkey-based exchange
    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)
819
819
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        # one bucket of nodes per phase, indexed by phase value
        updates = [[] for p in phases.allphases]
        # all outdated heads are to become public; use the symbolic phase
        # index (phases.public == 0) instead of a magic 0, matching the
        # style of _pushb2checkphases
        updates[phases.public].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)
828
828
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, used to map server replies back to nodes
    part2node = []

    def handlefailure(pushop, exc):
        # called when the server rejects one of our pushkey parts
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        # one pushkey part per head moving from draft to public
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # inspect each pushkey reply and warn about ignored/failed updates
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
863
863
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence markers part to the bundle, when possible"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format shared with the remote; skip without marking the
        # step done is not needed, but we must not emit a part
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        # sort for a deterministic part payload
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)
875
875
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples for reply processing
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # called when the server rejects one of our bookmark pushkey parts
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        # one pushkey part per bookmark change
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # classify the change for user-facing messages: empty old value
        # means the bookmark is new, empty new value means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
927
927
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    rawvars = pushop.pushvars
    if not rawvars:
        return

    parsed = {}
    for entry in rawvars:
        if '=' not in entry:
            msg = ("unable to parse variable '%s', should follow "
                   "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % entry)
        # split on the first '=' only so values may contain '='
        name, value = entry.split('=', 1)
        parsed[name] = value

    part = bundler.newpart('pushvars')
    for name, value in parsed.iteritems():
        part.addparam(name, value, mandatory=False)
946
946
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback (the server sending data back in its reply bundle) is only
    # allowed when we hold a transaction manager and the user opted in
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # let each registered part generator add its parts; a generator may
    # return a callable to be invoked later on the server's reply bundle
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (bundle holds only the 'replycaps' part)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the server aborted while processing one of our parts; relay
            # its message (and optional hint) before aborting locally
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        # dispatch to the failure callback registered for that part, if any
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
995
995
def _pushchangeset(pushop):
    """Bundle the outgoing changesets and push them to the remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # The fast path is only safe when we push everything: no explicit rev
    # set, nothing excluded and nothing filtered, so no race is possible.
    fastpath = (pushop.revs is None
                and not outgoing.excluded
                and not pushop.repo.changelog.filteredrevs)
    cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                     fastpath=fastpath,
                                     bundlecaps=bundlecaps)

    # Apply the changegroup on the remote. The local repo finds the heads
    # on the server and figures out what it must push. Once the revs are
    # transferred, if the server sees different heads (someone else won a
    # commit/push race), it aborts.
    remoteheads = ['force'] if pushop.force else pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())
1035
1035
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        # pheads: heads that are public on the remote
        # droots: roots of the remote's draft set
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # remote publishes everything: all common heads become public
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1091
1091
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo, pushop.trmanager.transaction(),
                               phase, nodes)
        return
    # The repo is not locked, so we must not change any phase here.
    # Instead, tell the user when a move would actually have happened.
    wouldmove = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
1108
1108
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
1127
1127
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for name, oldnode, newnode in pushop.outbookmarks:
        # classify the change: no old value means a creation,
        # no new value means a deletion, otherwise a move
        if not oldnode:
            action = 'export'
        elif not newnode:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', name, oldnode, newnode):
            ui.status(bookmsgmap[action][0] % name)
        else:
            ui.warn(bookmsgmap[action][1] % name)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1149
1149
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly (names expanded via the local store)
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (set up by pull() once locks are held)
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # whether bundle2 can be used for this pull (False when bundle1
        # is forced, e.g. by configuration or remote capabilities)
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote, fetched once
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1220
1220
class transactionmanager(util.transactional):
    """Manage the life cycle of a pull/push transaction.

    The transaction is created lazily, on the first call to
    ``transaction()``, and the appropriate hooks fire when it is
    closed."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            name = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(name)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1250
1250
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)

    # refuse to pull when the source requires features this repo lacks
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each step below checks pullop.stepsdone, so work already handled
        # by bundle2 is not redone
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop
1308
1308
# list of steps to perform discovery before pull, in execution order
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1316
1316
def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The decorated function is registered in the step -> function mapping and
    its step name is appended to the ordered list of steps. Beware that
    decorated functions are registered in definition order (this may matter).

    Only use this decorator for a new step; to wrap a step from an
    extension, change the pulldiscovery dictionary directly."""
    def register(discoveryfunc):
        # a given step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = discoveryfunc
        pulldiscoveryorder.append(stepname)
        return discoveryfunc
    return register
1332
1332
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1338
1338
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # already known (e.g. passed in by the caller), nothing to fetch
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    rawbooks = pullop.remote.listkeys('bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(rawbooks)
1353
1353
1354
1354
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will grow to handle all
    discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(pullop.repo,
                                                         pullop.remote,
                                                         heads=pullop.heads,
                                                         force=pullop.force)
    nodemap = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish way to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because that ends up doing a pathological amount of
        # round trips for a huge number of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not detect it.
        known = set(common)
        common.extend(n for n in rheads
                      if n in nodemap and n not in known)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1387
1387
1388 def _pullbundle2(pullop):
1388 def _pullbundle2(pullop):
1389 """pull data using bundle2
1389 """pull data using bundle2
1390
1390
1391 For now, the only supported data are changegroup."""
1391 For now, the only supported data are changegroup."""
1392 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1392 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1393
1393
1394 # At the moment we don't do stream clones over bundle2. If that is
1394 # At the moment we don't do stream clones over bundle2. If that is
1395 # implemented then here's where the check for that will go.
1395 # implemented then here's where the check for that will go.
1396 streaming = False
1396 streaming = False
1397
1397
1398 # pulling changegroup
1398 # pulling changegroup
1399 pullop.stepsdone.add('changegroup')
1399 pullop.stepsdone.add('changegroup')
1400
1400
1401 kwargs['common'] = pullop.common
1401 kwargs['common'] = pullop.common
1402 kwargs['heads'] = pullop.heads or pullop.rheads
1402 kwargs['heads'] = pullop.heads or pullop.rheads
1403 kwargs['cg'] = pullop.fetch
1403 kwargs['cg'] = pullop.fetch
1404
1404
1405 ui = pullop.repo.ui
1405 ui = pullop.repo.ui
1406 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1406 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1407 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1407 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1408 if (not legacyphase and hasbinaryphase):
1408 if (not legacyphase and hasbinaryphase):
1409 kwargs['phases'] = True
1409 kwargs['phases'] = True
1410 pullop.stepsdone.add('phases')
1410 pullop.stepsdone.add('phases')
1411
1411
1412 if 'listkeys' in pullop.remotebundle2caps:
1412 if 'listkeys' in pullop.remotebundle2caps:
1413 if 'phases' not in pullop.stepsdone:
1413 if 'phases' not in pullop.stepsdone:
1414 kwargs['listkeys'] = ['phases']
1414 kwargs['listkeys'] = ['phases']
1415 if pullop.remotebookmarks is None:
1415 if pullop.remotebookmarks is None:
1416 # make sure to always includes bookmark data when migrating
1416 # make sure to always includes bookmark data when migrating
1417 # `hg incoming --bundle` to using this function.
1417 # `hg incoming --bundle` to using this function.
1418 kwargs.setdefault('listkeys', []).append('bookmarks')
1418 kwargs.setdefault('listkeys', []).append('bookmarks')
1419
1419
1420 # If this is a full pull / clone and the server supports the clone bundles
1420 # If this is a full pull / clone and the server supports the clone bundles
1421 # feature, tell the server whether we attempted a clone bundle. The
1421 # feature, tell the server whether we attempted a clone bundle. The
1422 # presence of this flag indicates the client supports clone bundles. This
1422 # presence of this flag indicates the client supports clone bundles. This
1423 # will enable the server to treat clients that support clone bundles
1423 # will enable the server to treat clients that support clone bundles
1424 # differently from those that don't.
1424 # differently from those that don't.
1425 if (pullop.remote.capable('clonebundles')
1425 if (pullop.remote.capable('clonebundles')
1426 and pullop.heads is None and list(pullop.common) == [nullid]):
1426 and pullop.heads is None and list(pullop.common) == [nullid]):
1427 kwargs['cbattempted'] = pullop.clonebundleattempted
1427 kwargs['cbattempted'] = pullop.clonebundleattempted
1428
1428
1429 if streaming:
1429 if streaming:
1430 pullop.repo.ui.status(_('streaming all changes\n'))
1430 pullop.repo.ui.status(_('streaming all changes\n'))
1431 elif not pullop.fetch:
1431 elif not pullop.fetch:
1432 pullop.repo.ui.status(_("no changes found\n"))
1432 pullop.repo.ui.status(_("no changes found\n"))
1433 pullop.cgresult = 0
1433 pullop.cgresult = 0
1434 else:
1434 else:
1435 if pullop.heads is None and list(pullop.common) == [nullid]:
1435 if pullop.heads is None and list(pullop.common) == [nullid]:
1436 pullop.repo.ui.status(_("requesting all changes\n"))
1436 pullop.repo.ui.status(_("requesting all changes\n"))
1437 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1437 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1438 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1438 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1439 if obsolete.commonversion(remoteversions) is not None:
1439 if obsolete.commonversion(remoteversions) is not None:
1440 kwargs['obsmarkers'] = True
1440 kwargs['obsmarkers'] = True
1441 pullop.stepsdone.add('obsmarkers')
1441 pullop.stepsdone.add('obsmarkers')
1442 _pullbundle2extraprepare(pullop, kwargs)
1442 _pullbundle2extraprepare(pullop, kwargs)
1443 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1443 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1444 try:
1444 try:
1445 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1445 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1446 except bundle2.AbortFromPart as exc:
1446 except bundle2.AbortFromPart as exc:
1447 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1447 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1448 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1448 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1449 except error.BundleValueError as exc:
1449 except error.BundleValueError as exc:
1450 raise error.Abort(_('missing support for %s') % exc)
1450 raise error.Abort(_('missing support for %s') % exc)
1451
1451
1452 if pullop.fetch:
1452 if pullop.fetch:
1453 pullop.cgresult = bundle2.combinechangegroupresults(op)
1453 pullop.cgresult = bundle2.combinechangegroupresults(op)
1454
1454
1455 # processing phases change
1455 # processing phases change
1456 for namespace, value in op.records['listkeys']:
1456 for namespace, value in op.records['listkeys']:
1457 if namespace == 'phases':
1457 if namespace == 'phases':
1458 _pullapplyphases(pullop, value)
1458 _pullapplyphases(pullop, value)
1459
1459
1460 # processing bookmark update
1460 # processing bookmark update
1461 for namespace, value in op.records['listkeys']:
1461 for namespace, value in op.records['listkeys']:
1462 if namespace == 'bookmarks':
1462 if namespace == 'bookmarks':
1463 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1463 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1464
1464
1465 # bookmark data were either already there or pulled in the bundle
1465 # bookmark data were either already there or pulled in the bundle
1466 if pullop.remotebookmarks is not None:
1466 if pullop.remotebookmarks is not None:
1467 _pullbookmarks(pullop)
1467 _pullbookmarks(pullop)
1468
1468
1469 def _pullbundle2extraprepare(pullop, kwargs):
1469 def _pullbundle2extraprepare(pullop, kwargs):
1470 """hook function so that extensions can extend the getbundle call"""
1470 """hook function so that extensions can extend the getbundle call"""
1471
1471
1472 def _pullchangeset(pullop):
1472 def _pullchangeset(pullop):
1473 """pull changeset from unbundle into the local repo"""
1473 """pull changeset from unbundle into the local repo"""
1474 # We delay the open of the transaction as late as possible so we
1474 # We delay the open of the transaction as late as possible so we
1475 # don't open transaction for nothing or you break future useful
1475 # don't open transaction for nothing or you break future useful
1476 # rollback call
1476 # rollback call
1477 if 'changegroup' in pullop.stepsdone:
1477 if 'changegroup' in pullop.stepsdone:
1478 return
1478 return
1479 pullop.stepsdone.add('changegroup')
1479 pullop.stepsdone.add('changegroup')
1480 if not pullop.fetch:
1480 if not pullop.fetch:
1481 pullop.repo.ui.status(_("no changes found\n"))
1481 pullop.repo.ui.status(_("no changes found\n"))
1482 pullop.cgresult = 0
1482 pullop.cgresult = 0
1483 return
1483 return
1484 tr = pullop.gettransaction()
1484 tr = pullop.gettransaction()
1485 if pullop.heads is None and list(pullop.common) == [nullid]:
1485 if pullop.heads is None and list(pullop.common) == [nullid]:
1486 pullop.repo.ui.status(_("requesting all changes\n"))
1486 pullop.repo.ui.status(_("requesting all changes\n"))
1487 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1487 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1488 # issue1320, avoid a race if remote changed after discovery
1488 # issue1320, avoid a race if remote changed after discovery
1489 pullop.heads = pullop.rheads
1489 pullop.heads = pullop.rheads
1490
1490
1491 if pullop.remote.capable('getbundle'):
1491 if pullop.remote.capable('getbundle'):
1492 # TODO: get bundlecaps from remote
1492 # TODO: get bundlecaps from remote
1493 cg = pullop.remote.getbundle('pull', common=pullop.common,
1493 cg = pullop.remote.getbundle('pull', common=pullop.common,
1494 heads=pullop.heads or pullop.rheads)
1494 heads=pullop.heads or pullop.rheads)
1495 elif pullop.heads is None:
1495 elif pullop.heads is None:
1496 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1496 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1497 elif not pullop.remote.capable('changegroupsubset'):
1497 elif not pullop.remote.capable('changegroupsubset'):
1498 raise error.Abort(_("partial pull cannot be done because "
1498 raise error.Abort(_("partial pull cannot be done because "
1499 "other repository doesn't support "
1499 "other repository doesn't support "
1500 "changegroupsubset."))
1500 "changegroupsubset."))
1501 else:
1501 else:
1502 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1502 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1503 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1503 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1504 pullop.remote.url())
1504 pullop.remote.url())
1505 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1505 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1506
1506
1507 def _pullphase(pullop):
1507 def _pullphase(pullop):
1508 # Get remote phases data from remote
1508 # Get remote phases data from remote
1509 if 'phases' in pullop.stepsdone:
1509 if 'phases' in pullop.stepsdone:
1510 return
1510 return
1511 remotephases = pullop.remote.listkeys('phases')
1511 remotephases = pullop.remote.listkeys('phases')
1512 _pullapplyphases(pullop, remotephases)
1512 _pullapplyphases(pullop, remotephases)
1513
1513
1514 def _pullapplyphases(pullop, remotephases):
1514 def _pullapplyphases(pullop, remotephases):
1515 """apply phase movement from observed remote state"""
1515 """apply phase movement from observed remote state"""
1516 if 'phases' in pullop.stepsdone:
1516 if 'phases' in pullop.stepsdone:
1517 return
1517 return
1518 pullop.stepsdone.add('phases')
1518 pullop.stepsdone.add('phases')
1519 publishing = bool(remotephases.get('publishing', False))
1519 publishing = bool(remotephases.get('publishing', False))
1520 if remotephases and not publishing:
1520 if remotephases and not publishing:
1521 # remote is new and non-publishing
1521 # remote is new and non-publishing
1522 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1522 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1523 pullop.pulledsubset,
1523 pullop.pulledsubset,
1524 remotephases)
1524 remotephases)
1525 dheads = pullop.pulledsubset
1525 dheads = pullop.pulledsubset
1526 else:
1526 else:
1527 # Remote is old or publishing all common changesets
1527 # Remote is old or publishing all common changesets
1528 # should be seen as public
1528 # should be seen as public
1529 pheads = pullop.pulledsubset
1529 pheads = pullop.pulledsubset
1530 dheads = []
1530 dheads = []
1531 unfi = pullop.repo.unfiltered()
1531 unfi = pullop.repo.unfiltered()
1532 phase = unfi._phasecache.phase
1532 phase = unfi._phasecache.phase
1533 rev = unfi.changelog.nodemap.get
1533 rev = unfi.changelog.nodemap.get
1534 public = phases.public
1534 public = phases.public
1535 draft = phases.draft
1535 draft = phases.draft
1536
1536
1537 # exclude changesets already public locally and update the others
1537 # exclude changesets already public locally and update the others
1538 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1538 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1539 if pheads:
1539 if pheads:
1540 tr = pullop.gettransaction()
1540 tr = pullop.gettransaction()
1541 phases.advanceboundary(pullop.repo, tr, public, pheads)
1541 phases.advanceboundary(pullop.repo, tr, public, pheads)
1542
1542
1543 # exclude changesets already draft locally and update the others
1543 # exclude changesets already draft locally and update the others
1544 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1544 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1545 if dheads:
1545 if dheads:
1546 tr = pullop.gettransaction()
1546 tr = pullop.gettransaction()
1547 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1547 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1548
1548
1549 def _pullbookmarks(pullop):
1549 def _pullbookmarks(pullop):
1550 """process the remote bookmark information to update the local one"""
1550 """process the remote bookmark information to update the local one"""
1551 if 'bookmarks' in pullop.stepsdone:
1551 if 'bookmarks' in pullop.stepsdone:
1552 return
1552 return
1553 pullop.stepsdone.add('bookmarks')
1553 pullop.stepsdone.add('bookmarks')
1554 repo = pullop.repo
1554 repo = pullop.repo
1555 remotebookmarks = pullop.remotebookmarks
1555 remotebookmarks = pullop.remotebookmarks
1556 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1556 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1557 pullop.remote.url(),
1557 pullop.remote.url(),
1558 pullop.gettransaction,
1558 pullop.gettransaction,
1559 explicit=pullop.explicitbookmarks)
1559 explicit=pullop.explicitbookmarks)
1560
1560
1561 def _pullobsolete(pullop):
1561 def _pullobsolete(pullop):
1562 """utility function to pull obsolete markers from a remote
1562 """utility function to pull obsolete markers from a remote
1563
1563
1564 The `gettransaction` is function that return the pull transaction, creating
1564 The `gettransaction` is function that return the pull transaction, creating
1565 one if necessary. We return the transaction to inform the calling code that
1565 one if necessary. We return the transaction to inform the calling code that
1566 a new transaction have been created (when applicable).
1566 a new transaction have been created (when applicable).
1567
1567
1568 Exists mostly to allow overriding for experimentation purpose"""
1568 Exists mostly to allow overriding for experimentation purpose"""
1569 if 'obsmarkers' in pullop.stepsdone:
1569 if 'obsmarkers' in pullop.stepsdone:
1570 return
1570 return
1571 pullop.stepsdone.add('obsmarkers')
1571 pullop.stepsdone.add('obsmarkers')
1572 tr = None
1572 tr = None
1573 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1573 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1574 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1574 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1575 remoteobs = pullop.remote.listkeys('obsolete')
1575 remoteobs = pullop.remote.listkeys('obsolete')
1576 if 'dump0' in remoteobs:
1576 if 'dump0' in remoteobs:
1577 tr = pullop.gettransaction()
1577 tr = pullop.gettransaction()
1578 markers = []
1578 markers = []
1579 for key in sorted(remoteobs, reverse=True):
1579 for key in sorted(remoteobs, reverse=True):
1580 if key.startswith('dump'):
1580 if key.startswith('dump'):
1581 data = util.b85decode(remoteobs[key])
1581 data = util.b85decode(remoteobs[key])
1582 version, newmarks = obsolete._readmarkers(data)
1582 version, newmarks = obsolete._readmarkers(data)
1583 markers += newmarks
1583 markers += newmarks
1584 if markers:
1584 if markers:
1585 pullop.repo.obsstore.add(tr, markers)
1585 pullop.repo.obsstore.add(tr, markers)
1586 pullop.repo.invalidatevolatilesets()
1586 pullop.repo.invalidatevolatilesets()
1587 return tr
1587 return tr
1588
1588
1589 def caps20to10(repo):
1589 def caps20to10(repo):
1590 """return a set with appropriate options to use bundle20 during getbundle"""
1590 """return a set with appropriate options to use bundle20 during getbundle"""
1591 caps = {'HG20'}
1591 caps = {'HG20'}
1592 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1592 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1593 caps.add('bundle2=' + urlreq.quote(capsblob))
1593 caps.add('bundle2=' + urlreq.quote(capsblob))
1594 return caps
1594 return caps
1595
1595
1596 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1596 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1597 getbundle2partsorder = []
1597 getbundle2partsorder = []
1598
1598
1599 # Mapping between step name and function
1599 # Mapping between step name and function
1600 #
1600 #
1601 # This exists to help extensions wrap steps if necessary
1601 # This exists to help extensions wrap steps if necessary
1602 getbundle2partsmapping = {}
1602 getbundle2partsmapping = {}
1603
1603
1604 def getbundle2partsgenerator(stepname, idx=None):
1604 def getbundle2partsgenerator(stepname, idx=None):
1605 """decorator for function generating bundle2 part for getbundle
1605 """decorator for function generating bundle2 part for getbundle
1606
1606
1607 The function is added to the step -> function mapping and appended to the
1607 The function is added to the step -> function mapping and appended to the
1608 list of steps. Beware that decorated functions will be added in order
1608 list of steps. Beware that decorated functions will be added in order
1609 (this may matter).
1609 (this may matter).
1610
1610
1611 You can only use this decorator for new steps, if you want to wrap a step
1611 You can only use this decorator for new steps, if you want to wrap a step
1612 from an extension, attack the getbundle2partsmapping dictionary directly."""
1612 from an extension, attack the getbundle2partsmapping dictionary directly."""
1613 def dec(func):
1613 def dec(func):
1614 assert stepname not in getbundle2partsmapping
1614 assert stepname not in getbundle2partsmapping
1615 getbundle2partsmapping[stepname] = func
1615 getbundle2partsmapping[stepname] = func
1616 if idx is None:
1616 if idx is None:
1617 getbundle2partsorder.append(stepname)
1617 getbundle2partsorder.append(stepname)
1618 else:
1618 else:
1619 getbundle2partsorder.insert(idx, stepname)
1619 getbundle2partsorder.insert(idx, stepname)
1620 return func
1620 return func
1621 return dec
1621 return dec
1622
1622
1623 def bundle2requested(bundlecaps):
1623 def bundle2requested(bundlecaps):
1624 if bundlecaps is not None:
1624 if bundlecaps is not None:
1625 return any(cap.startswith('HG2') for cap in bundlecaps)
1625 return any(cap.startswith('HG2') for cap in bundlecaps)
1626 return False
1626 return False
1627
1627
1628 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1628 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1629 **kwargs):
1629 **kwargs):
1630 """Return chunks constituting a bundle's raw data.
1630 """Return chunks constituting a bundle's raw data.
1631
1631
1632 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1632 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1633 passed.
1633 passed.
1634
1634
1635 Returns an iterator over raw chunks (of varying sizes).
1635 Returns an iterator over raw chunks (of varying sizes).
1636 """
1636 """
1637 kwargs = pycompat.byteskwargs(kwargs)
1637 kwargs = pycompat.byteskwargs(kwargs)
1638 usebundle2 = bundle2requested(bundlecaps)
1638 usebundle2 = bundle2requested(bundlecaps)
1639 # bundle10 case
1639 # bundle10 case
1640 if not usebundle2:
1640 if not usebundle2:
1641 if bundlecaps and not kwargs.get('cg', True):
1641 if bundlecaps and not kwargs.get('cg', True):
1642 raise ValueError(_('request for bundle10 must include changegroup'))
1642 raise ValueError(_('request for bundle10 must include changegroup'))
1643
1643
1644 if kwargs:
1644 if kwargs:
1645 raise ValueError(_('unsupported getbundle arguments: %s')
1645 raise ValueError(_('unsupported getbundle arguments: %s')
1646 % ', '.join(sorted(kwargs.keys())))
1646 % ', '.join(sorted(kwargs.keys())))
1647 outgoing = _computeoutgoing(repo, heads, common)
1647 outgoing = _computeoutgoing(repo, heads, common)
1648 return changegroup.makestream(repo, outgoing, '01', source,
1648 return changegroup.makestream(repo, outgoing, '01', source,
1649 bundlecaps=bundlecaps)
1649 bundlecaps=bundlecaps)
1650
1650
1651 # bundle20 case
1651 # bundle20 case
1652 b2caps = {}
1652 b2caps = {}
1653 for bcaps in bundlecaps:
1653 for bcaps in bundlecaps:
1654 if bcaps.startswith('bundle2='):
1654 if bcaps.startswith('bundle2='):
1655 blob = urlreq.unquote(bcaps[len('bundle2='):])
1655 blob = urlreq.unquote(bcaps[len('bundle2='):])
1656 b2caps.update(bundle2.decodecaps(blob))
1656 b2caps.update(bundle2.decodecaps(blob))
1657 bundler = bundle2.bundle20(repo.ui, b2caps)
1657 bundler = bundle2.bundle20(repo.ui, b2caps)
1658
1658
1659 kwargs['heads'] = heads
1659 kwargs['heads'] = heads
1660 kwargs['common'] = common
1660 kwargs['common'] = common
1661
1661
1662 for name in getbundle2partsorder:
1662 for name in getbundle2partsorder:
1663 func = getbundle2partsmapping[name]
1663 func = getbundle2partsmapping[name]
1664 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1664 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1665 **pycompat.strkwargs(kwargs))
1665 **pycompat.strkwargs(kwargs))
1666
1666
1667 return bundler.getchunks()
1667 return bundler.getchunks()
1668
1668
1669 @getbundle2partsgenerator('changegroup')
1669 @getbundle2partsgenerator('changegroup')
1670 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1670 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1671 b2caps=None, heads=None, common=None, **kwargs):
1671 b2caps=None, heads=None, common=None, **kwargs):
1672 """add a changegroup part to the requested bundle"""
1672 """add a changegroup part to the requested bundle"""
1673 cgstream = None
1673 cgstream = None
1674 if kwargs.get('cg', True):
1674 if kwargs.get('cg', True):
1675 # build changegroup bundle here.
1675 # build changegroup bundle here.
1676 version = '01'
1676 version = '01'
1677 cgversions = b2caps.get('changegroup')
1677 cgversions = b2caps.get('changegroup')
1678 if cgversions: # 3.1 and 3.2 ship with an empty value
1678 if cgversions: # 3.1 and 3.2 ship with an empty value
1679 cgversions = [v for v in cgversions
1679 cgversions = [v for v in cgversions
1680 if v in changegroup.supportedoutgoingversions(repo)]
1680 if v in changegroup.supportedoutgoingversions(repo)]
1681 if not cgversions:
1681 if not cgversions:
1682 raise ValueError(_('no common changegroup version'))
1682 raise ValueError(_('no common changegroup version'))
1683 version = max(cgversions)
1683 version = max(cgversions)
1684 outgoing = _computeoutgoing(repo, heads, common)
1684 outgoing = _computeoutgoing(repo, heads, common)
1685 if outgoing.missing:
1685 if outgoing.missing:
1686 cgstream = changegroup.makestream(repo, outgoing, version, source,
1686 cgstream = changegroup.makestream(repo, outgoing, version, source,
1687 bundlecaps=bundlecaps)
1687 bundlecaps=bundlecaps)
1688
1688
1689 if cgstream:
1689 if cgstream:
1690 part = bundler.newpart('changegroup', data=cgstream)
1690 part = bundler.newpart('changegroup', data=cgstream)
1691 if cgversions:
1691 if cgversions:
1692 part.addparam('version', version)
1692 part.addparam('version', version)
1693 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1693 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1694 mandatory=False)
1694 mandatory=False)
1695 if 'treemanifest' in repo.requirements:
1695 if 'treemanifest' in repo.requirements:
1696 part.addparam('treemanifest', '1')
1696 part.addparam('treemanifest', '1')
1697
1697
1698 @getbundle2partsgenerator('listkeys')
1698 @getbundle2partsgenerator('listkeys')
1699 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1699 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1700 b2caps=None, **kwargs):
1700 b2caps=None, **kwargs):
1701 """add parts containing listkeys namespaces to the requested bundle"""
1701 """add parts containing listkeys namespaces to the requested bundle"""
1702 listkeys = kwargs.get('listkeys', ())
1702 listkeys = kwargs.get('listkeys', ())
1703 for namespace in listkeys:
1703 for namespace in listkeys:
1704 part = bundler.newpart('listkeys')
1704 part = bundler.newpart('listkeys')
1705 part.addparam('namespace', namespace)
1705 part.addparam('namespace', namespace)
1706 keys = repo.listkeys(namespace).items()
1706 keys = repo.listkeys(namespace).items()
1707 part.data = pushkey.encodekeys(keys)
1707 part.data = pushkey.encodekeys(keys)
1708
1708
1709 @getbundle2partsgenerator('obsmarkers')
1709 @getbundle2partsgenerator('obsmarkers')
1710 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1710 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1711 b2caps=None, heads=None, **kwargs):
1711 b2caps=None, heads=None, **kwargs):
1712 """add an obsolescence markers part to the requested bundle"""
1712 """add an obsolescence markers part to the requested bundle"""
1713 if kwargs.get('obsmarkers', False):
1713 if kwargs.get('obsmarkers', False):
1714 if heads is None:
1714 if heads is None:
1715 heads = repo.heads()
1715 heads = repo.heads()
1716 subset = [c.node() for c in repo.set('::%ln', heads)]
1716 subset = [c.node() for c in repo.set('::%ln', heads)]
1717 markers = repo.obsstore.relevantmarkers(subset)
1717 markers = repo.obsstore.relevantmarkers(subset)
1718 markers = sorted(markers)
1718 markers = sorted(markers)
1719 bundle2.buildobsmarkerspart(bundler, markers)
1719 bundle2.buildobsmarkerspart(bundler, markers)
1720
1720
1721 @getbundle2partsgenerator('phases')
1721 @getbundle2partsgenerator('phases')
1722 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1722 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1723 b2caps=None, heads=None, **kwargs):
1723 b2caps=None, heads=None, **kwargs):
1724 """add phase heads part to the requested bundle"""
1724 """add phase heads part to the requested bundle"""
1725 if kwargs.get('phases', False):
1725 if kwargs.get('phases', False):
1726 if not 'heads' in b2caps.get('phases'):
1726 if not 'heads' in b2caps.get('phases'):
1727 raise ValueError(_('no common phases exchange method'))
1727 raise ValueError(_('no common phases exchange method'))
1728 if heads is None:
1728 if heads is None:
1729 heads = repo.heads()
1729 heads = repo.heads()
1730
1730
1731 headsbyphase = collections.defaultdict(set)
1731 headsbyphase = collections.defaultdict(set)
1732 if repo.publishing():
1732 if repo.publishing():
1733 headsbyphase[phases.public] = heads
1733 headsbyphase[phases.public] = heads
1734 else:
1734 else:
1735 # find the appropriate heads to move
1735 # find the appropriate heads to move
1736
1736
1737 phase = repo._phasecache.phase
1737 phase = repo._phasecache.phase
1738 node = repo.changelog.node
1738 node = repo.changelog.node
1739 rev = repo.changelog.rev
1739 rev = repo.changelog.rev
1740 for h in heads:
1740 for h in heads:
1741 headsbyphase[phase(repo, rev(h))].add(h)
1741 headsbyphase[phase(repo, rev(h))].add(h)
1742 seenphases = list(headsbyphase.keys())
1742 seenphases = list(headsbyphase.keys())
1743
1743
1744 # We do not handle anything but public and draft phase for now)
1744 # We do not handle anything but public and draft phase for now)
1745 if seenphases:
1745 if seenphases:
1746 assert max(seenphases) <= phases.draft
1746 assert max(seenphases) <= phases.draft
1747
1747
1748 # if client is pulling non-public changesets, we need to find
1748 # if client is pulling non-public changesets, we need to find
1749 # intermediate public heads.
1749 # intermediate public heads.
1750 draftheads = headsbyphase.get(phases.draft, set())
1750 draftheads = headsbyphase.get(phases.draft, set())
1751 if draftheads:
1751 if draftheads:
1752 publicheads = headsbyphase.get(phases.public, set())
1752 publicheads = headsbyphase.get(phases.public, set())
1753
1753
1754 revset = 'heads(only(%ln, %ln) and public())'
1754 revset = 'heads(only(%ln, %ln) and public())'
1755 extraheads = repo.revs(revset, draftheads, publicheads)
1755 extraheads = repo.revs(revset, draftheads, publicheads)
1756 for r in extraheads:
1756 for r in extraheads:
1757 headsbyphase[phases.public].add(node(r))
1757 headsbyphase[phases.public].add(node(r))
1758
1758
1759 # transform data in a format used by the encoding function
1759 # transform data in a format used by the encoding function
1760 phasemapping = []
1760 phasemapping = []
1761 for phase in phases.allphases:
1761 for phase in phases.allphases:
1762 phasemapping.append(sorted(headsbyphase[phase]))
1762 phasemapping.append(sorted(headsbyphase[phase]))
1763
1763
1764 # generate the actual part
1764 # generate the actual part
1765 phasedata = phases.binaryencode(phasemapping)
1765 phasedata = phases.binaryencode(phasemapping)
1766 bundler.newpart('phase-heads', data=phasedata)
1766 bundler.newpart('phase-heads', data=phasedata)
1767
1767
1768 @getbundle2partsgenerator('hgtagsfnodes')
1768 @getbundle2partsgenerator('hgtagsfnodes')
1769 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1769 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1770 b2caps=None, heads=None, common=None,
1770 b2caps=None, heads=None, common=None,
1771 **kwargs):
1771 **kwargs):
1772 """Transfer the .hgtags filenodes mapping.
1772 """Transfer the .hgtags filenodes mapping.
1773
1773
1774 Only values for heads in this bundle will be transferred.
1774 Only values for heads in this bundle will be transferred.
1775
1775
1776 The part data consists of pairs of 20 byte changeset node and .hgtags
1776 The part data consists of pairs of 20 byte changeset node and .hgtags
1777 filenodes raw values.
1777 filenodes raw values.
1778 """
1778 """
1779 # Don't send unless:
1779 # Don't send unless:
1780 # - changeset are being exchanged,
1780 # - changeset are being exchanged,
1781 # - the client supports it.
1781 # - the client supports it.
1782 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1782 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1783 return
1783 return
1784
1784
1785 outgoing = _computeoutgoing(repo, heads, common)
1785 outgoing = _computeoutgoing(repo, heads, common)
1786 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1786 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1787
1787
1788 def _getbookmarks(repo, **kwargs):
1789 """Returns bookmark to node mapping.
1790
1791 This function is primarily used to generate `bookmarks` bundle2 part.
1792 It is a separate function in order to make it easy to wrap it
1793 in extensions. Passing `kwargs` to the function makes it easy to
1794 add new parameters in extensions.
1795 """
1796
1797 return dict(bookmod.listbinbookmarks(repo))
1798
def check_heads(repo, their_heads, context):
    """Verify the repo heads have not moved since the client observed them.

    Used by peer for unbundling.  ``their_heads`` is either ``['force']``,
    the literal head list the client saw, or ``['hashed', digest]`` where
    digest is the sha1 of the sorted, concatenated heads.  Raises
    PushRaced when none of those match the current state.
    """
    current = repo.heads()
    digest = hashlib.sha1(''.join(sorted(current))).digest()
    unchanged = (their_heads == ['force']
                 or their_heads == current
                 or their_heads == ['hashed', digest])
    if unchanged:
        return
    # Someone else committed/pushed/unbundled while the client was
    # transferring data.
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    ``cg`` is either a bundle1 changegroup or a bundle2 stream; ``heads``
    is only meaningful for bundle1 clients (see note below).

    If the push was raced a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        # HTTP peers cannot stream server output directly; capture it so it
        # can be shipped back inside the bundle2 reply instead.
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    # Lazily acquire locks and open the transaction the
                    # first time a bundle2 part actually needs one.
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # The reply must be grabbed (and output capture set up)
                    # even when part processing failed.
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # Tag the exception so upper layers know output may have been
                # captured, and salvage any already-generated reply parts.
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        # Release in reverse acquisition order: transaction, lock, wlock.
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible.

    Silently returns when clone bundles are disabled, the local repo is not
    empty, specific heads were requested, or the remote lacks the
    ``clonebundles`` capability.  May raise Abort when a bundle application
    fails and fallback is disabled.
    """

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # Specific heads were requested; do not substitute a whole-repo bundle.
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # Only the most-preferred entry is attempted; on failure we fall back
    # to a regular clone (or abort) rather than trying the next entry.
    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for rawline in s.splitlines():
        words = rawline.split()
        if not words:
            # Blank manifest line; nothing to record.
            continue
        # First field is the URL; the rest are percent-encoded KEY=VALUE
        # attribute pairs.
        entry = {'URL': words[0]}
        for pair in words[1:]:
            key, value = pair.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            entry[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    entry['COMPRESSION'] = comp
                    entry['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # Leave unparseable specs as their raw attribute only.
                    pass

        entries.append(entry)

    return entries
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    usable = []
    for candidate in entries:
        spec = candidate.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  candidate['URL'])
                    continue

            except error.InvalidBundleSpecification as exc:
                repo.ui.debug(str(exc) + '\n')
                continue
            except error.UnsupportedBundleSpecification as exc:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (candidate['URL'], str(exc)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % candidate['URL'])
            continue

        if 'REQUIRESNI' in candidate and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          candidate['URL'])
            continue

        usable.append(candidate)

    return usable
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Walk the preference list in order; the first attribute that
        # discriminates the two entries decides the ordering.
        for prefkey, prefvalue in self.prefers:
            mine = self.value.get(prefkey)
            theirs = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if mine is not None and theirs is None and mine == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if theirs is not None and mine is None and theirs == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if mine is None or theirs is None:
                continue

            # Same values should fall back to next attribute.
            if mine == theirs:
                continue

            # Exact matches come first.
            if mine == prefvalue:
                return -1
            if theirs == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        rank = self._cmp(other)
        return rank < 0

    def __gt__(self, other):
        rank = self._cmp(other)
        return rank > 0

    def __eq__(self, other):
        rank = self._cmp(other)
        return rank == 0

    def __le__(self, other):
        rank = self._cmp(other)
        return rank <= 0

    def __ge__(self, other):
        rank = self._cmp(other)
        return rank >= 0

    def __ne__(self, other):
        rank = self._cmp(other)
        return rank != 0
def sortclonebundleentries(ui, entries):
    """Order manifest entries by the ``ui.clonebundleprefers`` config.

    With no preferences configured, the manifest order is preserved.
    Returns a new list in either case.
    """
    rawprefs = ui.configlist('ui', 'clonebundleprefers')
    if not rawprefs:
        return list(entries)

    # Each preference is a "KEY=VALUE" string; split once on '='.
    prefpairs = [pref.split('=', 1) for pref in rawprefs]

    wrapped = [clonebundleentry(entry, prefpairs) for entry in entries]
    wrapped.sort()
    return [item.value for item in wrapped]
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied.  HTTP and URL
    errors are reported as warnings and yield False; any other exception
    propagates (aborting the transaction).
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                # Stream clone bundles have their own application path and
                # bypass the bundle2 machinery.
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

    return False
General Comments 0
You need to be logged in to leave comments. Login now