phase: isolate logic to update remote phase through bundle2 pushkey...
Boris Feld
r34823:c1e7ce11 default
@@ -1,2106 +1,2109 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import hashlib

from .i18n import _
from .node import (
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params

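A minimal usage sketch of the spec grammar above, assuming ``repo`` is any local repository object (the call and values are illustrative only, not part of this change):

    comp, version, params = parsebundlespec(repo, 'gzip-v2',
                                            externalnames=True)
    # -> ('gzip', 'v2', {}); with externalnames=False the internal
    #    names come back instead, e.g. ('GZ', '02', {}).
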
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

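A hedged sketch of inferring a spec from a bundle file on disk; the ui object and file name below are assumptions for the example, and note the handle is consumed without being rewound:

    from mercurial import ui as uimod

    myui = uimod.ui.load()
    with open('changes.hg', 'rb') as fh:
        spec = getbundlespec(myui, fh)  # e.g. 'bzip2-v1' or 'gzip-v2'
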
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    forcebundle1 = False
    # The goal of this config option is to allow developers to choose the
    # bundle version used during exchange. This is especially handy during
    # tests. The value is a list of bundle versions to pick from; the highest
    # version should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

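For illustration, the developer knob read above could be set like this (a sketch; with several values listed, the highest supported version wins):

    # in an hgrc:
    #   [devel]
    #   legacy.exchange = bundle1
    # then ui.configlist('devel', 'legacy.exchange') == ['bundle1'], so
    # _forcebundle1(op) is True even against a bundle2-capable peer.
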
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have already been performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exceptions from mandatory pushkey part failures
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of messages used when pushing bookmarks
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

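A sketch of how an extension might register an extra discovery step through this decorator; the step name and function below are hypothetical:

    @pushdiscovery('my-step')
    def _pushdiscoverymystep(pushop):
        # appended after the built-in steps, since decorated
        # functions are registered in definition order
        pushop.ui.debug('running my-step discovery\n')
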
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure cases of the changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changesets are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the phase synchronisation otherwise done as a courtesy,
        # which would publish changesets that are possibly draft locally.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that this revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, which can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete markers;
        # then skip the iteration
        if unfi.obsstore:
            # these messages are here for the 80-char-limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are pushing, and there is at least one
            # obsolete or unstable changeset in missing, then at
            # least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

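A sketch of registering a bundle2 part generator, using the optional ``idx`` to control ordering; the step name and function are hypothetical:

    @b2partsgenerator('my-part', idx=0)  # idx=0 makes the step run first
    def _pushb2mypart(pushop, bundler):
        if 'my-part' in pushop.stepsdone:
            return
        pushop.stepsdone.add('my-part')
        # add parts to ``bundler`` here; optionally return a reply handler
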
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
-    if not 'pushkey' in b2caps:
-        return
+    if 'pushkey' in b2caps:
+        _pushb2phasespushkey(pushop, bundler)
+
+def _pushb2phasespushkey(pushop, bundler):
+    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
812 part2node = []
815 part2node = []
813
816
814 def handlefailure(pushop, exc):
817 def handlefailure(pushop, exc):
815 targetid = int(exc.partid)
818 targetid = int(exc.partid)
816 for partid, node in part2node:
819 for partid, node in part2node:
817 if partid == targetid:
820 if partid == targetid:
818 raise error.Abort(_('updating %s to public failed') % node)
821 raise error.Abort(_('updating %s to public failed') % node)
819
822
820 enc = pushkey.encode
823 enc = pushkey.encode
821 for newremotehead in pushop.outdatedphases:
824 for newremotehead in pushop.outdatedphases:
822 part = bundler.newpart('pushkey')
825 part = bundler.newpart('pushkey')
823 part.addparam('namespace', enc('phases'))
826 part.addparam('namespace', enc('phases'))
824 part.addparam('key', enc(newremotehead.hex()))
827 part.addparam('key', enc(newremotehead.hex()))
825 part.addparam('old', enc('%d' % phases.draft))
828 part.addparam('old', enc('%d' % phases.draft))
826 part.addparam('new', enc('%d' % phases.public))
829 part.addparam('new', enc('%d' % phases.public))
827 part2node.append((part.id, newremotehead))
830 part2node.append((part.id, newremotehead))
828 pushop.pkfailcb[part.id] = handlefailure
831 pushop.pkfailcb[part.id] = handlefailure
829
832
830 def handlereply(op):
833 def handlereply(op):
831 for partid, node in part2node:
834 for partid, node in part2node:
832 partrep = op.records.getreplies(partid)
835 partrep = op.records.getreplies(partid)
833 results = partrep['pushkey']
836 results = partrep['pushkey']
834 assert len(results) <= 1
837 assert len(results) <= 1
835 msg = None
838 msg = None
836 if not results:
839 if not results:
837 msg = _('server ignored update of %s to public!\n') % node
840 msg = _('server ignored update of %s to public!\n') % node
838 elif not int(results[0]['return']):
841 elif not int(results[0]['return']):
839 msg = _('updating %s to public failed!\n') % node
842 msg = _('updating %s to public failed!\n') % node
840 if msg is not None:
843 if msg is not None:
841 pushop.ui.warn(msg)
844 pushop.ui.warn(msg)
842 return handlereply
845 return handlereply
843
846
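# Each pushkey part generated above carries, for one outdated remote
# head, the transition '<hex node>: draft -> public'; since
# phases.draft is 1 and phases.public is 0, 'old' encodes '1' and
# 'new' encodes '0'.
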
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply

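# The action chosen above maps directly onto the pushkey payload: an
# empty 'old' means the bookmark does not yet exist remotely
# ('export'), an empty 'new' removes it ('delete'), and anything else
# moves an existing bookmark ('update').
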
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

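# Typical use (a sketch): a client runs 'hg push --pushvars "DEBUG=1"',
# the variable travels in the 'pushvars' part built above, and the
# receiving side is expected to expose it to hooks as an
# HG_USERVAR_* environment variable.
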
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

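# The bundle2 push round trip, in short: every registered generator in
# b2partsgenorder contributes parts (and optionally a reply handler),
# the assembled bundle is streamed to the remote in a single unbundle()
# call, and the remote's reply bundle is processed locally before the
# collected reply handlers run against the resulting records.
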
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

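# For reference (roughly): listkeys('phases') replies look like
# {'publishing': 'True'} for a publishing server, or a mapping of draft
# root nodes to '1' plus 'publishing': 'False' for a non-publishing
# one; analyzeremotephases() turns that raw mapping into
# (public heads, draft roots).
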
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
    # discovery may have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

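# Usage sketch: callers create the manager up front, let code paths
# that actually need a transaction call transaction() lazily, then
# close() on success and release() on every path, e.g.:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       ...  # steps may call trmanager.transaction()
#       trmanager.close()
#   finally:
#       trmanager.release()
#
# This is exactly the shape pull() below relies on.
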
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop

# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pulldiscoverymapping dictionary
    directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

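# Registration sketch (hypothetical 'mystep' name): a new discovery
# step would hook in as
#
#   @pulldiscovery('mystep')
#   def _pulldiscoverymystep(pullop):
#       ...  # record extra state on pullop before the pull proper
#
# which appends 'mystep' to pulldiscoveryorder and maps it in
# pulldiscoverymapping, so _pulldiscovery() below runs it in order.
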
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it ends up doing a pathological
        # number of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroups."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch

    ui = pullop.repo.ui
    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
    if (not legacyphase and hasbinaryphase):
        kwargs['phases'] = True
        pullop.stepsdone.add('phases')

    if 'listkeys' in pullop.remotebundle2caps:
        if 'phases' not in pullop.stepsdone:
            kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

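# By the time getbundle() is called above, kwargs typically looks
# something like
#   {'bundlecaps': {...}, 'common': [...], 'heads': [...], 'cg': True,
#    'phases': True, 'listkeys': ['bookmarks'], ...}
# depending on the negotiated capabilities; extensions get a last
# chance to amend it via _pullbundle2extraprepare().
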
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

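# Extension sketch (the 'mycap' argument is hypothetical): an extension
# can wrap this hook to request extra parts, e.g.
#
#   from mercurial import exchange, extensions
#
#   def _extraprepare(orig, pullop, kwargs):
#       kwargs['mycap'] = True  # ask the server for an extra part
#       return orig(pullop, kwargs)
#
#   extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                           _extraprepare)
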
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing or break a future useful rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

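# The resulting set looks like {'HG20', 'bundle2=<urlquoted caps blob>'};
# passed as bundlecaps to a getbundle call, it requests a bundle2
# ("HG20") stream instead of a legacy bundle10 one.
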
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a
    step from an extension, change the getbundle2partsmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

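# Registration sketch (the 'myextra' step is hypothetical):
#
#   @getbundle2partsgenerator('myextra')
#   def _getbundlemyextrapart(bundler, repo, source, **kwargs):
#       ...  # add extra parts to the bundler
#
# appends the step at the end of getbundle2partsorder, while idx=0
# would force it to run first; getbundlechunks() below invokes the
# generators in that order.
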
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        return changegroup.makestream(repo, outgoing, '01', source,
                                      bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()

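# Usage sketch (assumes 'repo' is an open localrepository object): a
# bundle2-capable client advertises caps20to10()-style capabilities, and
# the raw bundle is the concatenation of the returned chunks.
chunks = getbundlechunks(repo, 'serve', bundlecaps=caps20to10(repo))
raw = b''.join(chunks)
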
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

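# Version negotiation sketch: the server intersects the client-advertised
# changegroup versions with the ones it can emit, then picks the highest.
clientversions = ['01', '02']
serverversions = {'01', '02', '03'}   # cf. supportedoutgoingversions(repo)
common = [v for v in clientversions if v in serverversions]
version = max(common)                 # -> '02'
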
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

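# Wire-format sketch (hypothetical bookmark value): pushkey.encodekeys
# serializes the namespace content essentially as newline-separated,
# tab-delimited key/value pairs.
keys = [('@', '1111111111111111111111111111111111111111')]
encoded = '\n'.join('%s\t%s' % (k, v) for k, v in keys)
# -> '@\t1111111111111111111111111111111111111111'
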
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform the data into the format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

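# Shape sketch (hypothetical 20-byte nodes) of the mapping handed to
# phases.binaryencode above: one sorted list of heads per phase, indexed
# by phase number (public=0, draft=1, secret=2).
node_public = b'\x11' * 20
node_draft = b'\x22' * 20
phasemapping = [[node_public],  # public heads
                [node_draft],   # draft heads
                []]             # secret heads (never populated here)
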
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20-byte changeset nodes and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def _getbookmarks(repo, **kwargs):
    """Returns the bookmark to node mapping.

    This function is primarily used to generate the `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """

    return dict(bookmod.listbinbookmarks(repo))

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

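# Sketch of the 'hashed' heads handshake above (hypothetical node values):
# the client sends ['hashed', sha1-of-sorted-heads] computed from what it
# saw before building the bundle; any head change in between raises
# PushRaced.
import hashlib

seenheads = [b'\xaa' * 20, b'\x55' * 20]
their_heads = ['hashed', hashlib.sha1(b''.join(sorted(seenheads))).digest()]
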
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

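# Simplified sketch of the lazy-locking pattern above: locks and the
# transaction are only taken the first time a bundle2 part actually asks
# for one, and state lives in a mutable list so the closure can fill it in.
def make_gettransaction(repo, source):
    state = [None, None, None]           # [wlock, lock, tr]
    def gettransaction():
        if state[2] is None:
            state[0] = repo.wlock()      # working-copy lock first
            state[1] = repo.lock()       # then the store lock
            state[2] = repo.transaction(source)
        return state[2]
    return gettransaction
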
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL; the other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

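# Example manifest body (hypothetical URLs) and the entries the parser
# above produces; attribute values are URL-decoded, and BUNDLESPEC is also
# split into COMPRESSION/VERSION when it parses cleanly:
sample = ('https://example.com/full.hg BUNDLESPEC=gzip-v2\n'
          'https://example.com/packed.hg BUNDLESPEC=none-packed1 REQUIRESNI=true\n')
# parseclonebundlesmanifest(repo, sample) ->
# [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#   'COMPRESSION': 'gzip', 'VERSION': 'v2'},
#  {'URL': 'https://example.com/packed.hg', 'BUNDLESPEC': 'none-packed1',
#   'COMPRESSION': 'none', 'VERSION': 'packed1', 'REQUIRESNI': 'true'}]
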
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

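# Stream-clone filtering sketch: with strict parsing, the 'none-packed1'
# spec resolves to the internal names comp='UN', version='s1', the only
# combination kept above when a stream clone was requested.
comp, version = 'UN', 's1'
keep = not (comp != 'UN' or version != 's1')  # -> True
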
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

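# Preference-sorting sketch (hypothetical entries): with
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, VERSION=v2
# an entry matching the first preference sorts ahead; ties fall through to
# the next preference and finally to manifest order.
entries = [{'URL': 'a', 'COMPRESSION': 'gzip'},
           {'URL': 'b', 'COMPRESSION': 'zstd'}]
prefers = [['COMPRESSION', 'zstd'], ['VERSION', 'v2']]
ordered = [i.value for i in
           sorted(clonebundleentry(v, prefers) for v in entries)]
# ordered[0]['URL'] == 'b'
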
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

    return False